/*	$NetBSD: kern_time.c,v 1.160 2009/03/29 19:21:19 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.160 2009/03/29 19:21:19 christos Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

POOL_INIT(ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
    &pool_allocator_nointr, IPL_NONE);

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	/* nothing yet */
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/* Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

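	/*
	 * Adjust boottime by the same delta, so that "boottime +
	 * uptime" still yields the current wall clock time after
	 * the step.
	 */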
	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	clockid_t clock_id;
	struct timespec ats;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		break;
	default:
		return (EINVAL);
	}

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}


int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts.tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts.tv_nsec = 1;
		else
			ts.tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return (EINVAL);
	}

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = itimespecfix(rqt)) != 0)
		return error;

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
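	/*
	 * Track elapsed time with the monotonic clock.  If kpause()
	 * returns before the timeout has expired (error == 0), the
	 * remaining time is recomputed below and we sleep again; a
	 * timeout (EWOULDBLOCK) or a signal ends the sleep.
	 */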
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp; really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error = 0;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first three elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
 * syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real
 * timer's .it_value from its .it_interval.  Rather, we compute the
 * next time in absolute time the timer should go off.
 */
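
/*
 * To illustrate the delta encoding used for virtual time timers:
 * three timers due to fire after 5, 8 and 12 seconds of process
 * virtual time are kept on the list as the deltas 5, 3 and 4, so
 * that only the head of the list needs to be decremented as time
 * elapses.  timer_settime() below maintains this invariant when
 * inserting and removing entries.
 */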

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if (id < CLOCK_REALTIME || id > CLOCK_PROF)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (id == CLOCK_REALTIME)
		callout_init(&pt->pt_ch, 0);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (pt->pt_type != CLOCK_REALTIME) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME timers and a relative
 * time for virtual timers.
 * Must be called with timer_lock held.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (pt->pt_type == CLOCK_REALTIME) {
		callout_stop(&pt->pt_ch);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (pt->pt_type == CLOCK_REALTIME) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			getnanotime(&now);
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}

/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (pt->pt_type == CLOCK_REALTIME) {
			if ((flags & TIMER_ABSTIME) == 0) {
				getnanotime(&now);
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending. The counter is reset when
 * a timer expires and a notification can be posted.
 */
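/*
 * For example, if a periodic timer expires three more times while
 * the signal from an earlier expiration is still pending, those
 * expirations are compressed into the pending signal and a
 * subsequent timer_getoverrun() call reports 3.
 */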
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}

#ifdef KERN_SA
/* Glue function that triggers an upcall; called from userret(). */
void
timerupcall(struct lwp *l)
{
	struct ptimers *pt = l->l_proc->p_timers;
	struct proc *p = l->l_proc;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return;

	mutex_enter(p->p_lock);

	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		f = ~l->l_pflag & LP_SA_NOBLOCK;
		l->l_pflag |= LP_SA_NOBLOCK;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_pflag ^= f;
	}
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_timerpend = 0;

	mutex_exit(p->p_lock);
}
#endif /* KERN_SA */

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
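/*
 * A worked example of the reload arithmetic below: for a timer last
 * due at t = 10s with a 3s interval, a callout that does not run
 * until t = 17.5s has missed the expirations at 13s and 16s (two
 * overruns), and the timer is re-armed for the next interval
 * boundary after "now", i.e. t = 19s.
 */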
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	getnanotime(&now);
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == NULL)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (pt->pt_type == CLOCK_REALTIME)
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		getnanotime(&now);
		timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures. If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = 3;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (pt->pt_type == CLOCK_REALTIME)
		callout_halt(&pt->pt_ch, &timer_lock);
	else if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (pt->pt_type == CLOCK_REALTIME)
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
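/*
 * For example, at hz = 100 each tick decrements the head timer by
 * 10000000 ns.  A timer with 400 ns left and a 20 ms interval
 * expires on that tick; the 9999600 ns of overshoot is carried
 * over, so the timer is reloaded with 10000400 ns instead of the
 * full interval.
 */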
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;		/* sec is already 0 */
	return (0);
}

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
	    (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
	    pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
		return;
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

#ifdef KERN_SA
/*
 * timer_sa_intr:
 *
 *	SIGEV_SA handling for timer_intr(). We are called (and return)
 * with the timer lock held. We know that the process had SA enabled
 * when this timer was enqueued. As timer_intr() is a soft interrupt
 * handler, SA should still be enabled by the time we get here.
 */
static void
timer_sa_intr(struct ptimer *pt, proc_t *p)
{
	unsigned int		i;
	struct sadata		*sa;
	struct sadata_vp	*vp;

	/* Cause the process to generate an upcall when it returns. */
	if (!p->p_timerpend) {
		/*
		 * XXX stop signals can be processed inside tsleep,
		 * which can be inside sa_yield's inner loop, which
		 * makes testing for sa_idle alone insufficient to
		 * determine if we really should call setrunnable.
		 */
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		i = 1 << pt->pt_entry;
		p->p_timers->pts_fired = i;
		p->p_timerpend = 1;

		sa = p->p_sa;
		mutex_enter(&sa->sa_mutex);
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			struct lwp *vp_lwp = vp->savp_lwp;
			lwp_lock(vp_lwp);
			lwp_need_userret(vp_lwp);
			if (vp_lwp->l_flag & LW_SA_IDLE) {
				vp_lwp->l_flag &= ~LW_SA_IDLE;
				lwp_unsleep(vp_lwp, true);
				break;
			}
			lwp_unlock(vp_lwp);
		}
		mutex_exit(&sa->sa_mutex);
	} else {
		i = 1 << pt->pt_entry;
		if ((p->p_timers->pts_fired & i) == 0) {
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			p->p_timers->pts_fired |= i;
		} else
			pt->pt_overruns++;
	}
}
#endif /* KERN_SA */

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
#ifdef KERN_SA
		if (pt->pt_ev.sigev_notify == SIGEV_SA) {
			timer_sa_intr(pt, p);
			continue;
		}
#endif /* KERN_SA */
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
			continue;
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}
1464