xref: /openbsd-src/sys/kern/kern_time.c (revision f90ef06a3045119dcc88b72d8b98ca60e3c00d5a)
1 /*	$OpenBSD: kern_time.c,v 1.164 2023/08/05 20:07:55 cheloha Exp $	*/
2 /*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
33  */
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/clockintr.h>
39 #include <sys/mutex.h>
40 #include <sys/rwlock.h>
41 #include <sys/proc.h>
42 #include <sys/ktrace.h>
43 #include <sys/signalvar.h>
44 #include <sys/stdint.h>
45 #include <sys/pledge.h>
46 #include <sys/task.h>
47 #include <sys/time.h>
48 #include <sys/timeout.h>
49 #include <sys/timetc.h>
50 
51 #include <sys/mount.h>
52 #include <sys/syscallargs.h>
53 
54 #include <dev/clock_subr.h>
55 
/* Validate and clamp a setitimer(2) input (see definition below). */
int itimerfix(struct itimerval *);
/* Recompute a process's PS_ITIMER flag from its timer state. */
void process_reset_itimer_flag(struct process *);
58 
59 /*
60  * Time of day and interval timer support.
61  *
62  * These routines provide the kernel entry points to get and set
63  * the time-of-day and per-process interval timers.  Subroutines
64  * here provide support for adding and subtracting timeval structures
65  * and decrementing interval timers, optionally reloading the interval
66  * timers when they expire.
67  */
68 
/*
 * Set the realtime clock to ts and push the new value out to the
 * hardware TOD clock.  Used by clock_settime(2) and settimeofday(2).
 *
 * Returns 0 on success, or EPERM when the requested time is rejected
 * (too far in the future, or backwards at securelevel > 1).
 */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevent interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	/* resettodr() is called with the kernel lock held. */
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}
111 
/*
 * Read clock clock_id into *tp on behalf of thread p.
 *
 * Returns 0 on success, EINVAL for an unknown clock, or ESRCH when a
 * pthread_getcpuclockid(3)-style clock names a thread that cannot be
 * found in p's process.
 */
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		/*
		 * Accumulated process run time plus the CPU time of the
		 * current stint (now - spc_runtime).  NOTE(review):
		 * assumes spc_runtime marks when this thread last went
		 * on-CPU — confirm against the scheduler.
		 */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		/* Same computation as above, but for this thread only. */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}
157 
158 int
159 sys_clock_gettime(struct proc *p, void *v, register_t *retval)
160 {
161 	struct sys_clock_gettime_args /* {
162 		syscallarg(clockid_t) clock_id;
163 		syscallarg(struct timespec *) tp;
164 	} */ *uap = v;
165 	struct timespec ats;
166 	int error;
167 
168 	memset(&ats, 0, sizeof(ats));
169 	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
170 		return (error);
171 
172 	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
173 #ifdef KTRACE
174 	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
175 		ktrabstimespec(p, &ats);
176 #endif
177 	return (error);
178 }
179 
180 int
181 sys_clock_settime(struct proc *p, void *v, register_t *retval)
182 {
183 	struct sys_clock_settime_args /* {
184 		syscallarg(clockid_t) clock_id;
185 		syscallarg(const struct timespec *) tp;
186 	} */ *uap = v;
187 	struct timespec ats;
188 	clockid_t clock_id;
189 	int error;
190 
191 	if ((error = suser(p)) != 0)
192 		return (error);
193 
194 	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
195 		return (error);
196 
197 	clock_id = SCARG(uap, clock_id);
198 	switch (clock_id) {
199 	case CLOCK_REALTIME:
200 		if (!timespecisvalid(&ats))
201 			return (EINVAL);
202 		if ((error = settime(&ats)) != 0)
203 			return (error);
204 		break;
205 	default:	/* Other clocks are read-only */
206 		return (EINVAL);
207 	}
208 
209 	return (0);
210 }
211 
/*
 * clock_getres(2): report the resolution of the named clock.
 * A NULL tp pointer is allowed and simply validates clock_id.
 */
int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		/*
		 * Timecounter-backed clocks: convert the counter's
		 * precision (in ticks) to a bintime fraction.  scale
		 * approximates 2^64 / frequency; it is computed as
		 * ((2^63 / freq) * 2) since 2^64 itself would overflow
		 * a 64-bit integer.
		 */
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		/* CPU-time clocks resolve at the statclock rate. */
		ts.tv_nsec = 1000000000 / stathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / stathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		/* Never report a resolution of zero. */
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}
271 
/*
 * nanosleep(2): suspend the calling thread for the requested
 * interval.  The request is slept off in chunks of at most MAXTSLP
 * nanoseconds, re-deriving the remaining time from the uptime clock
 * after each chunk.  On signal delivery, the unslept remainder is
 * copied out through rmtp (when non-NULL) and EINTR is returned.
 */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		/* Sleep at least 1ns and at most MAXTSLP per pass. */
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&nowake, PWAIT | PCATCH, "nanoslp", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		/* Anything other than a timeout ends the sleep early. */
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;		/* nanosleep(2) is not restarted */
	if (error == EWOULDBLOCK)
		error = 0;		/* full interval elapsed: success */

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
328 
329 int
330 sys_gettimeofday(struct proc *p, void *v, register_t *retval)
331 {
332 	struct sys_gettimeofday_args /* {
333 		syscallarg(struct timeval *) tp;
334 		syscallarg(struct timezone *) tzp;
335 	} */ *uap = v;
336 	struct timeval atv;
337 	static const struct timezone zerotz = { 0, 0 };
338 	struct timeval *tp;
339 	struct timezone *tzp;
340 	int error = 0;
341 
342 	tp = SCARG(uap, tp);
343 	tzp = SCARG(uap, tzp);
344 
345 	if (tp) {
346 		memset(&atv, 0, sizeof(atv));
347 		microtime(&atv);
348 		if ((error = copyout(&atv, tp, sizeof (atv))))
349 			return (error);
350 #ifdef KTRACE
351 		if (KTRPOINT(p, KTR_STRUCT))
352 			ktrabstimeval(p, &atv);
353 #endif
354 	}
355 	if (tzp)
356 		error = copyout(&zerotz, tzp, sizeof(zerotz));
357 	return (error);
358 }
359 
360 int
361 sys_settimeofday(struct proc *p, void *v, register_t *retval)
362 {
363 	struct sys_settimeofday_args /* {
364 		syscallarg(const struct timeval *) tv;
365 		syscallarg(const struct timezone *) tzp;
366 	} */ *uap = v;
367 	struct timezone atz;
368 	struct timeval atv;
369 	const struct timeval *tv;
370 	const struct timezone *tzp;
371 	int error;
372 
373 	tv = SCARG(uap, tv);
374 	tzp = SCARG(uap, tzp);
375 
376 	if ((error = suser(p)))
377 		return (error);
378 	/* Verify all parameters before changing time. */
379 	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
380 		return (error);
381 	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
382 		return (error);
383 	if (tv) {
384 		struct timespec ts;
385 
386 #ifdef KTRACE
387 		if (KTRPOINT(p, KTR_STRUCT))
388 			ktrabstimeval(p, &atv);
389 #endif
390 		if (!timerisvalid(&atv))
391 			return (EINVAL);
392 		TIMEVAL_TO_TIMESPEC(&atv, &ts);
393 		if ((error = settime(&ts)) != 0)
394 			return (error);
395 	}
396 
397 	return (0);
398 }
399 
/*
 * Bounds on the frequency adjustment accepted by adjfreq(2).
 * NOTE(review): appears to be a fixed-point value with a 32-bit
 * fractional part (integer part +/-500000000) — confirm against
 * tc_adjfreq().
 */
#define ADJFREQ_MAX (500000000LL << 32)
#define ADJFREQ_MIN (-ADJFREQ_MAX)
402 
/*
 * adjfreq(2): read and/or set the timecounter frequency adjustment.
 * tc_lock is taken shared for a pure read and exclusive when a new
 * adjustment is being installed.
 */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	/* Setting a new adjustment requires root and a bounded value. */
	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		/* Fetch the current adjustment before changing it. */
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}
436 
/*
 * adjtime(2): read the outstanding clock adjustment and/or install a
 * new one.  tc_lock is write-locked only when a new delta is supplied.
 */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);

		/*
		 * Convert the delta to microseconds, rejecting any
		 * input that would overflow int64_t at either step.
		 */
		if (atv.tv_sec > INT64_MAX / 1000000)
			return EINVAL;
		if (atv.tv_sec < INT64_MIN / 1000000)
			return EINVAL;
		adjustment = atv.tv_sec * 1000000;
		if (adjustment > INT64_MAX - atv.tv_usec)
			return EINVAL;
		adjustment += atv.tv_usec;

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		/* Report the adjustment still outstanding. */
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec =  remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			/* Normalize so tv_usec is non-negative. */
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}
499 
500 
/* Protects the virtual and profiling interval timers (see setitimer()). */
struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);
502 
503 /*
504  * Get or set value of an interval timer.  The process virtual and
505  * profiling virtual time timers are kept internally in the
506  * way they are specified externally: in time until they expire.
507  *
508  * The real time interval timer's it_value, in contrast, is kept as an
509  * absolute time rather than as a delta, so that it is easy to keep
510  * periodic real-time signals from drifting.
511  *
512  * Virtual time timers are processed in the hardclock() routine of
513  * kern_clock.c.  The real time timer is processed by a timeout
514  * routine, called from the softclock() routine.  Since a callout
515  * may be delayed in real time due to interrupt processing in the system,
516  * it is possible for the real time timeout routine (realitexpire, given below),
517  * to be delayed in real time past when it is supposed to occur.  It
518  * does not suffice, therefore, to reload the real timer .it_value from the
519  * real time timers .it_interval.  Rather, we compute the next time in
520  * absolute time the timer should go off.
521  */
/*
 * Atomically read and/or replace interval timer `which' of the
 * current process: when itv is non-NULL the timer is loaded from it;
 * when olditv is non-NULL the previous value is returned through it.
 * Backs both getitimer(2) and setitimer(2).
 */
void
setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
{
	struct itimerspec its, oldits;
	struct timespec now;
	struct itimerspec *itimer;
	struct process *pr;

	KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);

	pr = curproc->p_p;
	itimer = &pr->ps_timer[which];

	/* Convert the new value before taking any lock. */
	if (itv != NULL) {
		TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
		TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
	}

	/*
	 * ITIMER_REAL is protected by ps_mtx; the virtual and
	 * profiling timers are protected by itimer_mtx.
	 */
	if (which == ITIMER_REAL) {
		mtx_enter(&pr->ps_mtx);
		nanouptime(&now);
	} else
		mtx_enter(&itimer_mtx);

	if (olditv != NULL)
		oldits = *itimer;
	if (itv != NULL) {
		if (which == ITIMER_REAL) {
			/*
			 * The real timer's it_value is stored as an
			 * absolute uptime (see the comment above), so
			 * arm or disarm the timeout accordingly.
			 */
			if (timespecisset(&its.it_value)) {
				timespecadd(&its.it_value, &now, &its.it_value);
				timeout_abs_ts(&pr->ps_realit_to,&its.it_value);
			} else
				timeout_del(&pr->ps_realit_to);
		}
		*itimer = its;
		if (which == ITIMER_VIRTUAL || which == ITIMER_PROF) {
			/* Recompute PS_ITIMER and force a reschedule. */
			process_reset_itimer_flag(pr);
			need_resched(curcpu());
		}
	}

	if (which == ITIMER_REAL)
		mtx_leave(&pr->ps_mtx);
	else
		mtx_leave(&itimer_mtx);

	if (olditv != NULL) {
		/*
		 * Convert the old ITIMER_REAL value back from absolute
		 * time to time-until-expiry, clamping at zero.
		 */
		if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
			if (timespeccmp(&oldits.it_value, &now, <))
				timespecclear(&oldits.it_value);
			else {
				timespecsub(&oldits.it_value, &now,
				    &oldits.it_value);
			}
		}
		TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
		TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
	}
}
581 
582 void
583 cancel_all_itimers(void)
584 {
585 	struct itimerval itv;
586 	int i;
587 
588 	timerclear(&itv.it_value);
589 	timerclear(&itv.it_interval);
590 
591 	for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
592 		setitimer(i, &itv, NULL);
593 }
594 
595 int
596 sys_getitimer(struct proc *p, void *v, register_t *retval)
597 {
598 	struct sys_getitimer_args /* {
599 		syscallarg(int) which;
600 		syscallarg(struct itimerval *) itv;
601 	} */ *uap = v;
602 	struct itimerval aitv;
603 	int which;
604 
605 	which = SCARG(uap, which);
606 	if (which < ITIMER_REAL || which > ITIMER_PROF)
607 		return EINVAL;
608 
609 	memset(&aitv, 0, sizeof(aitv));
610 
611 	setitimer(which, NULL, &aitv);
612 
613 	return copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
614 }
615 
616 int
617 sys_setitimer(struct proc *p, void *v, register_t *retval)
618 {
619 	struct sys_setitimer_args /* {
620 		syscallarg(int) which;
621 		syscallarg(const struct itimerval *) itv;
622 		syscallarg(struct itimerval *) oitv;
623 	} */ *uap = v;
624 	struct itimerval aitv, olditv;
625 	struct itimerval *newitvp, *olditvp;
626 	int error, which;
627 
628 	which = SCARG(uap, which);
629 	if (which < ITIMER_REAL || which > ITIMER_PROF)
630 		return EINVAL;
631 
632 	newitvp = olditvp = NULL;
633 	if (SCARG(uap, itv) != NULL) {
634 		error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
635 		if (error)
636 			return error;
637 		error = itimerfix(&aitv);
638 		if (error)
639 			return error;
640 		newitvp = &aitv;
641 	}
642 	if (SCARG(uap, oitv) != NULL) {
643 		memset(&olditv, 0, sizeof(olditv));
644 		olditvp = &olditv;
645 	}
646 	if (newitvp == NULL && olditvp == NULL)
647 		return 0;
648 
649 	setitimer(which, newitvp, olditvp);
650 
651 	if (SCARG(uap, oitv) != NULL)
652 		return copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));
653 
654 	return 0;
655 }
656 
657 /*
658  * Real interval timer expired:
659  * send process whose timer expired an alarm signal.
660  * If time is not set up to reload, then just return.
661  * Else compute next time timer should go off which is > current time.
662  * This is where delay in processing this timeout causes multiple
663  * SIGALRM calls to be compressed into one.
664  */
void
realitexpire(void *arg)
{
	struct timespec cts;
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
	int need_signal = 0;

	mtx_enter(&pr->ps_mtx);

	/*
	 * Do nothing if the timer was cancelled or rescheduled while we
	 * were entering the mutex.
	 */
	if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
		goto out;

	/* The timer expired.  We need to send the signal. */
	need_signal = 1;

	/* One-shot timers are not reloaded. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		goto out;
	}

	/*
	 * Find the nearest future expiration point and restart
	 * the timeout.  it_value is an absolute uptime, so step it
	 * forward by whole intervals until it lies in the future.
	 */
	nanouptime(&cts);
	while (timespeccmp(&tp->it_value, &cts, <=))
		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
	if ((pr->ps_flags & PS_EXITING) == 0)
		timeout_abs_ts(&pr->ps_realit_to, &tp->it_value);

out:
	mtx_leave(&pr->ps_mtx);

	/* Post SIGALRM only after ps_mtx has been dropped. */
	if (need_signal)
		prsignal(pr, SIGALRM);
}
707 
708 /*
709  * Check if the given setitimer(2) input is valid.  Clear it_interval
710  * if it_value is unset.  Round it_interval up to the minimum interval
711  * if necessary.
712  */
713 int
714 itimerfix(struct itimerval *itv)
715 {
716 	static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
717 	struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };
718 
719 	if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
720 		return EINVAL;
721 	if (timercmp(&itv->it_value, &max, >))
722 		return EINVAL;
723 	if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
724 		return EINVAL;
725 	if (timercmp(&itv->it_interval, &max, >))
726 		return EINVAL;
727 
728 	if (!timerisset(&itv->it_value))
729 		timerclear(&itv->it_interval);
730 	if (timerisset(&itv->it_interval)) {
731 		if (timercmp(&itv->it_interval, &min_interval, <))
732 			itv->it_interval = min_interval;
733 	}
734 
735 	return 0;
736 }
737 
738 /*
739  * Decrement an interval timer by the given duration.
740  * If the timer expires and it is periodic then reload it.  When reloading
741  * the timer we subtract any overrun from the next period so that the timer
742  * does not drift.
743  */
744 int
745 itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
746 {
747 	timespecsub(&itp->it_value, decrement, &itp->it_value);
748 	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value))
749 		return (1);
750 	if (!timespecisset(&itp->it_interval)) {
751 		timespecclear(&itp->it_value);
752 		return (0);
753 	}
754 	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
755 		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
756 	return (0);
757 }
758 
/*
 * Clock interrupt handler for the virtual and profiling interval
 * timers.  Advances the clockintr in whole hardclock periods to
 * determine the elapsed time, then decrements the armed
 * ITIMER_VIRTUAL (user mode only) and ITIMER_PROF timers of the
 * current process.  On expiry it sets P_ALRMPEND/P_PROFPEND and
 * requests a proftick; the signal itself is posted wherever those
 * flags are checked.
 */
void
itimer_update(struct clockintr *cl, void *cf)
{
	struct timespec elapsed;
	uint64_t nsecs;
	struct clockframe *frame = cf;
	struct proc *p = curproc;
	struct process *pr;

	/* Nothing to do for system threads or exiting threads. */
	if (p == NULL || ISSET(p->p_flag, P_SYSTEM | P_WEXIT))
		return;

	pr = p->p_p;
	/* PS_ITIMER is only set while a virtual/profiling timer is armed. */
	if (!ISSET(pr->ps_flags, PS_ITIMER))
		return;

	/* Elapsed time = number of periods we advanced, in nanoseconds. */
	nsecs = clockintr_advance(cl, hardclock_period) * hardclock_period;
	NSEC_TO_TIMESPEC(nsecs, &elapsed);

	mtx_enter(&itimer_mtx);
	/* ITIMER_VIRTUAL only counts time spent in user mode. */
	if (CLKF_USERMODE(frame) &&
	    timespecisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_ALRMPEND);
		need_proftick(p);
	}
	if (timespecisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_PROF], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_PROFPEND);
		need_proftick(p);
	}
	mtx_leave(&itimer_mtx);
}
794 
795 void
796 process_reset_itimer_flag(struct process *ps)
797 {
798 	if (timespecisset(&ps->ps_timer[ITIMER_VIRTUAL].it_value) ||
799 	    timespecisset(&ps->ps_timer[ITIMER_PROF].it_value))
800 		atomic_setbits_int(&ps->ps_flags, PS_ITIMER);
801 	else
802 		atomic_clearbits_int(&ps->ps_flags, PS_ITIMER);
803 }
804 
/* Serializes updates to callers' ratecheck() timestamps. */
struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);
806 
807 /*
808  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
809  * for usage and rationale.
810  */
811 int
812 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
813 {
814 	struct timeval tv, delta;
815 	int rv = 0;
816 
817 	getmicrouptime(&tv);
818 
819 	mtx_enter(&ratecheck_mtx);
820 	timersub(&tv, lasttime, &delta);
821 
822 	/*
823 	 * check for 0,0 is so that the message will be seen at least once,
824 	 * even if interval is huge.
825 	 */
826 	if (timercmp(&delta, mininterval, >=) ||
827 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
828 		*lasttime = tv;
829 		rv = 1;
830 	}
831 	mtx_leave(&ratecheck_mtx);
832 
833 	return (rv);
834 }
835 
/* Serializes updates to callers' ppsratecheck() state. */
struct mutex ppsratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);
837 
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Returns 1 when another event is allowed under the maxpps limit,
 * 0 otherwise.  maxpps == 0 suppresses everything; maxpps < 0 allows
 * everything while still maintaining *curpps for statistics.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	mtx_enter(&ppsratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * check for 0,0 is so that the message will be seen at least once.
	 * if more than one second have passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * we do increment *curpps even in *curpps < maxpps case, as some may
	 * try to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

	/* be careful about wrap-around: only count while it increases */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;

	mtx_leave(&ppsratecheck_mtx);

	return (rv);
}
882 
todr_chip_handle_t todr_handle;	/* best TOD clock attached, see todr_attach() */
int inittodr_done;		/* set by inittodr(); gates resettodr() */

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */
887 
/*
 * inittodr:
 *
 *      Initialize time from the time-of-day register.
 *
 * "base" is a fallback timestamp, typically recovered from the file
 * system; it is used when no TODR is attached or its reading is
 * implausible, and as a sanity check against the TODR otherwise.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	/* Record that we ran so resettodr() is allowed to write. */
	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		/* The TODR reading looks sane: use it. */
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;         /* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
 bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}
954 
955 /*
956  * resettodr:
957  *
958  *      Reset the time-of-day register with the current time.
959  */
960 void
961 resettodr(void)
962 {
963 	struct timeval rtctime;
964 
965 	/*
966 	 * Skip writing the RTC if inittodr(9) never ran.  We don't
967 	 * want to overwrite a reasonable value with a nonsense value.
968 	 */
969 	if (!inittodr_done)
970 		return;
971 
972 	microtime(&rtctime);
973 
974 	if (todr_handle != NULL &&
975 	    todr_settime(todr_handle, &rtctime) != 0)
976 		printf("WARNING: can't update clock chip time\n");
977 }
978 
979 void
980 todr_attach(struct todr_chip_handle *todr)
981 {
982 	if (todr_handle == NULL ||
983 	    todr->todr_quality > todr_handle->todr_quality)
984 		todr_handle = todr;
985 }
986 
/* Seconds between periodic RTC write-backs. */
#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

/* The timeout fires periodically; the task does the actual RTC write. */
struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);
994 
/*
 * Timeout handler: defer the RTC write-back to the systq task.
 */
void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}
1000 
/*
 * Task handler: write the RTC, then schedule the next periodic update.
 */
void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}
1007 
/* Begin periodically writing the system time back to the RTC. */
void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}
1013 
/* Stop the periodic RTC write-back and cancel any pending work. */
void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
1020