/*	$OpenBSD: kern_time.c,v 1.156 2022/05/05 09:45:15 bluhm Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/clock_subr.h>

int itimerfix(struct itimerval *);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
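	/*
	 * CPU time clocks: start from the current uptime, subtract
	 * the uptime at which this CPU last started running the
	 * current thread (spc_runtime) to get the not-yet-accumulated
	 * slice, then add the run time already accumulated.
	 */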
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0, realstathz;

	memset(&ts, 0, sizeof(ts));
	realstathz = (stathz == 0) ? hz : stathz;
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
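		/*
		 * One timecounter tick is 2^64 / frequency in bintime
		 * fractions; compute that as (2^63 / freq) * 2 to avoid
		 * overflowing the 64-bit intermediate, then scale by
		 * the timecounter's precision in ticks.
		 */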
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / realstathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / realstathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int chan;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

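	/*
	 * Sleep in bounded chunks: tsleep_nsec() can wait at most
	 * MAXTSLP nanoseconds at a time, so loop, deducting the
	 * measured elapsed time from the request, until the request
	 * is exhausted or the sleep is interrupted.
	 */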
	do {
		getnanouptime(&start);
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&chan, PWAIT | PCATCH, "nanoslp", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
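	/* The kernel keeps no timezone; a zeroed one is reported. */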
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
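	/* The timezone argument is copied in but otherwise ignored. */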
	if (tv) {
		struct timespec ts;

#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

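/*
 * Per adjfreq(2), frequency adjustments are expressed in nanoseconds
 * per second, shifted left 32 bits (32.32 fixed point), so the bounds
 * below correspond to +/-500,000,000 ns/s, i.e. +/-0.5 s/s.
 */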
#define ADJFREQ_MAX (500000000LL << 32)
#define ADJFREQ_MIN (-ADJFREQ_MAX)

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);

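		/*
		 * Convert the delta to a 64-bit microsecond count,
		 * rejecting deltas whose conversion would overflow.
		 */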
		if (atv.tv_sec > INT64_MAX / 1000000)
			return EINVAL;
		if (atv.tv_sec < INT64_MIN / 1000000)
			return EINVAL;
		adjustment = atv.tv_sec * 1000000;
		if (adjustment > INT64_MAX - atv.tv_usec)
			return EINVAL;
		adjustment += atv.tv_usec;

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}

struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get or set the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real time timer's
 * .it_value from its .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
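
/*
 * Example (hypothetical userland usage of the interface implemented
 * below): arm a periodic 50 ms real time timer that delivers SIGALRM
 * on every expiry.
 *
 *	struct itimerval it = {
 *		.it_value    = { .tv_sec = 0, .tv_usec = 50000 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 50000 },
 *	};
 *	if (setitimer(ITIMER_REAL, &it, NULL) == -1)
 *		err(1, "setitimer");
 */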
void
setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
{
	struct itimerspec its, oldits;
	struct timespec now;
	struct itimerspec *itimer;
	struct process *pr;

	KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);

	pr = curproc->p_p;
	itimer = &pr->ps_timer[which];

	if (itv != NULL) {
		TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
		TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
	}

	if (which == ITIMER_REAL) {
		mtx_enter(&pr->ps_mtx);
		nanouptime(&now);
	} else
		mtx_enter(&itimer_mtx);

	if (olditv != NULL)
		oldits = *itimer;
	if (itv != NULL) {
		if (which == ITIMER_REAL) {
			if (timespecisset(&its.it_value)) {
				timespecadd(&its.it_value, &now, &its.it_value);
				timeout_at_ts(&pr->ps_realit_to, &its.it_value);
			} else
				timeout_del(&pr->ps_realit_to);
		}
		*itimer = its;
	}

	if (which == ITIMER_REAL)
		mtx_leave(&pr->ps_mtx);
	else
		mtx_leave(&itimer_mtx);

	if (olditv != NULL) {
		if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
			if (timespeccmp(&oldits.it_value, &now, <))
				timespecclear(&oldits.it_value);
			else {
				timespecsub(&oldits.it_value, &now,
				    &oldits.it_value);
			}
		}
		TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
		TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
	}
}

void
cancel_all_itimers(void)
{
	struct itimerval itv;
	int i;

	timerclear(&itv.it_value);
	timerclear(&itv.it_interval);

	for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
		setitimer(i, &itv, NULL);
}

int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	memset(&aitv, 0, sizeof(aitv));

	setitimer(which, NULL, &aitv);

	return copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv, olditv;
	struct itimerval *newitvp, *olditvp;
	int error, which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	newitvp = olditvp = NULL;
	if (SCARG(uap, itv) != NULL) {
		error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
		if (error)
			return error;
		error = itimerfix(&aitv);
		if (error)
			return error;
		newitvp = &aitv;
	}
	if (SCARG(uap, oitv) != NULL) {
		memset(&olditv, 0, sizeof(olditv));
		olditvp = &olditv;
	}
	if (newitvp == NULL && olditvp == NULL)
		return 0;

	setitimer(which, newitvp, olditvp);

	if (SCARG(uap, oitv) != NULL)
		return copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));

	return 0;
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set to reload, just return.
 * Otherwise, compute the next time the timer should go off, which
 * must be later than the current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct timespec cts;
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
	int need_signal = 0;

	mtx_enter(&pr->ps_mtx);

	/*
	 * Do nothing if the timer was cancelled or rescheduled while we
	 * were entering the mutex.
	 */
	if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
		goto out;

	/* The timer expired.  We need to send the signal. */
	need_signal = 1;

	/* One-shot timers are not reloaded. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		goto out;
	}

	/*
	 * Find the nearest future expiration point and restart
	 * the timeout.
	 */
	nanouptime(&cts);
	while (timespeccmp(&tp->it_value, &cts, <=))
		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
	if ((pr->ps_flags & PS_EXITING) == 0)
		timeout_at_ts(&pr->ps_realit_to, &tp->it_value);

out:
	mtx_leave(&pr->ps_mtx);

	if (need_signal)
		prsignal(pr, SIGALRM);
}

/*
 * Check if the given setitimer(2) input is valid.  Clear it_interval
 * if it_value is unset.  Round it_interval up to the minimum interval
 * if necessary.
 */
int
itimerfix(struct itimerval *itv)
{
	static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
	struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };

	if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
		return EINVAL;
	if (timercmp(&itv->it_value, &max, >))
		return EINVAL;
	if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
		return EINVAL;
	if (timercmp(&itv->it_interval, &max, >))
		return EINVAL;

	if (!timerisset(&itv->it_value))
		timerclear(&itv->it_interval);
	if (timerisset(&itv->it_interval)) {
		if (timercmp(&itv->it_interval, &min_interval, <))
			itv->it_interval = min_interval;
	}

	return 0;
}

/*
 * Decrement an interval timer by the given number of nanoseconds.
 * If the timer expires and it is periodic then reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
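/*
 * Worked example: with 5 ms left in it_value and a 10 ms it_interval,
 * a 7 ms decrement takes it_value to -2 ms.  The timer has expired,
 * so it is reloaded by adding it_interval until the value is positive
 * again: -2 ms + 10 ms = 8 ms, absorbing the 2 ms overrun.
 */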
int
itimerdecr(struct itimerspec *itp, long nsec)
{
	struct timespec decrement;

	NSEC_TO_TIMESPEC(nsec, &decrement);

	mtx_enter(&itimer_mtx);

	/*
	 * Double-check that the timer is enabled.  A different thread
	 * in setitimer(2) may have disabled it while we were entering
	 * the mutex.
	 */
	if (!timespecisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}

	/*
	 * The timer is enabled.  Update and reload it as needed.
	 */
	timespecsub(&itp->it_value, &decrement, &itp->it_value);
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		mtx_leave(&itimer_mtx);
		return (0);
	}
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	mtx_leave(&itimer_mtx);
	return (0);
}

struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
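/*
 * Example (hypothetical caller): log a recurring error condition at
 * most once every 10 seconds, however often it fires.
 *
 *	static struct timeval lasttime;
 *	static const struct timeval interval = { 10, 0 };
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("device: transient error\n");
 */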
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	mtx_enter(&ratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message is seen at least once,
	 * even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}
	mtx_leave(&ratecheck_mtx);

	return (rv);
}

struct mutex ppsratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
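/*
 * Example (hypothetical caller): log dropped packets, allowing at
 * most 100 log entries per second.
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 100))
 *		log(LOG_DEBUG, "packet dropped\n");
 */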
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	mtx_enter(&ppsratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message is seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as
	 * some callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;

	mtx_leave(&ppsratecheck_mtx);

	return (rv);
}

todr_chip_handle_t todr_handle;
int inittodr_done;

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */

/*
 * inittodr:
 *
 *	Initialize time from the time-of-day register.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;		/* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
 bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}

/*
 * resettodr:
 *
 *	Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Skip writing the RTC if inittodr(9) never ran.  We don't
	 * want to overwrite a reasonable value with a nonsense value.
	 */
	if (!inittodr_done)
		return;

	microtime(&rtctime);

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("WARNING: can't update clock chip time\n");
}

void
todr_attach(struct todr_chip_handle *todr)
{
	todr_handle = todr;
}

#define RESETTODR_PERIOD	1800

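/*
 * Periodically write the system time back to the RTC.  The timeout
 * fires in interrupt context, where the RTC driver may not be safe
 * to call (it may need to sleep), so the actual write is deferred
 * to a task on the system task queue.
 */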
void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}