/*	$OpenBSD: kern_time.c,v 1.169 2024/07/26 19:16:31 guenther Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/clock_subr.h>

int itimerfix(struct itimerval *);
void process_reset_itimer_flag(struct process *);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}
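
/*
 * Worked example (illustrative, assuming 32-bit UINT_MAX): the cutoff
 * above is 4294967295 - 31536000 = 4263431295, i.e. a point early in
 * the 22nd century, so
 *
 *	struct timespec ts = { .tv_sec = 4263431296, .tv_nsec = 0 };
 *	settime(&ts);		fails with EPERM and logs the attempt
 */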

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct tusage tu;
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		tuagg_get_process(&tu, p->p_p);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &tu.tu_runtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		tuagg_get_proc(&tu, p);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &tu.tu_runtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}
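
/*
 * The two CPU-time cases above share one pattern: take the current
 * uptime, subtract the uptime at which this CPU began running the
 * current thread (spc_runtime), and add the runtime already banked
 * in the tusage.  Schematically:
 *
 *	cputime = nanouptime() - spc_runtime + tu_runtime
 */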

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / stathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / stathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}
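
/*
 * Worked example (illustrative): "scale" above approximates
 * 2^64 / frequency, the length of one timecounter tick as a 64-bit
 * binary fraction of a second.  For a 1 GHz counter with a precision
 * of one tick:
 *
 *	scale   = ((1ULL << 63) / 1000000000) * 2;	(~ 2^64 / 10^9)
 *	bt.frac = 1 * scale;				(~ 1 nanosecond)
 *
 * so clock_getres(2) reports a resolution of about one nanosecond.
 */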

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&nowake, PWAIT | PCATCH, "nanoslp", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
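
/*
 * Userland usage sketch (illustrative): a caller that must sleep the
 * full duration even across signals can loop on the remainder that
 * this syscall writes back through rmtp:
 *
 *	struct timespec req = { .tv_sec = 5, .tv_nsec = 0 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */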

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof(atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

#define ADJFREQ_MAX (500000000LL << 32)
#define ADJFREQ_MIN (-ADJFREQ_MAX)

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}
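
/*
 * Note on units (assuming the adjfreq(2) convention): the frequency
 * adjustment is expressed in nanoseconds per second as a 64-bit
 * fixed-point value with 32 fraction bits.  ADJFREQ_MAX therefore
 * corresponds to 500000000 ns/s, i.e. the clock may be skewed to run
 * at most 50% fast or slow.
 */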

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec > INT64_MAX / 1000000)
			return EINVAL;
		if (atv.tv_sec < INT64_MIN / 1000000)
			return EINVAL;
		adjustment = atv.tv_sec * 1000000;
		if (adjustment > INT64_MAX - atv.tv_usec)
			return EINVAL;
		adjustment += atv.tv_usec;

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}
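
/*
 * Worked example (illustrative): the range checks above make the
 * microsecond conversion safe.  For delta = { .tv_sec = 2,
 * .tv_usec = 500000 }:
 *
 *	adjustment = 2 * 1000000 + 500000 = 2500000 microseconds
 *
 * while a tv_sec beyond INT64_MAX / 1000000 is rejected with EINVAL
 * before the multiplication could overflow.
 */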


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get or set the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real timer's .it_interval.  Rather, we compute the next time
 * in absolute time the timer should go off.
 */
void
setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
{
	struct itimerspec its, oldits;
	struct timespec now;
	struct itimerspec *itimer;
	struct process *pr;

	KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);

	pr = curproc->p_p;
	itimer = &pr->ps_timer[which];

	if (itv != NULL) {
		TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
		TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
	}

	if (which == ITIMER_REAL) {
		mtx_enter(&pr->ps_mtx);
		nanouptime(&now);
	} else
		mtx_enter(&itimer_mtx);

	if (olditv != NULL)
		oldits = *itimer;
	if (itv != NULL) {
		if (which == ITIMER_REAL) {
			if (timespecisset(&its.it_value)) {
				timespecadd(&its.it_value, &now, &its.it_value);
				timeout_abs_ts(&pr->ps_realit_to, &its.it_value);
			} else
				timeout_del(&pr->ps_realit_to);
		}
		*itimer = its;
		if (which == ITIMER_VIRTUAL || which == ITIMER_PROF) {
			process_reset_itimer_flag(pr);
			need_resched(curcpu());
		}
	}

	if (which == ITIMER_REAL)
		mtx_leave(&pr->ps_mtx);
	else
		mtx_leave(&itimer_mtx);

	if (olditv != NULL) {
		if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
			if (timespeccmp(&oldits.it_value, &now, <))
				timespecclear(&oldits.it_value);
			else {
				timespecsub(&oldits.it_value, &now,
				    &oldits.it_value);
			}
		}
		TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
		TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
	}
}
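
/*
 * Example (illustrative): for ITIMER_REAL, it_value is stored as an
 * absolute uptime.  If the uptime is 100s when a caller arms the
 * timer with it_value = 5s, the stored value becomes 105s and the
 * timeout is scheduled for that absolute point.  On readback, "now"
 * is subtracted again, so the caller sees the time remaining.
 */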

void
cancel_all_itimers(void)
{
	struct itimerval itv;
	int i;

	timerclear(&itv.it_value);
	timerclear(&itv.it_interval);

	for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
		setitimer(i, &itv, NULL);
}

int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which, error;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	memset(&aitv, 0, sizeof(aitv));

	setitimer(which, NULL, &aitv);

	error = copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktritimerval(p, &aitv);
#endif
	return (error);
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv, olditv;
	struct itimerval *newitvp, *olditvp;
	int error, which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	newitvp = olditvp = NULL;
	if (SCARG(uap, itv) != NULL) {
		error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
		if (error)
			return error;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktritimerval(p, &aitv);
#endif
		error = itimerfix(&aitv);
		if (error)
			return error;
		newitvp = &aitv;
	}
	if (SCARG(uap, oitv) != NULL) {
		memset(&olditv, 0, sizeof(olditv));
		olditvp = &olditv;
	}
	if (newitvp == NULL && olditvp == NULL)
		return 0;

	setitimer(which, newitvp, olditvp);

	if (SCARG(uap, oitv) != NULL) {
		error = copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktritimerval(p, &olditv);
#endif
		return error;
	}

	return 0;
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Otherwise compute the next time, greater than the current time,
 * at which the timer should go off.  This is where the delay in
 * processing this timeout causes multiple SIGALRM calls to be
 * compressed into one.
 */
void
realitexpire(void *arg)
{
	struct timespec cts;
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
	int need_signal = 0;

	mtx_enter(&pr->ps_mtx);

	/*
	 * Do nothing if the timer was cancelled or rescheduled while we
	 * were entering the mutex.
	 */
	if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
		goto out;

	/* The timer expired.  We need to send the signal. */
	need_signal = 1;

	/* One-shot timers are not reloaded. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		goto out;
	}

	/*
	 * Find the nearest future expiration point and restart
	 * the timeout.
	 */
	nanouptime(&cts);
	while (timespeccmp(&tp->it_value, &cts, <=))
		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
	if ((pr->ps_flags & PS_EXITING) == 0)
		timeout_abs_ts(&pr->ps_realit_to, &tp->it_value);

out:
	mtx_leave(&pr->ps_mtx);

	if (need_signal)
		prsignal(pr, SIGALRM);
}

/*
 * Check if the given setitimer(2) input is valid.  Clear it_interval
 * if it_value is unset.  Round it_interval up to the minimum interval
 * if necessary.
 */
int
itimerfix(struct itimerval *itv)
{
	static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
	struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };

	if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
		return EINVAL;
	if (timercmp(&itv->it_value, &max, >))
		return EINVAL;
	if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
		return EINVAL;
	if (timercmp(&itv->it_interval, &max, >))
		return EINVAL;

	if (!timerisset(&itv->it_value))
		timerclear(&itv->it_interval);
	if (timerisset(&itv->it_interval)) {
		if (timercmp(&itv->it_interval, &min_interval, <))
			itv->it_interval = min_interval;
	}

	return 0;
}
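
/*
 * Example (illustrative): with hz = 100, tick is 10000 microseconds,
 * so a requested it_interval of { 0, 1 } is rounded up to
 * { 0, 10000 }.  If it_value is unset, it_interval is cleared and the
 * timer stays disarmed.
 */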

/*
 * Decrement an interval timer by the given duration.
 * If the timer expires and it is periodic then reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
int
itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
{
	timespecsub(&itp->it_value, decrement, &itp->it_value);
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value))
		return (1);
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		return (0);
	}
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	return (0);
}
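
/*
 * Worked example (illustrative): a periodic timer with
 * it_interval = 10ms and it_value = 4ms that is decremented by 7ms
 * goes 3ms into overrun.  The reload loop adds one interval, so the
 * next expiry lands 7ms out and the long-run period stays at 10ms
 * with no drift.
 */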

void
itimer_update(struct clockrequest *cr, void *cf, void *arg)
{
	struct timespec elapsed;
	uint64_t nsecs;
	struct clockframe *frame = cf;
	struct proc *p = curproc;
	struct process *pr;

	if (p == NULL || ISSET(p->p_flag, P_SYSTEM | P_WEXIT))
		return;

	pr = p->p_p;
	if (!ISSET(pr->ps_flags, PS_ITIMER))
		return;

	nsecs = clockrequest_advance(cr, hardclock_period) * hardclock_period;
	NSEC_TO_TIMESPEC(nsecs, &elapsed);

	mtx_enter(&itimer_mtx);
	if (CLKF_USERMODE(frame) &&
	    timespecisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_ALRMPEND);
		need_proftick(p);
	}
	if (timespecisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_PROF], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_PROFPEND);
		need_proftick(p);
	}
	mtx_leave(&itimer_mtx);
}

void
process_reset_itimer_flag(struct process *ps)
{
	if (timespecisset(&ps->ps_timer[ITIMER_VIRTUAL].it_value) ||
	    timespecisset(&ps->ps_timer[ITIMER_PROF].it_value))
		atomic_setbits_int(&ps->ps_flags, PS_ITIMER);
	else
		atomic_clearbits_int(&ps->ps_flags, PS_ITIMER);
}

struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	mtx_enter(&ratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}
	mtx_leave(&ratecheck_mtx);

	return (rv);
}
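
/*
 * Usage sketch (after ratecheck(9)): limit a diagnostic to once
 * every 10 seconds:
 *
 *	static struct timeval lasttime;
 *	static const struct timeval interval = { 10, 0 };
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("something noteworthy happened\n");
 */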

struct mutex ppsratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	mtx_enter(&ppsratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;

	mtx_leave(&ppsratecheck_mtx);

	return (rv);
}
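
/*
 * Usage sketch (illustrative): log at most 5 events per second;
 * curpps carries the running count for the current second:
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 5))
 *		log(LOG_INFO, "event\n");
 */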

todr_chip_handle_t todr_handle;
int inittodr_done;

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */

/*
 * inittodr:
 *
 *	Initialize time from the time-of-day register.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;		/* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
 bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}

/*
 * resettodr:
 *
 *	Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Skip writing the RTC if inittodr(9) never ran.  We don't
	 * want to overwrite a reasonable value with a nonsense value.
	 */
	if (!inittodr_done)
		return;

	microtime(&rtctime);

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("WARNING: can't update clock chip time\n");
}

void
todr_attach(struct todr_chip_handle *todr)
{
	if (todr_handle == NULL ||
	    todr->todr_quality > todr_handle->todr_quality)
		todr_handle = todr;
}

#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
1037