/*	$OpenBSD: kern_time.c,v 1.126 2019/11/07 14:49:07 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}

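/*
 * Both clock_settime(2) and settimeofday(2) funnel into settime()
 * above.  A minimal userland sketch (illustrative only, not part of
 * this file) of a privileged process setting the clock:
 *
 *	#include <err.h>
 *	#include <time.h>
 *
 *	struct timespec ts = { .tv_sec = 1700000000, .tv_nsec = 0 };
 *	if (clock_settime(CLOCK_REALTIME, &ts) == -1)
 *		err(1, "clock_settime");
 *
 * At securelevel > 1, setting the clock backward fails with EPERM,
 * per the check in settime() above.
 */
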
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintimesub(&bt, &naptime, &bt);
		BINTIME_TO_TIMESPEC(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

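/*
 * The default case above recognizes clock IDs minted by
 * pthread_getcpuclockid(3), which packs a thread ID and a clock type
 * into one clockid_t; __CLOCK_TYPE() and __CLOCK_PTID() unpack them.
 * An illustrative userland sketch (error handling elided):
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *	pthread_getcpuclockid(pthread_self(), &cid);
 *	clock_gettime(cid, &ts);
 *
 * The lookup fails with ESRCH if the thread is gone or belongs to
 * another process.
 */
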
int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0, realstathz;

	memset(&ts, 0, sizeof(ts));
	realstathz = (stathz == 0) ? hz : stathz;
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / realstathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / realstathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

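/*
 * About the resolution math above: bt.frac counts units of 2^-64
 * seconds, so one timecounter tick is 2^64 / tc_getfrequency() such
 * units; ((1ULL << 63) / freq) * 2 computes that quotient without
 * overflowing a 64-bit intermediate.  An illustrative userland query
 * (plain POSIX, error handling elided):
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	struct timespec res;
 *	clock_getres(CLOCK_MONOTONIC, &res);
 *	printf("resolution: %ld ns\n", res.tv_nsec);
 *
 * With a 1 GHz timecounter and a precision of one tick this reports
 * 1 ns; the result is clamped to at least 1 ns before copyout.
 */
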
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
		    MAX(1, tstohz(&request)));
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}

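/*
 * sys_nanosleep() keeps re-arming the tsleep() until the full request
 * has elapsed, so the syscall only returns early on a signal.  Because
 * the unslept remainder is written to rmtp even on interruption, the
 * classic userland retry loop works (illustrative only):
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	struct timespec req = { .tv_sec = 2, .tv_nsec = 500000000 };
 *	struct timespec rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */
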
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

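/*
 * The kernel does not track a timezone; callers asking for one get
 * zerotz back.  Illustrative userland call:
 *
 *	#include <sys/time.h>
 *
 *	struct timeval tv;
 *	struct timezone tz;
 *	gettimeofday(&tv, &tz);
 *
 * tz always comes back as { 0, 0 } here, so portable code should
 * simply pass NULL for the timezone argument.
 */
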
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}

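/*
 * An illustrative sketch of adjfreq(2) from userland (assumption: the
 * skew is expressed in nanoseconds per second, shifted left 32 bits,
 * as used by ntpd; error handling elided):
 *
 *	#include <sys/time.h>
 *
 *	int64_t f = -(1000LL << 32);
 *
 *	adjfreq(&f, NULL);
 *
 * This would slow the clock by 1000 ns per second (1 ppm).  Note the
 * lock discipline above: a pure read of the old skew takes tc_lock
 * read, while setting a new one takes it write.
 */
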
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec >= 0) {
			if (atv.tv_sec > INT64_MAX / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000;
			if (atv.tv_usec > INT64_MAX - adjustment)
				return EINVAL;
			adjustment += atv.tv_usec;
		} else {
			if (atv.tv_sec < INT64_MIN / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000 + atv.tv_usec;
		}

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}

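/*
 * The arithmetic above converts the delta to microseconds, guarding
 * against int64_t overflow before multiplying.  Illustrative userland
 * use of adjtime(2) to slew the clock 1.5 seconds forward (error
 * handling elided):
 *
 *	#include <sys/time.h>
 *
 *	struct timeval delta = { .tv_sec = 1, .tv_usec = 500000 };
 *	struct timeval olddelta;
 *
 *	adjtime(&delta, &olddelta);
 *
 * olddelta reports the portion of any earlier adjustment that had not
 * yet been applied.
 */
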
struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	struct itimerspec *itimer;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itimer = &p->p_p->ps_timer[which];
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	TIMESPEC_TO_TIMEVAL(&aitv.it_interval, &itimer->it_interval);
	TIMESPEC_TO_TIMEVAL(&aitv.it_value, &itimer->it_value);
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed, return 0; else return the difference between
		 * the current time and the time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerspec aits;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	TIMEVAL_TO_TIMESPEC(&aitv.it_value, &aits.it_value);
	TIMEVAL_TO_TIMESPEC(&aitv.it_interval, &aits.it_interval);
	if (which == ITIMER_REAL) {
		struct timespec cts;

		timeout_del(&pr->ps_realit_to);
		getnanouptime(&cts);
		if (timespecisset(&aits.it_value)) {
			timo = tstohz(&aits.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timespecadd(&aits.it_value, &cts, &aits.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aits;
	} else {
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aits;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}

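/*
 * An illustrative userland sketch of arming a 250 ms periodic
 * ITIMER_REAL timer via the code above (SIGALRM handler setup
 * elided):
 *
 *	#include <sys/time.h>
 *
 *	struct itimerval itv = {
 *		.it_interval = { .tv_sec = 0, .tv_usec = 250000 },
 *		.it_value = { .tv_sec = 0, .tv_usec = 250000 },
 *	};
 *
 *	setitimer(ITIMER_REAL, &itv, NULL);
 *
 * ITIMER_REAL is driven by the ps_realit_to timeout; ITIMER_VIRTUAL
 * and ITIMER_PROF are decremented from hardclock() via itimerdecr().
 */
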
/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Else compute the next time, greater than the current time,
 * at which the timer should go off.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timespec cts, nts;
		int timo;

		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getnanouptime(&cts);
		if (timespeccmp(&tp->it_value, &cts, >)) {
			nts = tp->it_value;
			timespecsub(&nts, &cts, &nts);
			timo = tstohz(&nts) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}

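/*
 * Keeping it_value absolute is what makes the reload loop above
 * drift-free: each interval is added to the previous target time, not
 * to "now".  A worked example: with a 10 ms interval and a target of
 * t = 5.000 s, if softclock runs late at 5.023 s the loop advances the
 * target to 5.030 s and schedules a ~7 ms timeout, instead of
 * scheduling 10 ms from 5.023 s and drifting 3 ms on every expiry.
 */
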
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Decrement an interval timer by the given number of nanoseconds.
 * If the timer expires and it is periodic then reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
int
itimerdecr(struct itimerspec *itp, long nsec)
{
	struct timespec decrement;

	NSEC_TO_TIMESPEC(nsec, &decrement);

	mtx_enter(&itimer_mtx);
	timespecsub(&itp->it_value, &decrement, &itp->it_value);
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		mtx_leave(&itimer_mtx);
		return (0);
	}
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	mtx_leave(&itimer_mtx);
	return (0);
}

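/*
 * A worked example of the reload loop above: with it_interval = 10 ms
 * and it_value = 2 ms, a 25 ms decrement leaves it_value at -23 ms.
 * Two additions of the interval bring it to -13 ms and then -3 ms,
 * and a third lands on 7 ms: the 23 ms of overrun is absorbed into
 * the next period, so the timer's phase does not drift.
 */
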
/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

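/*
 * Typical kernel usage, per ratecheck(9) (illustrative; the names
 * are hypothetical):
 *
 *	static struct timeval drop_last;
 *	static const struct timeval drop_interval = { 5, 0 };
 *
 *	if (ratecheck(&drop_last, &drop_interval))
 *		printf("packets dropped\n");
 *
 * The message is printed at most once every five seconds no matter
 * how often the condition fires.
 */
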
/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may use *curpps for statistics as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there aren't too many calls to this function.
	 * It is not clear whether the assumption holds, as it depends on
	 * the *caller's* behavior, not the behavior of this function.
	 * Since it is wrong to make assumptions about the caller's
	 * behavior, the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}

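/*
 * Typical usage, mirroring ratecheck() (illustrative; the names are
 * hypothetical):
 *
 *	static struct timeval icmp_last;
 *	static int icmp_count;
 *
 *	if (ppsratecheck(&icmp_last, &icmp_count, 100))
 *		send_reply();
 *
 * This permits at most 100 events per second; maxpps == 0 blocks
 * everything, and maxpps < 0 allows everything while still counting.
 */
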
#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
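
/*
 * The periodic timeout above fires in interrupt context, where the
 * RTC may not be safe to touch (some RTC drivers can sleep), so it
 * only queues resettodr_task; the task then calls resettodr() from
 * the systq kthread and re-arms the timeout.  The net effect is that
 * the system time is written back to the hardware clock every
 * RESETTODR_PERIOD seconds (30 minutes).
 */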