/*	$OpenBSD: kern_time.c,v 1.100 2017/12/18 05:51:53 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>


int64_t adjtimedelta;		/* unapplied time correction (microseconds) */

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(struct timespec *ts)
{
	struct timespec now;

	/*
	 * An adjtime() in progress is meaningless or harmful after
	 * the clock is set.  Cancel the adjustment and then set the
	 * new time.
	 */
	adjtimedelta = 0;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs (UINT_MAX - 365*24*60*60 = 4263431295 seconds), so
	 * even if the attacker uses adjtime(2) to move the time past
	 * the cutoff, it will take a very long time to get to the
	 * wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
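	/*
	 * The CPU-time clocks below start from the current on-CPU
	 * slice (uptime minus the uptime recorded when this CPU began
	 * running the thread) and add the run time already accumulated
	 * by the process or thread.
	 */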
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			*tp = q->p_tu.tu_runtime;
		} else
			return (EINVAL);
	}
	return (0);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrabstimespec(p, &ats);
		KERNEL_UNLOCK();
	}
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p, 0)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			ts.tv_sec = 0;
			ts.tv_nsec = 1000000000 / hz;
		} else
			return (EINVAL);
	}

	if (SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &ts);
			KERNEL_UNLOCK();
		}
#endif
	}

	return (error);
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timespec *rmtp;
	struct timeval tv;
	int error, error1;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrreltimespec(p, &rqt);
		KERNEL_UNLOCK();
	}
#endif

	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
	if (itimerfix(&tv))
		return (EINVAL);

	if (rmtp)
		getnanouptime(&sts);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tvtohz(&tv)));
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		getnanouptime(&ets);

		memset(&rmt, 0, sizeof(rmt));
		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error1 = copyout(&rmt, rmtp, sizeof(rmt));
		if (error1 != 0)
			error = error1;
#ifdef KTRACE
		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &rmt);
			KERNEL_UNLOCK();
		}
#endif
	}

	return (error);
}

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrabstimeval(p, &atv);
			KERNEL_UNLOCK();
		}
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p, 0)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return (error);

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p, 0)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}
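
/*
 * Worked example (illustrative): with adjtimedelta = -1500000
 * microseconds, C's truncating division above yields tv_sec = -1
 * and tv_usec = -500000; the fix-up then normalizes this to
 * tv_sec = -2, tv_usec = 500000, the canonical timeval form with
 * 0 <= tv_usec < 1000000.
 */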


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from its .it_interval.  Rather, we compute the next time in absolute
 * time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec  = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec     = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec    = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert the .it_value part of the real time timer
		 * from absolute to relative time.  If the expiry time
		 * has already passed, return zero; otherwise return
		 * the time remaining until the timer goes off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin(itvp, &aitv, sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set to reload, just return.
 * Otherwise, compute the next time, later than the current time,
 * at which the timer should go off.  This is where any delay in
 * processing this timeout causes multiple SIGALRM deliveries to be
 * compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}
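
/*
 * Worked example (illustrative): with it_interval = 10s, if the
 * timeout fires 25 seconds after the stored expiry time (e.g. the
 * callout was held off), the loop above advances it_value by three
 * intervals before it exceeds the current uptime.  The two missed
 * expirations are thus folded into the single SIGALRM already sent,
 * and the period stays anchored to the original schedule.
 */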

/*
 * Check that a timespec value is legit
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec > 100000000)
		ts->tv_sec = 100000000;
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}
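
/*
 * Usage sketch (hypothetical caller; the real callers live in the
 * clock interrupt paths of kern_clock.c): charge one tick's worth
 * of microseconds to the virtual timer and post a signal once it
 * expires.
 *
 *	if (timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
 *	    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0)
 *		prsignal(pr, SIGVTALRM);
 */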

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures that the message will be seen at
	 * least once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
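
/*
 * Usage sketch (names are illustrative): limit a diagnostic printf
 * to at most one instance every ten seconds.
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval msgperiod = { 10, 0 };
 *
 *	if (ratecheck(&lastmsg, &msgperiod))
 *		printf("frobnitz: queue overflow\n");
 */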

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures that the message will be seen at
	 * least once.  If more than one second has passed since the
	 * last update of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as
	 * some callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * Not sure if the assumption holds, as it depends on the
	 * *caller's* behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's
	 * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
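
/*
 * Usage sketch (names are illustrative): emit at most 100 log
 * messages per second; *curpps keeps counting regardless, so it can
 * double as a statistic.
 *
 *	static struct timeval lastdrop;
 *	static int droppps;
 *
 *	if (ppsratecheck(&lastdrop, &droppps, 100))
 *		log(LOG_DEBUG, "dropping packet\n");
 */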


#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}