/*	$OpenBSD: kern_time.c,v 1.110 2019/01/31 18:23:27 tedu Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>


int64_t adjtimedelta;		/* unapplied time correction (microseconds) */

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
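	/*
	 * Illustrative arithmetic, assuming the usual 32-bit
	 * UINT_MAX of 4294967295: 365*24*60*60 = 31536000 seconds,
	 * so the cutoff below is 4294967295 - 31536000 = 4263431295.
	 */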
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock. Cancel adjtime and then set new time.
	 */
	adjtimedelta = 0;
	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}
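
/*
 * Illustrative userland use of the clock_gettime(2) syscall serviced
 * below (a sketch, not part of this file):
 *
 *	struct timespec ts;
 *
 *	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
 *		err(1, "clock_gettime");
 *	printf("uptime: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 */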

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else {
				ts.tv_sec = 0;
				ts.tv_nsec = 1000000000 / hz;
			}
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return (error);
}
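
/*
 * Illustrative userland check of the resolution reported above (a
 * sketch, not part of this file); assuming the common hz value of
 * 100, this prints 1000000000 / 100 = 10000000 ns, i.e. 10 ms:
 *
 *	struct timespec res;
 *
 *	if (clock_getres(CLOCK_MONOTONIC, &res) == -1)
 *		err(1, "clock_getres");
 *	printf("%ld ns\n", res.tv_nsec);
 */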

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
		    MAX(1, tstohz(&request)));
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return (error);
}
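
/*
 * Illustrative userland use of nanosleep(2) (a sketch, not part of
 * this file), sleeping 2.5 seconds and restarting after signal
 * interruption via the remainder that the rmtp logic above copies out:
 *
 *	struct timespec req = { 2, 500000000 };
 *	struct timespec rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */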

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}
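
/*
 * Illustrative userland counterpart (a sketch, not part of this file):
 *
 *	struct timeval tv;
 *
 *	if (gettimeofday(&tv, NULL) == -1)
 *		err(1, "gettimeofday");
 */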

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return (error);

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
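		/*
		 * C integer division truncates toward zero, so a
		 * negative adjtimedelta leaves tv_usec negative here;
		 * the fixup below normalizes it into [0, 1000000).
		 * Worked example (illustrative): adjtimedelta =
		 * -2500000 gives tv_sec = -2 and tv_usec = -500000,
		 * which becomes tv_sec = -3, tv_usec = 500000,
		 * i.e. -2.5 seconds.
		 */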
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		if (!timerisvalid(&atv))
			return (EINVAL);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real time timer's
 * .it_value from its .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec  = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec     = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec    = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in the
		 * .it_value part of the real time timer.  If the timer
		 * has already gone off, return 0; else return the
		 * difference between the current time and the time the
		 * timer is set to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}
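
/*
 * Illustrative userland use of setitimer(2) (a sketch, not part of
 * this file), arming a repeating 1-second SIGALRM:
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 1;
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval = itv.it_value;
 *	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
 *		err(1, "setitimer");
 */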

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Otherwise, compute the next time the timer should go off, which is
 * strictly later than the current time.  This is where a delay in
 * processing this timeout causes multiple SIGALRM calls to be
 * compressed into one.
 */
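/*
 * Illustrative timeline (assumed values): with it_interval = 1s, if
 * the timeout fires 2.5s after the scheduled it_value, the function
 * sends one SIGALRM and the loop below adds 1s to it_value three
 * times, until it exceeds the current time, rearming for the
 * remaining 0.5s; the two expirations missed in the meantime are
 * compressed into that single signal.
 */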
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}

/*
 * Check that a timespec value is legit
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec > 100000000)
		ts->tv_sec = 100000000;
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
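/*
 * Worked example (illustrative): with it_value = 300us,
 * it_interval = 10000us and usec = 10000 (one tick at hz = 100), the
 * timer has expired and is already in the next interval: the carry is
 * 10000 - 300 = 9700us, so the timer reloads to 10000 - 9700 = 300us
 * and keeps its phase within the interval.
 */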
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
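
/*
 * Illustrative use, in the style of ratecheck(9) (a sketch, not from
 * this file), logging at most once every five seconds:
 *
 *	static struct timeval lasttime;
 *	static const struct timeval interval = { 5, 0 };
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("transfer error\n");
 */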

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for statistics as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not certain that the assumption holds, as it depends on
	 * the *caller's* behavior, not the behavior of this function.
	 * It is arguably wrong to make assumptions about the caller's
	 * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
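
/*
 * Illustrative use (a sketch, not from this file), allowing at most
 * 100 log messages per second:
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 100))
 *		log(LOG_INFO, "dropped packet\n");
 */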


#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}