/*	$OpenBSD: kern_time.c,v 1.92 2015/08/22 20:18:49 deraadt Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

int64_t adjtimedelta;		/* unapplied time correction (microseconds) */

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(struct timespec *ts)
{
	struct timespec now;

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock. Cancel adjtime and then set new time.
	 */
	adjtimedelta = 0;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}
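
/*
 * A minimal userland sketch (an illustration only, not compiled as
 * part of this file) of how the checks in settime() surface to a
 * caller: a value past the wrap cutoff, or a backwards step at
 * securelevel > 1, is rejected with EPERM.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <time.h>

int
example_set_clock(time_t sec)
{
	struct timespec ts = { sec, 0 };

	/* Requires root; out-of-policy values fail with EPERM. */
	if (clock_settime(CLOCK_REALTIME, &ts) == -1) {
		if (errno == EPERM)
			fprintf(stderr, "clock change denied\n");
		return (-1);
	}
	return (0);
}
#endif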

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = pfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			*tp = q->p_tu.tu_runtime;
		} else
			return (EINVAL);
	}
	return (0);
}
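
/*
 * A minimal userland sketch (illustration only, not compiled here) of
 * the default case above: pthread_getcpuclockid(3) hands back a
 * clockid_t that encodes a thread id, and clock_gettime(2) decodes it
 * via __CLOCK_TYPE()/__CLOCK_PTID() to return that thread's run time.
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <time.h>

void
example_thread_cputime(pthread_t t)
{
	clockid_t cid;
	struct timespec ts;

	if (pthread_getcpuclockid(t, &cid) == 0 &&
	    clock_gettime(cid, &ts) == 0)
		printf("thread cpu time: %lld.%09ld\n",
		    (long long)ts.tv_sec, ts.tv_nsec);
}
#endif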

/* ARGSUSED */
int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrabstimespec(p, &ats);
		KERNEL_UNLOCK();
	}
#endif
	return (error);
}

/* ARGSUSED */
int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p, 0)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = pfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			ts.tv_sec = 0;
			ts.tv_nsec = 1000000000 / hz;
		} else
			return (EINVAL);
	}

	if (SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &ts);
			KERNEL_UNLOCK();
		}
#endif
	}

	return (error);
}
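
/*
 * A minimal userland sketch (illustration only, not compiled here):
 * all clocks above report the same 1000000000 / hz resolution, e.g.
 * 10000000 ns on a hz=100 kernel.
 */
#if 0
#include <stdio.h>
#include <time.h>

void
example_clock_res(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
		printf("clock resolution: %ld ns\n", res.tv_nsec);
}
#endif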

/* ARGSUSED */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timespec *rmtp;
	struct timeval tv;
	int error, error1;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrreltimespec(p, &rqt);
		KERNEL_UNLOCK();
	}
#endif

	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
	if (itimerfix(&tv))
		return (EINVAL);

	if (rmtp)
		getnanouptime(&sts);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tvtohz(&tv)));
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		getnanouptime(&ets);

		memset(&rmt, 0, sizeof(rmt));
		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error1 = copyout(&rmt, rmtp, sizeof(rmt));
		if (error1 != 0)
			error = error1;
#ifdef KTRACE
		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &rmt);
			KERNEL_UNLOCK();
		}
#endif
	}

	return (error);
}
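
/*
 * A minimal userland sketch (illustration only, not compiled here) of
 * the rmtp contract implemented above: when the sleep is interrupted,
 * the unslept remainder is copied out, so the caller can resume it.
 */
#if 0
#include <errno.h>
#include <time.h>

void
example_sleep_fully(struct timespec req)
{
	struct timespec rem;

	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
		req = rem;	/* sleep whatever time remains */
}
#endif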

/* ARGSUSED */
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrabstimeval(p, &atv);
			KERNEL_UNLOCK();
		}
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}

/* ARGSUSED */
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p, 0)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

/* ARGSUSED */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	if (tame_adjtime_check(p, delta))
		return (EPERM);

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p, 0)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}
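
/*
 * A worked sketch (illustration only, not compiled here) of the
 * sign normalization in sys_adjtime() above.  C division truncates
 * toward zero, so adjtimedelta = -1500000 us first decomposes to
 * { -1, -500000 } and is then normalized to the canonical
 * { -2, 500000 }, i.e. -2 s + 0.5 s = -1.5 s.
 */
#if 0
struct timeval
example_us_to_timeval(int64_t us)
{
	struct timeval tv;

	tv.tv_sec = us / 1000000;
	tv.tv_usec = us % 1000000;
	if (tv.tv_usec < 0) {
		tv.tv_usec += 1000000;	/* borrow one second */
		tv.tv_sec--;
	}
	return (tv);
}
#endif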

struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec  = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec     = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec    = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}
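
/*
 * A minimal userland sketch (illustration only, not compiled here):
 * because ITIMER_REAL is stored as an absolute uptime, the conversion
 * above means getitimer(2) always reports time remaining, never the
 * kernel's internal deadline.
 */
#if 0
#include <sys/time.h>
#include <stdio.h>

void
example_time_left(void)
{
	struct itimerval it;

	if (getitimer(ITIMER_REAL, &it) == 0)
		printf("alarm in %lld.%06ld s\n",
		    (long long)it.it_value.tv_sec, it.it_value.tv_usec);
}
#endif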

/* ARGSUSED */
int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}
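
/*
 * A minimal userland sketch (illustration only, not compiled here) of
 * driving the code above: arm a periodic ITIMER_REAL so that SIGALRM
 * is delivered roughly every 100 ms until the timer is cleared.
 */
#if 0
#include <sys/time.h>
#include <signal.h>

void
example_periodic_alarm(void (*handler)(int))
{
	struct itimerval it;

	signal(SIGALRM, handler);
	it.it_value.tv_sec = 0;
	it.it_value.tv_usec = 100000;	/* first expiry in 100 ms */
	it.it_interval = it.it_value;	/* reload after each expiry */
	setitimer(ITIMER_REAL, &it, NULL);
}
#endif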

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM signals to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}
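
/*
 * A worked sketch (illustration only, not compiled here) of the
 * catch-up loop above.  Suppose it_value expired at uptime 10.0 s
 * with it_interval = 1.0 s, but softclock ran late and it is now
 * 13.4 s: the loop steps it_value to 11.0, 12.0, 13.0, then 14.0,
 * which is in the future, so the timeout is rearmed for 14.0 s and
 * the three missed periods are compressed into the one SIGALRM
 * already sent.
 */
#if 0
while (timercmp(&deadline, &now, <=))		/* deadline in the past? */
	timeradd(&deadline, &interval, &deadline);
#endif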

/*
 * Check that a timespec value is valid.
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 || ts->tv_sec > 100000000 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}
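
/*
 * A worked sketch (illustration only, not compiled here) of the
 * carry-over above.  With it_value = 300 us, it_interval = 10000 us
 * and a per-tick decrement of usec = 10000 us, the timer expires
 * with 9700 us of the decrement left over, so that overshoot is
 * deducted from the reload and the next expiry keeps its phase.
 */
#if 0
usec -= 300;		/* overshoot past expiry: 9700 us */
it_value = 10000;	/* reload from it_interval */
it_value -= usec;	/* carry over: next expiry in 300 us */
#endif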

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
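
/*
 * A minimal sketch (illustration only, not compiled here) of the
 * ratecheck(9) idiom: emit a diagnostic at most once per interval,
 * and always the first time thanks to the 0,0 check above.
 */
#if 0
static struct timeval drop_last;			/* starts at 0,0 */
static const struct timeval drop_interval = { 10, 0 };	/* 10 seconds */

if (ratecheck(&drop_last, &drop_interval))
	printf("packets dropped\n");
#endif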

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not certain the assumption holds, as it depends on the
	 * *caller's* behavior, not the behavior of this function.
	 * It is wrong to make assumptions about the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
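
/*
 * A minimal sketch (illustration only, not compiled here) of the
 * ppsratecheck() idiom, e.g. capping reply packets: allow at most
 * 100 events per second; maxpps = 0 suppresses everything and
 * maxpps < 0 allows everything.  send_reply() is hypothetical.
 */
#if 0
static struct timeval reply_last;
static int reply_pps;

if (ppsratecheck(&reply_last, &reply_pps, 100))
	send_reply();		/* hypothetical consumer */
#endif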
793