xref: /openbsd-src/sys/kern/kern_time.c (revision daf88648c0e349d5c02e1504293082072c981640)
1 /*	$OpenBSD: kern_time.c,v 1.61 2007/01/10 07:58:08 art Exp $	*/
2 /*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
33  */
34 
35 #include <sys/param.h>
36 #include <sys/resourcevar.h>
37 #include <sys/kernel.h>
38 #include <sys/systm.h>
39 #include <sys/proc.h>
40 #include <sys/vnode.h>
41 #include <sys/signalvar.h>
42 #ifdef __HAVE_TIMECOUNTER
43 #include <sys/timetc.h>
44 #endif
45 
46 #include <sys/mount.h>
47 #include <sys/syscallargs.h>
48 
49 #include <machine/cpu.h>
50 
51 void	itimerround(struct timeval *);
52 
53 /*
54  * Time of day and interval timer support.
55  *
56  * These routines provide the kernel entry points to get and set
57  * the time-of-day and per-process interval timers.  Subroutines
58  * here provide support for adding and subtracting timeval structures
59  * and decrementing interval timers, optionally reloading the interval
60  * timers when they expire.
61  */
62 
/* This function is used by clock_settime and settimeofday */
#ifdef __HAVE_TIMECOUNTER
/*
 * Step the time-of-day clock to *ts (timecounter variant).
 * Returns 0 on success, or EPERM if the change is disallowed
 * (too far forward, or backward at securelevel > 1).
 */
int
settime(struct timespec *ts)
{
	struct timespec now;


	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against INT_MAX since on 64-bit
	 *	platforms, sizeof(int) != sizeof(long) and
	 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
	 */
	if (ts->tv_sec > INT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %ld\n",
		    ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %ld seconds\n",
		    now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	/* Step the clock, then push the new time out to the RTC. */
	tc_setclock(ts);
	resettodr();

	return (0);
}
#else
/*
 * Step the time-of-day clock to *ts (pre-timecounter variant, which
 * maintains the global `time' timeval directly).  Same return values
 * and security checks as the timecounter variant above.
 */
int
settime(struct timespec *ts)
{
	struct timeval delta, tvv, *tv;
	int s;

	/* XXX - Ugh.  Convert the timespec to a timeval for `time'. */
	tv = &tvv;
	tvv.tv_sec = ts->tv_sec;
	tvv.tv_usec = ts->tv_nsec / 1000;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against INT_MAX since on 64-bit
	 *	platforms, sizeof(int) != sizeof(long) and
	 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
	 */
	if (tv->tv_sec > INT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %ld\n",
		    tv->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	if (securelevel > 1 && timercmp(tv, &time, <)) {
		printf("denied attempt to set clock back %ld seconds\n",
		    time_second - tv->tv_sec);
		return (EPERM);
	}

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	/* Shift boottime by the same delta so uptime stays monotonic. */
	timersub(tv, &time, &delta);
	time = *tv;
	timeradd(&boottime, &delta, &boottime);
	splx(s);
	resettodr();

	return (0);
}
#endif
158 
159 /* ARGSUSED */
160 int
161 sys_clock_gettime(struct proc *p, void *v, register_t *retval)
162 {
163 	struct sys_clock_gettime_args /* {
164 		syscallarg(clockid_t) clock_id;
165 		syscallarg(struct timespec *) tp;
166 	} */ *uap = v;
167 	clockid_t clock_id;
168 	struct timespec ats;
169 
170 	clock_id = SCARG(uap, clock_id);
171 	switch (clock_id) {
172 	case CLOCK_REALTIME:
173 		nanotime(&ats);
174 		break;
175 	case CLOCK_MONOTONIC:
176 		nanouptime(&ats);
177 		break;
178 	default:
179 		return (EINVAL);
180 	}
181 
182 	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
183 }
184 
185 /* ARGSUSED */
186 int
187 sys_clock_settime(struct proc *p, void *v, register_t *retval)
188 {
189 	struct sys_clock_settime_args /* {
190 		syscallarg(clockid_t) clock_id;
191 		syscallarg(const struct timespec *) tp;
192 	} */ *uap = v;
193 	struct timespec ats;
194 	clockid_t clock_id;
195 	int error;
196 
197 	if ((error = suser(p, 0)) != 0)
198 		return (error);
199 
200 	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
201 		return (error);
202 
203 	clock_id = SCARG(uap, clock_id);
204 	switch (clock_id) {
205 	case CLOCK_REALTIME:
206 		if ((error = settime(&ats)) != 0)
207 			return (error);
208 		break;
209 	case CLOCK_MONOTONIC:
210 		return (EINVAL);	/* read-only clock */
211 	default:
212 		return (EINVAL);
213 	}
214 
215 	return (0);
216 }
217 
218 int
219 sys_clock_getres(struct proc *p, void *v, register_t *retval)
220 {
221 	struct sys_clock_getres_args /* {
222 		syscallarg(clockid_t) clock_id;
223 		syscallarg(struct timespec *) tp;
224 	} */ *uap = v;
225 	clockid_t clock_id;
226 	struct timespec ts;
227 	int error = 0;
228 
229 	clock_id = SCARG(uap, clock_id);
230 	switch (clock_id) {
231 	case CLOCK_REALTIME:
232 	case CLOCK_MONOTONIC:
233 		ts.tv_sec = 0;
234 		ts.tv_nsec = 1000000000 / hz;
235 		break;
236 	default:
237 		return (EINVAL);
238 	}
239 
240 	if (SCARG(uap, tp))
241 		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
242 
243 	return error;
244 }
245 
/*
 * nanosleep(2): sleep for the requested interval.  If rmtp is
 * non-NULL, the unslept remainder (clamped at zero) is copied out,
 * whether or not the sleep was interrupted.
 */
/* ARGSUSED */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;	/* dummy wait channel shared by all sleepers */
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timeval tv;
	int error;

	/* Fetch and validate the requested sleep time. */
	error = copyin((const void *)SCARG(uap, rqtp), (void *)&rqt,
	    sizeof(struct timespec));
	if (error)
		return (error);

	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
	if (itimerfix(&tv))
		return (EINVAL);

	/* Remember the start time so the remainder can be computed. */
	if (SCARG(uap, rmtp))
		getnanouptime(&sts);

	/* Sleep at least one tick; PCATCH lets signals wake us early. */
	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tvtohz(&tv)));
	if (error == ERESTART)
		error = EINTR;	/* nanosleep(2) is not restartable */
	if (error == EWOULDBLOCK)
		error = 0;	/* timeout expiry is normal completion */

	if (SCARG(uap, rmtp)) {
		/* remaining = requested - (now - start), clamped at zero */
		getnanouptime(&ets);

		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error = copyout((void *)&rmt, (void *)SCARG(uap,rmtp),
		    sizeof(rmt));
	}

	return error;
}
294 
295 /* ARGSUSED */
296 int
297 sys_gettimeofday(struct proc *p, void *v, register_t *retval)
298 {
299 	struct sys_gettimeofday_args /* {
300 		syscallarg(struct timeval *) tp;
301 		syscallarg(struct timezone *) tzp;
302 	} */ *uap = v;
303 	struct timeval atv;
304 	int error = 0;
305 
306 	if (SCARG(uap, tp)) {
307 		microtime(&atv);
308 		if ((error = copyout((void *)&atv, (void *)SCARG(uap, tp),
309 		    sizeof (atv))))
310 			return (error);
311 	}
312 	if (SCARG(uap, tzp))
313 		error = copyout((void *)&tz, (void *)SCARG(uap, tzp),
314 		    sizeof (tz));
315 	return (error);
316 }
317 
318 /* ARGSUSED */
319 int
320 sys_settimeofday(struct proc *p, void *v, register_t *retval)
321 {
322 	struct sys_settimeofday_args /* {
323 		syscallarg(const struct timeval *) tv;
324 		syscallarg(const struct timezone *) tzp;
325 	} */ *uap = v;
326 	struct timezone atz;
327 	struct timeval atv;
328 	int error;
329 
330 	if ((error = suser(p, 0)))
331 		return (error);
332 	/* Verify all parameters before changing time. */
333 	if (SCARG(uap, tv) && (error = copyin((void *)SCARG(uap, tv),
334 	    (void *)&atv, sizeof(atv))))
335 		return (error);
336 	if (SCARG(uap, tzp) && (error = copyin((void *)SCARG(uap, tzp),
337 	    (void *)&atz, sizeof(atz))))
338 		return (error);
339 	if (SCARG(uap, tv)) {
340 		struct timespec ts;
341 
342 		TIMEVAL_TO_TIMESPEC(&atv, &ts);
343 		if ((error = settime(&ts)) != 0)
344 			return (error);
345 	}
346 	if (SCARG(uap, tzp))
347 		tz = atz;
348 	return (0);
349 }
350 
#ifdef __HAVE_TIMECOUNTER
struct timeval adjtimedelta;		/* unapplied time correction */
#else
/* Pre-timecounter adjtime(2)/adjfreq(2) state, applied by the clock code. */
int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
int64_t	ntp_tick_permanent;		/* permanent per-tick freq correction (set by adjfreq) */
int64_t	ntp_tick_acc;			/* NOTE(review): accumulator; not referenced in this file -- presumably used by the clock code */
#endif
360 
/*
 * adjfreq(2): read and/or set the permanent clock frequency
 * correction.  Reading requires no privilege; setting is superuser
 * only.  NOTE(review): the units of *freq are not visible here --
 * presumably a scaled frequency offset; confirm against adjfreq(2).
 */
/* ARGSUSED */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
#ifndef __HAVE_TIMECOUNTER
	int s;

	/* Report the current correction, rescaled from per-tick units. */
	if (SCARG(uap, oldfreq)) {
		f = ntp_tick_permanent * hz;
		if ((error = copyout((void *)&f, (void *)SCARG(uap, oldfreq),
		    sizeof(int64_t))))
			return (error);
	}
	if (SCARG(uap, freq)) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin((void *)SCARG(uap, freq), (void *)&f,
		    sizeof(int64_t))))
			return (error);
		/* Store a per-tick amount; blocked clock interrupts
		 * guarantee the 64-bit store is seen atomically. */
		s = splclock();
		ntp_tick_permanent = f / hz;
		splx(s);
	}
#else
	/* Timecounter path: tc_adjfreq() handles both get and set. */
	if (SCARG(uap, oldfreq)) {
		if ((error = tc_adjfreq(&f, NULL)) != 0)
			return (error);
		if ((error = copyout(&f, SCARG(uap, oldfreq), sizeof(f))) != 0)
			return (error);
	}
	if (SCARG(uap, freq)) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(SCARG(uap, freq), &f, sizeof(f))) != 0)
			return (error);
		if ((error = tc_adjfreq(NULL, &f)) != 0)
			return (error);
	}
#endif
	return (0);
}
408 
/*
 * adjtime(2): gradually skew the clock by a signed delta.  Optionally
 * returns the previously pending (unapplied) correction via olddelta.
 * Setting a new delta is superuser only; querying is not.
 */
/* ARGSUSED */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
#ifdef __HAVE_TIMECOUNTER
	int error;

	/* Report the still-unapplied correction before replacing it. */
	if (SCARG(uap, olddelta))
		if ((error = copyout((void *)&adjtimedelta,
		    (void *)SCARG(uap, olddelta), sizeof(struct timeval))))
			return (error);

	if (SCARG(uap, delta)) {
		if ((error = suser(p, 0)))
			return (error);

		if ((error = copyin((void *)SCARG(uap, delta),
		    (void *)&adjtimedelta, sizeof(struct timeval))))
			return (error);
	}

	/* Normalize the correction so 0 <= tv_usec < 1000000. */
	while (adjtimedelta.tv_usec >= 1000000) {
		adjtimedelta.tv_usec -= 1000000;
		adjtimedelta.tv_sec += 1;
	}
	while (adjtimedelta.tv_usec < 0) {
		adjtimedelta.tv_usec += 1000000;
		adjtimedelta.tv_sec -= 1;
	}
	return (0);
#else
	struct timeval atv;
	long ndelta, ntickdelta, odelta;
	int s, error;

	/* Query-only call: just read the pending correction. */
	if (!SCARG(uap, delta)) {
		s = splclock();
		odelta = timedelta;
		splx(s);
		goto out;
	}
	if ((error = suser(p, 0)))
		return (error);
	if ((error = copyin((void *)SCARG(uap, delta), (void *)&atv,
	    sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	/* Convert to microseconds, saturating at LONG_MIN/LONG_MAX. */
	if (atv.tv_sec > LONG_MAX / 1000000L)
		ndelta = LONG_MAX;
	else if (atv.tv_sec < LONG_MIN / 1000000L)
		ndelta = LONG_MIN;
	else {
		ndelta = atv.tv_sec * 1000000L;
		odelta = ndelta;	/* reused here as overflow sentinel */
		ndelta += atv.tv_usec;
		if (atv.tv_usec > 0 && ndelta <= odelta)
			ndelta = LONG_MAX;
		else if (atv.tv_usec < 0 && ndelta >= odelta)
			ndelta = LONG_MIN;
	}

	/* Large corrections are applied at 10x the normal skew rate. */
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

out:
	if (SCARG(uap, olddelta)) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		if ((error = copyout((void *)&atv, (void *)SCARG(uap, olddelta),
		    sizeof(struct timeval))))
			return (error);
	}
	return (0);
#endif
}
513 
514 
515 /*
516  * Get value of an interval timer.  The process virtual and
517  * profiling virtual time timers are kept in the p_stats area, since
518  * they can be swapped out.  These are kept internally in the
519  * way they are specified externally: in time until they expire.
520  *
521  * The real time interval timer is kept in the process table slot
522  * for the process, and its value (it_value) is kept as an
523  * absolute time rather than as a delta, so that it is easy to keep
524  * periodic real-time signals from drifting.
525  *
526  * Virtual time timers are processed in the hardclock() routine of
527  * kern_clock.c.  The real time timer is processed by a timeout
528  * routine, called from the softclock() routine.  Since a callout
529  * may be delayed in real time due to interrupt processing in the system,
530  * it is possible for the real time timeout routine (realitexpire, given below),
531  * to be delayed in real time past when it is supposed to occur.  It
532  * does not suffice, therefore, to reload the real timer .it_value from the
533  * real time timers .it_interval.  Rather, we compute the next time in
534  * absolute time the timer should go off.
535  */
/*
 * getitimer(2): copy out the current value of an interval timer as a
 * relative time, converting ITIMER_REAL from its internal absolute
 * representation.
 */
/* ARGSUSED */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int s;

	/* Only ITIMER_REAL..ITIMER_PROF are valid timer ids. */
	if (SCARG(uap, which) < ITIMER_REAL || SCARG(uap, which) > ITIMER_PROF)
		return (EINVAL);
	s = splclock();		/* keep the timer stable while we copy it */
	if (SCARG(uap, which) == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	} else
		aitv = p->p_stats->p_timer[SCARG(uap, which)];
	splx(s);
	return (copyout((void *)&aitv, (void *)SCARG(uap, itv),
	    sizeof (struct itimerval)));
}
574 
575 /* ARGSUSED */
576 int
577 sys_setitimer(struct proc *p, void *v, register_t *retval)
578 {
579 	struct sys_setitimer_args /* {
580 		syscallarg(int) which;
581 		syscallarg(const struct itimerval *) itv;
582 		syscallarg(struct itimerval *) oitv;
583 	} */ *uap = v;
584 	struct sys_getitimer_args getargs;
585 	struct itimerval aitv;
586 	const struct itimerval *itvp;
587 	int error;
588 	int timo;
589 
590 	if (SCARG(uap, which) < ITIMER_REAL || SCARG(uap, which) > ITIMER_PROF)
591 		return (EINVAL);
592 	itvp = SCARG(uap, itv);
593 	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
594 	    sizeof(struct itimerval))))
595 		return (error);
596 	if (SCARG(uap, oitv) != NULL) {
597 		SCARG(&getargs, which) = SCARG(uap, which);
598 		SCARG(&getargs, itv) = SCARG(uap, oitv);
599 		if ((error = sys_getitimer(p, &getargs, retval)))
600 			return (error);
601 	}
602 	if (itvp == 0)
603 		return (0);
604 	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
605 		return (EINVAL);
606 	if (SCARG(uap, which) == ITIMER_REAL) {
607 		struct timeval ctv;
608 
609 		timeout_del(&p->p_realit_to);
610 		getmicrouptime(&ctv);
611 		if (timerisset(&aitv.it_value)) {
612 			timo = tvtohz(&aitv.it_value);
613 			timeout_add(&p->p_realit_to, timo);
614 			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
615 		}
616 		p->p_realtimer = aitv;
617 	} else {
618 		int s;
619 
620 		itimerround(&aitv.it_interval);
621 		s = splclock();
622 		p->p_stats->p_timer[SCARG(uap, which)] = aitv;
623 		splx(s);
624 	}
625 
626 	return (0);
627 }
628 
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct proc *p;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	/* One-shot timer: clear the absolute expiry time and stop. */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	/*
	 * Periodic timer: advance it_value by whole intervals until it
	 * lies in the future, then rearm the timeout for that instant.
	 * Skipped-over expirations are the SIGALRMs being compressed.
	 */
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval, &p->p_realtimer.it_value);
		getmicrouptime(&ctv);
		if (timercmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			/* Don't rearm for a process that is exiting. */
			if ((p->p_flag & P_WEXIT) == 0)
				timeout_add(&p->p_realit_to, timo);
			return;
		}
	}
}
667 
668 /*
669  * Check that a proposed value to load into the .it_value or
670  * .it_interval part of an interval timer is acceptable.
671  */
672 int
673 itimerfix(struct timeval *tv)
674 {
675 
676 	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
677 	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
678 		return (EINVAL);
679 
680 	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
681 		tv->tv_usec = tick;
682 
683 	return (0);
684 }
685 
686 /*
687  * Timer interval smaller than the resolution of the system clock are
688  * rounded up.
689  */
690 void
691 itimerround(struct timeval *tv)
692 {
693 	if (tv->tv_sec == 0 && tv->tv_usec < tick)
694 		tv->tv_usec = tick;
695 }
696 
697 /*
698  * Decrement an interval timer by a specified number
699  * of microseconds, which must be less than a second,
700  * i.e. < 1000000.  If the timer expires, then reload
701  * it.  In this case, carry over (usec - old value) to
702  * reduce the value reloaded into the timer so that
703  * the timer does not drift.  This routine assumes
704  * that it is called in a context where the timers
705  * on which it is operating cannot change in value.
706  */
707 int
708 itimerdecr(struct itimerval *itp, int usec)
709 {
710 
711 	if (itp->it_value.tv_usec < usec) {
712 		if (itp->it_value.tv_sec == 0) {
713 			/* expired, and already in next interval */
714 			usec -= itp->it_value.tv_usec;
715 			goto expire;
716 		}
717 		itp->it_value.tv_usec += 1000000;
718 		itp->it_value.tv_sec--;
719 	}
720 	itp->it_value.tv_usec -= usec;
721 	usec = 0;
722 	if (timerisset(&itp->it_value))
723 		return (1);
724 	/* expired, exactly at end of interval */
725 expire:
726 	if (timerisset(&itp->it_interval)) {
727 		itp->it_value = itp->it_interval;
728 		itp->it_value.tv_usec -= usec;
729 		if (itp->it_value.tv_usec < 0) {
730 			itp->it_value.tv_usec += 1000000;
731 			itp->it_value.tv_sec--;
732 		}
733 	} else
734 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
735 	return (0);
736 }
737 
738 /*
739  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
740  * for usage and rationale.
741  */
742 int
743 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
744 {
745 	struct timeval tv, delta;
746 	int rv = 0;
747 
748 	microuptime(&tv);
749 
750 	timersub(&tv, lasttime, &delta);
751 
752 	/*
753 	 * check for 0,0 is so that the message will be seen at least once,
754 	 * even if interval is huge.
755 	 */
756 	if (timercmp(&delta, mininterval, >=) ||
757 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
758 		*lasttime = tv;
759 		rv = 1;
760 	}
761 
762 	return (rv);
763 }
764 
/*
 * ppsratecheck(): packets (or events) per second limitation.
 * Returns 1 when the event is within the maxpps budget for the
 * current one-second window, 0 when it should be suppressed.
 * maxpps == 0 suppresses everything; maxpps < 0 suppresses nothing.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * check for 0,0 is so that the message will be seen at least once.
	 * if more than one second have passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * we do increment *curpps even in *curpps < maxpps case, as some may
	 * try to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		/* new one-second window: reset the counter */
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * assume that there's not too many calls to this function.
	 * not sure if the assumption holds, as it depends on *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumption on the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
817