xref: /openbsd-src/sys/kern/kern_time.c (revision 5e3c7963eb248119b7dfd4b0defad58a7d9cd306)
/*	$OpenBSD: kern_time.c,v 1.109 2019/01/23 21:53:42 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>


int64_t adjtimedelta;		/* unapplied time correction (microseconds) */

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
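
/*
 * Illustrative sketch, not part of the original file: the timespec
 * helper macros from <sys/time.h> that the routines below rely on.
 * Measuring the elapsed time of an operation might look like this
 * ("limit" is a hypothetical caller-supplied bound):
 *
 *	struct timespec start, stop, diff;
 *
 *	nanouptime(&start);
 *	... do work ...
 *	nanouptime(&stop);
 *	timespecsub(&stop, &start, &diff);
 *	if (timespeccmp(&diff, &limit, >))
 *		printf("slow: %lld.%09lds\n",
 *		    (long long)diff.tv_sec, diff.tv_nsec);
 */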

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	/*
	 * An adjtime in progress is meaningless or harmful after
	 * setting the clock.  Cancel it, then set the new time.
	 */
	adjtimedelta = 0;
	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}
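
/*
 * Worked example of the cutoff above (illustrative only): with
 * UINT_MAX = 4294967295 and 365*24*60*60 = 31536000, any tv_sec
 * greater than 4263431295 is rejected.  Even at the largest
 * permitted setting, an attacker would still need a full year of
 * adjtime(2) drift to reach the wrap point.
 */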

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			*tp = q->p_tu.tu_runtime;
		} else
			return (EINVAL);
	}
	return (0);
}
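
/*
 * Illustrative sketch, not part of the original file: the default
 * case above serves clock ids minted by pthread_getcpuclockid(3).
 * From userland the round trip looks like:
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (pthread_getcpuclockid(pthread_self(), &cid) == 0 &&
 *	    clock_gettime(cid, &ts) == 0)
 *		... ts holds this thread's accumulated run time ...
 */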

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			ts.tv_sec = 0;
			ts.tv_nsec = 1000000000 / hz;
		} else
			return (EINVAL);
	}

	if (SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}
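
/*
 * Worked example (illustrative only): the reported resolution is one
 * tick, 1000000000 / hz nanoseconds.  With the common hz = 100 that
 * is 10000000 ns (10 ms); with hz = 1000 it would be 1 ms.
 */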

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
		    MAX(1, tstohz(&request)));
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
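
/*
 * Worked example (illustrative only): a request of 1.5 s that is
 * interrupted by a signal after roughly 0.4 s leaves request at
 * about 1.1 s after the timespecsub() above.  That value is copied
 * out through rmtp and the call fails with EINTR, so the caller can
 * resume sleeping for the remainder.
 */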

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		if (!timerisvalid(&atv))
			return (EINVAL);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real time timer's .it_value
 * from its .it_interval.  Rather, we compute the next time in absolute
 * time the timer should go off.
 */
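
/*
 * Worked example (illustrative only): if the clock's uptime is 100 s
 * when setitimer(2) arms ITIMER_REAL with it_value = 5 s and
 * it_interval = 5 s, the stored it_value is the absolute time 105 s.
 * On expiry realitexpire() advances it to 110 s, 115 s, and so on,
 * so periodic SIGALRMs do not drift even when a timeout fires late.
 * sys_getitimer() below converts the absolute time back to the
 * relative form the caller expects.
 */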
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec  = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec     = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec    = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in the .it_value
		 * part of the real time timer.  If the time for the real
		 * time timer has passed, return 0; else return the
		 * difference between the current time and the time the
		 * timer is set to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Else compute the next time the timer should go off, which is
 * > current time.  This is where delay in processing this timeout
 * causes multiple SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}
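
/*
 * Worked example (illustrative only): with it_interval = 1 s, if the
 * timeout is serviced 2.5 s late, the loop above adds the interval
 * three times until it_value exceeds the current uptime, then arms a
 * single timeout.  The two missed expirations are compressed into the
 * one SIGALRM already sent.
 */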

/*
 * Check that a timespec value is legit
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec > 100000000)
		ts->tv_sec = 100000000;
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}
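
/*
 * Worked example (illustrative only): decrementing by usec = 10000
 * when it_value = 0.000300 s and it_interval = 0.050000 s expires
 * the timer in this call.  The 300 us left are consumed, the
 * remaining 9700 us carry over, and the timer reloads to
 * 0.050000 - 0.009700 = 0.040300 s, so the period does not drift.
 */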

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
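
/*
 * Usage sketch, not part of the original file (see ratecheck(9)):
 * limit a diagnostic to once every five seconds:
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval msginterval = { 5, 0 };
 *
 *	if (ratecheck(&lastmsg, &msginterval))
 *		printf("timeout on device\n");
 */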

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may use *curpps for statistics as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume there are not too many calls to this function.  It is
	 * not clear the assumption holds, as it depends on the *caller's*
	 * behavior, not the behavior of this function.  Since it is wrong
	 * to make assumptions about the caller's behavior, the above #if
	 * is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
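
/*
 * Usage sketch, not part of the original file (see ppsratecheck(9)):
 * allow at most 100 events per second, e.g. for error replies:
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 100))
 *		... send the reply ...
 */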


#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
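
/*
 * Design note (an inference, not stated in the original file): the
 * periodic timeout fires in interrupt context, where writing the RTC
 * may not be safe (it may need to sleep, e.g. for an i2c RTC), so
 * periodic_resettodr() only queues resettodr_task on systq.
 * perform_resettodr() then runs in process context, writes the clock,
 * and re-arms the timeout, giving one RTC update every
 * RESETTODR_PERIOD (1800) seconds.
 */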