/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.62 2008/09/09 04:06:13 dillon Exp $
 */

#include "opt_ntp.h"
#include "opt_polling.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEVICE_POLLING
extern void init_device_poll_pcpu(int);
#endif

#ifdef IFPOLL_ENABLE
extern void ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

#ifdef SMP
static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");
#else
SYSCTL_STRUCT(_kern, OID_AUTO, cputime, CTLFLAG_RD, &cpu_time, kinfo_cputime,
    "CPU time statistics");
#endif

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
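
/*
 * Illustrative sketch (not part of the original file, hence under #if 0):
 * how a userland tool might consume kern.cp_time, in the style of top(1).
 * Two snapshots are taken and percentages computed from the deltas.  The
 * header locations for CPUSTATES/CP_* and the use of sysctlbyname() follow
 * the usual BSD conventions and are assumptions here.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>	/* CPUSTATES, CP_USER .. CP_IDLE */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	long t0[CPUSTATES], t1[CPUSTATES];
	size_t len = sizeof(t0);
	long total = 0;
	int i;

	sysctlbyname("kern.cp_time", t0, &len, NULL, 0);
	sleep(1);
	len = sizeof(t1);
	sysctlbyname("kern.cp_time", t1, &len, NULL, 0);

	for (i = 0; i < CPUSTATES; ++i)
		total += t1[i] - t0[i];
	if (total > 0) {
		printf("user %ld%% sys %ld%% idle %ld%%\n",
		       100 * (t1[CP_USER] - t0[CP_USER]) / total,
		       100 * (t1[CP_SYS] - t0[CP_SYS]) / total,
		       100 * (t1[CP_IDLE] - t0[CP_IDLE]) / total);
	}
	return (0);
}
#endif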

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this
 * with microuptime().  microtime() is not drift compensated.  The real
 * uptime with compensation is nanotime() - boottime.  boottime is
 * recalculated whenever the real time is set based on the compensated
 * elapsed time in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */
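
/*
 * Illustrative sketch of the relationship described above (not compiled
 * in): the compensated 'real' uptime is the compensated real time minus
 * boottime, whereas nanouptime() below returns the raw monotonic uptime.
 * example_compensated_uptime() is a hypothetical name.
 */
#if 0
static void
example_compensated_uptime(struct timespec *uptime)
{
	struct timespec now;

	nanotime(&now);				/* compensated real time */
	uptime->tv_sec = now.tv_sec - boottime.tv_sec;
	uptime->tv_nsec = now.tv_nsec - boottime.tv_nsec;
	if (uptime->tv_nsec < 0) {		/* normalize */
		uptime->tv_nsec += 1000000000;
		--uptime->tv_sec;
	}
}
#endif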

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;
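
/*
 * The update protocol for the FIFO, condensed from set_timeofday() and
 * sysctl_get_basetime() below (a sketch, not compiled in).  The writer
 * fills the next slot before publishing the index; the fences keep the
 * slot contents and the index ordered on both sides.
 */
#if 0
	/* Writer (cpu #0 only, in a critical section): */
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	basetime[ni] = new_basetime;	/* fill the currently unused slot */
	cpu_sfence();			/* slot contents become visible first */
	basetime_index = ni;		/* then publish the new index */

	/* Reader (any cpu, lock-free): */
	index = basetime_index;
	cpu_lfence();			/* don't read the slot speculatively */
	snapshot = basetime[index];
#endif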

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
	    gd->gd_time_seconds = 1;
	    gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
	    /* XXX */
	    gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
	    gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef DEVICE_POLLING
	init_device_poll_pcpu(gd->gd_cpuid);
#endif

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
	    nbt->tv_nsec += 1000000000;
	    --nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in sync.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resync again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
	    struct timespec *nbt;
	    struct timespec nts;
	    int leap;
	    int ni;

	    ++ticks;

#if 0
	    if (tco->tc_poll_pps)
		tco->tc_poll_pps(tco);
#endif

	    /*
	     * Calculate the new basetime index.  We are in a critical section
	     * on cpu #0 and can safely play with basetime_index.  Start
	     * with the current basetime and then make adjustments.
	     */
	    ni = (basetime_index + 1) & BASETIME_ARYMASK;
	    nbt = &basetime[ni];
	    *nbt = basetime[basetime_index];

	    /*
	     * Apply adjtime corrections.  (adjtime() API)
	     *
	     * adjtime() only runs on cpu #0 so our critical section is
	     * sufficient to access these variables.
	     */
	    if (ntp_delta != 0) {
		nbt->tv_nsec += ntp_tick_delta;
		ntp_delta -= ntp_tick_delta;
		if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
		    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
			ntp_tick_delta = ntp_delta;
		}
	    }

	    /*
	     * Apply permanent frequency corrections.  (sysctl API)
	     */
	    if (ntp_tick_permanent != 0) {
		ntp_tick_acc += ntp_tick_permanent;
		if (ntp_tick_acc >= (1LL << 32)) {
		    nbt->tv_nsec += ntp_tick_acc >> 32;
		    ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
		} else if (ntp_tick_acc <= -(1LL << 32)) {
		    /* Negate ntp_tick_acc to avoid shifting the sign bit. */
		    nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
		    ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
		}
	    }

	    if (nbt->tv_nsec >= 1000000000) {
		    nbt->tv_sec++;
		    nbt->tv_nsec -= 1000000000;
	    } else if (nbt->tv_nsec < 0) {
		    nbt->tv_sec--;
		    nbt->tv_nsec += 1000000000;
	    }

	    /*
	     * Another per-tick compensation.  (for ntp_adjtime() API)
	     */
	    if (nsec_adj != 0) {
		nsec_acc += nsec_adj;
		if (nsec_acc >= 0x100000000LL) {
		    nbt->tv_nsec += nsec_acc >> 32;
		    nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
		} else if (nsec_acc <= -0x100000000LL) {
		    nbt->tv_nsec -= -nsec_acc >> 32;
		    nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
		}
		if (nbt->tv_nsec >= 1000000000) {
		    nbt->tv_nsec -= 1000000000;
		    ++nbt->tv_sec;
		} else if (nbt->tv_nsec < 0) {
		    nbt->tv_nsec += 1000000000;
		    --nbt->tv_sec;
		}
	    }

	    /************************************************************
	     *			LEAP SECOND CORRECTION			*
	     ************************************************************
	     *
	     * Taking into account all the corrections made above, figure
	     * out the new real time.  If the seconds field has changed
	     * then apply any pending leap-second corrections.
	     */
	    getnanotime_nbt(nbt, &nts);

	    if (time_second != nts.tv_sec) {
		/*
		 * Apply leap second (sysctl API).  Adjust nts for changes
		 * so we do not have to call getnanotime_nbt again.
		 */
		if (ntp_leap_second) {
		    if (ntp_leap_second == nts.tv_sec) {
			if (ntp_leap_insert) {
			    nbt->tv_sec++;
			    nts.tv_sec++;
			} else {
			    nbt->tv_sec--;
			    nts.tv_sec--;
			}
			ntp_leap_second--;
		    }
		}

		/*
		 * Apply leap second (ntp_adjtime() API), calculate a new
		 * nsec_adj field.  ntp_update_second() returns nsec_adj
		 * as a per-second value but we need it as a per-tick value.
		 */
		leap = ntp_update_second(time_second, &nsec_adj);
		nsec_adj /= hz;
		nbt->tv_sec += leap;
		nts.tv_sec += leap;

		/*
		 * Update the time_second 'approximate time' global.
		 */
		time_second = nts.tv_sec;
	    }

	    /*
	     * Finally, our new basetime is ready to go live!
	     */
	    cpu_sfence();
	    basetime_index = ni;
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}
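
/*
 * The ntp_tick_permanent and nsec_adj corrections above both use 32.32
 * fixed point: a per-tick adjustment kept as nanoseconds shifted left 32
 * bits is accumulated every tick, whole nanoseconds (the upper 32 bits)
 * are folded into basetime, and the fractional residue is carried forward
 * so nothing is lost to rounding.  A standalone sketch of the same
 * arithmetic, with a hypothetical example value (not compiled in):
 */
#if 0
	int64_t adj = 1LL << 31;	/* +0.5 nsec per tick in 32.32 */
	int64_t acc = 0;
	long nsec_applied = 0;
	int tick;

	for (tick = 0; tick < 100; ++tick) {
		acc += adj;
		if (acc >= (1LL << 32)) {
			nsec_applied += acc >> 32;	/* whole nsecs */
			acc -= (acc >> 32) << 32;	/* keep fraction */
		}
	}
	/* After 100 ticks nsec_applied == 50: exactly 0.5 nsec/tick. */
#endif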

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
	    bump = 1;
	} else {
	    bump = tv.tv_usec - stv->tv_usec +
		(tv.tv_sec - stv->tv_sec) * 1000000;
	    if (bump < 0)
		bump = 0;
	    if (bump > 1000000)
		bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks the following CLKF_INTR
			 * testing, so we subtract one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				cpu_time.cp_idle += bump;
			} else {
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}
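
/*
 * Worked example of the 'bump' calculation above, with hypothetical
 * values (not compiled in).  The previous statclock fired at 10.999950s
 * of uptime and the current one at 11.007950s:
 */
#if 0
	struct timeval stv = { 10, 999950 };	/* previous statclock */
	struct timeval tv  = { 11,   7950 };	/* current statclock */
	int bump;

	bump = tv.tv_usec - stv.tv_usec + (tv.tv_sec - stv.tv_sec) * 1000000;
	/* = 7950 - 999950 + 1000000 = 8000 usec, i.e. one 125Hz slice */
	if (bump < 0)				/* clamp to [0, 1 second] */
		bump = 0;
	if (bump > 1000000)
		bump = 1000000;
#endif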

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		lp->lwp_proc->p_usched->schedulerclock(lp, info->periodic,
						       info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		ru = &lp->lwp_proc->p_ru;
		if ((vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the calculation is rounded up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			    ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			    ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}


/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping for the returned
 * number of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
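
/*
 * Illustration of the difference between the _high and _low conversions,
 * assuming hz = 100 so that ustick = 10000 usec (values hypothetical,
 * not compiled in):
 *
 *	tv = { 0, 25000 };	25ms request
 *
 *	tvtohz_high(&tv) -> (25000 + 9999) / 10000 + 1 = 4 ticks
 *		(never fires early, even if a tick is imminent)
 *	tvtohz_low(&tv)  -> 25000 / 10000 = 2 ticks
 *		(never oversleeps; may return 0 for very small values)
 *
 * Typical use for a timeout that must not fire early; the callout and
 * callback names here are placeholders:
 */
#if 0
	struct timeval tv;

	tv.tv_sec = 0;
	tv.tv_usec = 25000;
	callout_reset(&example_callout, tvtohz_high(&tv),
		      example_timeout_func, example_arg);
#endif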

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we handle that case
 * explicitly.  For uniformity we handle it in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
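
/*
 * The freq64_usec/freq64_nsec constants used above are precomputed 32.32
 * fixed-point ratios, e.g. freq64_usec = (1000000 << 32) / freq, so the
 * multiply-and-shift is an integer delta * 1000000 / freq without a
 * 64-bit division on the hot path.  A worked sketch with a hypothetical
 * 3.579545 MHz timer (not compiled in):
 */
#if 0
	uint64_t freq = 3579545;			/* counts/sec */
	uint64_t freq64_usec = (1000000ULL << 32) / freq;
							/* ~0.2794 usec per
							 * count, in 32.32 */
	uint64_t delta = 1789772;			/* ~half a second */
	long usec = (freq64_usec * delta) >> 32;	/* ~500000 usec */
#endif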

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			/* whole seconds in nsec plus the 32.32 remainder */
			delta = 1000000000 * (tcount / sys_cputimer->freq) +
				((sys_cputimer->freq64_nsec *
				  (tcount % sys_cputimer->freq)) >> 32);
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
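
/*
 * Typical pattern for a bounded hardware poll built on the two primitives
 * above (a sketch, not compiled in).  read_status() and REG_READY are
 * hypothetical device accessors.  Note that on a machine without a TSC
 * both primitives return -1, so this gives up immediately instead of
 * spinning forever.
 */
#if 0
static int
example_wait_ready(void)
{
	int64_t target = tsc_get_target(10000);	/* 10usec budget */

	for (;;) {
		if (read_status() & REG_READY)	/* hypothetical device */
			return (0);
		if (tsc_test_target(target) != 0)
			return (ETIMEDOUT);	/* budget hit, or no TSC */
		cpu_pause();
	}
}
#endif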
1451