/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.62 2008/09/09 04:06:13 dillon Exp $
 */

#include "opt_ntp.h"
#include "opt_polling.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEVICE_POLLING
extern void init_device_poll_pcpu(int);
#endif

#ifdef IFPOLL_ENABLE
extern void ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

#ifdef SMP
static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");
#else
SYSCTL_STRUCT(_kern, OID_AUTO, cputime, CTLFLAG_RD, &cpu_time, kinfo_cputime,
    "CPU time statistics");
#endif

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");
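
/*
 * Illustrative sketch, not part of this file's build: a userland program
 * can read the basetime exported above with the standard sysctlbyname(3)
 * API.  The sysctl name "kern.basetime" comes from the declaration above;
 * everything else here is ordinary libc.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	struct timespec bt;
	size_t len = sizeof(bt);

	if (sysctlbyname("kern.basetime", &bt, &len, NULL, 0) < 0)
		return (1);
	printf("basetime: %ld.%09ld\n", (long)bt.tv_sec, bt.tv_nsec);
	return (0);
}
#endif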

static void hardclock(systimer_t info, struct intrframe *frame);
static void statclock(systimer_t info, struct intrframe *frame);
static void schedclock(systimer_t info, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
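
/*
 * A minimal sketch (assumed values, not live code) of the 32.32 fixed
 * point convention used by nsec_adj and ntp_tick_permanent: the upper 32
 * bits hold whole nanoseconds and the lower 32 bits hold the fraction, so
 * whole nanoseconds are applied only as the accumulator carries over.
 */
#if 0
	/* a permanent +1 ppm rate at hz = 100: 10 nsec per 10 msec tick */
	int64_t permanent = 10LL << 32;	/* 10.0 nsec in 32.32 fixed point */
	int64_t acc = 0;
	long nsec = 0;
	int tick;

	for (tick = 0; tick < 100; ++tick) {
		acc += permanent;
		if (acc >= (1LL << 32)) {
			nsec += acc >> 32;		/* whole nanoseconds */
			acc -= (acc >> 32) << 32;	/* keep the fraction */
		}
	}
	/* after 100 ticks (one second) nsec == 1000, i.e. 1 usec: 1 ppm */
#endif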

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
	    gd->gd_time_seconds = 1;
	    gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
	    /* XXX */
	    gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
	    gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef DEVICE_POLLING
	init_device_poll_pcpu(gd->gd_cpuid);
#endif

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base;
 * instead we adjust basetime so that basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
	    nbt->tv_nsec += 1000000000;
	    --nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds, which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
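
/*
 * Worked example of the calculation above: if nanouptime() reports an
 * uptime of 500.250000000 and the caller asks for a wall time of
 * 1000000000.000000000, the new basetime slot becomes 999999499.750000000,
 * so uptime + basetime once again yields the requested time of day.
 */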

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in sync.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for it to
	 * temporarily fall behind in the seconds if something in the
	 * system locks out interrupts for a long period of time.  Since
	 * periodic timers just count events, though, everything should
	 * resync again immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
	    struct timespec *nbt;
	    struct timespec nts;
	    int leap;
	    int ni;

	    ++ticks;

#if 0
	    if (tco->tc_poll_pps)
		tco->tc_poll_pps(tco);
#endif

	    /*
	     * Calculate the new basetime index.  We are in a critical section
	     * on cpu #0 and can safely play with basetime_index.  Start
	     * with the current basetime and then make adjustments.
	     */
	    ni = (basetime_index + 1) & BASETIME_ARYMASK;
	    nbt = &basetime[ni];
	    *nbt = basetime[basetime_index];

	    /*
	     * Apply adjtime corrections.  (adjtime() API)
	     *
	     * adjtime() only runs on cpu #0 so our critical section is
	     * sufficient to access these variables.
	     */
	    if (ntp_delta != 0) {
		nbt->tv_nsec += ntp_tick_delta;
		ntp_delta -= ntp_tick_delta;
		if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
		    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
			ntp_tick_delta = ntp_delta;
		}
	    }

	    /*
	     * Apply permanent frequency corrections.  (sysctl API)
	     */
	    if (ntp_tick_permanent != 0) {
		ntp_tick_acc += ntp_tick_permanent;
		if (ntp_tick_acc >= (1LL << 32)) {
		    nbt->tv_nsec += ntp_tick_acc >> 32;
		    ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
		} else if (ntp_tick_acc <= -(1LL << 32)) {
		    /* Negate ntp_tick_acc to avoid shifting the sign bit. */
		    nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
		    ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
		}
	    }

	    if (nbt->tv_nsec >= 1000000000) {
		    nbt->tv_sec++;
		    nbt->tv_nsec -= 1000000000;
	    } else if (nbt->tv_nsec < 0) {
		    nbt->tv_sec--;
		    nbt->tv_nsec += 1000000000;
	    }

	    /*
	     * Another per-tick compensation.  (for ntp_adjtime() API)
	     */
	    if (nsec_adj != 0) {
		nsec_acc += nsec_adj;
		if (nsec_acc >= 0x100000000LL) {
		    nbt->tv_nsec += nsec_acc >> 32;
		    nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
		} else if (nsec_acc <= -0x100000000LL) {
		    nbt->tv_nsec -= -nsec_acc >> 32;
		    nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
		}
		if (nbt->tv_nsec >= 1000000000) {
		    nbt->tv_nsec -= 1000000000;
		    ++nbt->tv_sec;
		} else if (nbt->tv_nsec < 0) {
		    nbt->tv_nsec += 1000000000;
		    --nbt->tv_sec;
		}
	    }

	    /************************************************************
	     *			LEAP SECOND CORRECTION			*
	     ************************************************************
	     *
	     * Taking into account all the corrections made above, figure
	     * out the new real time.  If the seconds field has changed
	     * then apply any pending leap-second corrections.
	     */
	    getnanotime_nbt(nbt, &nts);

	    if (time_second != nts.tv_sec) {
		/*
		 * Apply leap second (sysctl API).  Adjust nts for changes
		 * so we do not have to call getnanotime_nbt again.
		 */
		if (ntp_leap_second) {
		    if (ntp_leap_second == nts.tv_sec) {
			if (ntp_leap_insert) {
			    nbt->tv_sec++;
			    nts.tv_sec++;
			} else {
			    nbt->tv_sec--;
			    nts.tv_sec--;
			}
			ntp_leap_second--;
		    }
		}

		/*
		 * Apply leap second (ntp_adjtime() API), calculate a new
		 * nsec_adj field.  ntp_update_second() returns nsec_adj
		 * as a per-second value but we need it as a per-tick value.
		 */
		leap = ntp_update_second(time_second, &nsec_adj);
		nsec_adj /= hz;
		nbt->tv_sec += leap;
		nts.tv_sec += leap;

		/*
		 * Update the time_second 'approximate time' global.
		 */
		time_second = nts.tv_sec;
	    }

	    /*
	     * Finally, our new basetime is ready to go live!
	     */
	    cpu_sfence();
	    basetime_index = ni;

	    /*
	     * Figure out how badly the system is starved for memory
	     */
	    vm_fault_ratecheck();
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_fairq_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * The LWKT scheduler will generally allow the current process to
	 * return to user mode even if there are other runnable LWKT threads
	 * running in kernel mode on behalf of a user process.  This will
	 * ensure that those other threads have an opportunity to run in
	 * fairly short order (but not instantly).
	 */
	need_lwkt_resched();

	/*
	 * ITimer handling is per-tick, per-cpu.  I don't think ksignal()
	 * is mpsafe on curproc, so XXX get the mplock.
	 */
	if ((p = curproc) != NULL && try_mplock()) {
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0)
			ksignal(p, SIGVTALRM);
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0)
			ksignal(p, SIGPROF);
		rel_mplock();
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
	    bump = 1;
	} else {
	    bump = tv.tv_usec - stv->tv_usec +
		(tv.tv_sec - stv->tv_sec) * 1000000;
	    if (bump < 0)
		bump = 0;
	    if (bump > 1000000)
		bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flag & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (frame && CLKF_INTR(frame))
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (frame && CLKF_INTR(frame)) {
#ifdef DEBUG_PCTRACK
			do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				cpu_time.cp_idle += bump;
			} else {
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		lp->lwp_proc->p_usched->schedulerclock(lp, info->periodic,
						       info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the calculation is rounded up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			    ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
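
/*
 * Worked example of the rounding above, assuming hz = 100 and therefore
 * ustick = 10000: a timeval of 1.005000 yields
 * 1 * 100 + (5000 + 9999) / 10000 + 1 = 102 ticks.  The round-up plus the
 * extra tick guarantee the full interval elapses even if a clock interrupt
 * is imminent when the timeout is armed.
 */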

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			    ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}


/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval must not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we handle that case explicitly.
 * For uniformity we handle it in the usec routines too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
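
/*
 * A minimal sketch of the fixed point scaling used above, under the
 * assumption that sys_cputimer initializes freq64_usec as a 32.32 fixed
 * point reciprocal, roughly (1000000 << 32) / freq:
 */
#if 0
	uint64_t freq64_usec = (1000000ULL << 32) / sys_cputimer->freq;

	/*
	 * delta < freq after the normalization above, so the product
	 * stays below (1000000 << 32) and cannot overflow 64 bits.
	 * The result approximates delta * 1000000 / freq.
	 */
	long usec = (freq64_usec * delta) >> 32;
#endif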

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
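
/*
 * Typical interval-measurement use of the uptime routines, a sketch
 * using the in-place timespecsub() macro (cf. the timespecadd() use in
 * pps_event() below):
 */
#if 0
	struct timespec t1, t2;

	nanouptime(&t1);
	/* ... operation being timed ... */
	nanouptime(&t2);
	timespecsub(&t2, &t1);	/* t2 now holds the elapsed time */
#endif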

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}
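
/*
 * The two routines above combine into a simple calibrated busy-wait.
 * A hypothetical helper, sketched on the assumption that cpu_pause()
 * (used by the kernel's other spin loops) is available here:
 */
#if 0
static void
tsc_delay(int ns)
{
	int64_t target;

	if ((target = tsc_get_target(ns)) < 0)
		return;		/* no TSC; the caller needs a fallback */
	while (tsc_test_target(target) == 0)
		cpu_pause();
}
#endif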