xref: /onnv-gate/usr/src/uts/common/os/clock.c (revision 11657:524064812b1b)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>
#include <sys/ddi_timer.h>
#include <sys/random.h>
#include <sys/modctl.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

#include <sys/sunddi.h>
#include <sys/clock_impl.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */
extern hrtime_t volatile devinfo_freeze;

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;		/* time offset (us) */
int32_t time_constant = 0;		/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;		/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

kcondvar_t lbolt_cv;

/*
 * Hybrid lbolt implementation:
 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
 * original symbols removed from the system. The once clock driven variables
 * are now implemented in an event driven fashion, backed by gethrtime()
 * coarsened to the appropriate clock resolution. The default event driven
 * implementation is complemented by a cyclic driven one, active only during
 * periods of intense activity around the DDI lbolt routines, when an lbolt
 * specific cyclic is reprogrammed to fire at a clock tick interval to serve
 * consumers of lbolt who rely on the original low cost of consulting a memory
 * position.
 *
 * The implementation uses the number of calls to these routines and the
 * frequency of these to determine when to transition from event to cyclic
 * driven and vice versa. These values are kept on a per CPU basis for
 * scalability reasons and to prevent CPUs from constantly invalidating a
 * single cache line when modifying a global variable. The transition from
 * event to cyclic mode happens once the thresholds are crossed, and activity
 * on any CPU can cause such a transition.
 *
 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 * lbolt_cyclic_driven() according to the current mode. When the thresholds
 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 * fire at a nsec_per_tick interval and increment an internal variable at
 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of that variable. lbolt_cyclic() will attempt
 * to shut itself off at each threshold interval (sampling period for calls
 * to the DDI lbolt routines), and return to the event driven mode, but will
 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
 * for the cyclic subsystem to be initialized.
 */
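
/*
 * Illustrative sketch (not part of the original source): a DDI consumer of
 * the hybrid implementation simply calls the routines above and never sees
 * the mode switching. The timeout value and the work_pending()/process_work()
 * helpers below are hypothetical; only ddi_get_lbolt() and drv_usectohz()
 * are real DDI interfaces.
 *
 *	clock_t deadline = ddi_get_lbolt() + drv_usectohz(500000);
 *
 *	while (work_pending() && ddi_get_lbolt() < deadline)
 *		process_work();
 */
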
int64_t lbolt_bootstrap(void);
int64_t lbolt_event_driven(void);
int64_t lbolt_cyclic_driven(void);
int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);

/*
 * lbolt's cyclic, installed by clock_init().
 */
static void lbolt_cyclic(void);

/*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven, once it reaches cyclic mode.
 */
static boolean_t lbolt_cyc_only = B_FALSE;

/*
 * Cache aligned, per CPU structure with lbolt usage statistics.
 */
static lbolt_cpu_t *lb_cpu;

/*
 * Single, cache aligned, structure with all the information required by
 * the lbolt implementation.
 */
lbolt_info_t *lb_info;


int one_sec = 1; /* turned on once every second */
static int fsflushcnt;	/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */
cyclic_id_t ddi_timer_cyclic;	/* cyclic_timer()'s cyclic_id */

extern void	clock_tick_schedule(int);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/* Diagnose/Limit messages about delay(9F) called from interrupt context */
int			delay_from_interrupt_diagnose = 0;
volatile uint32_t	delay_from_interrupt_msg = 20;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

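/*
 * Incremented once per second by clock(); the deadman code can compare
 * successive samples of this counter to notice a clock that has stopped
 * advancing.
 */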
static int64_t deadman_counter = 0;

static void
clock(void)
{
	kthread_t	*t;
	uint_t	nrunnable;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	extern void set_anoninfo();
	extern	void	set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int i;
	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth.
	 */
	set_freemem();


	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with: if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 */
	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec) {
		loadavg_update();
		deadman_counter++;
	}

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Update user, system, and idle cpu times.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}

		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt, not the thread that is
			 * handling the interrupt and calculating the load
			 * average.
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	clock_tick_schedule(one_sec);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wake up the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 *   - if dosynctodr is 1, then compute the drift between
		 *	the tod chip and software time and adjust one or
		 *	the other depending on the circumstances
		 *
		 *   - if dosynctodr is 0, then the tod chip is independent
		 *	of the software clock and should not be adjusted,
		 *	but allowed to free run.  this allows NTP to sync.
		 *	hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
					callout_hrestime();

				}
			} else {
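				/*
				 * The drift is at most two seconds (or an
				 * explicit sync was requested).  Either push
				 * the software time out to the TOD chip, or
				 * let the chip steer hrestime by setting
				 * timedelta below.
				 */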
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = now;
					s = hr_clock_lock();
					timedelta = (int64_t)drift*NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		vminfo.updates++;
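		/*
		 * Fold this second's run queue, swap queue and I/O wait
		 * samples into the sysinfo counters read by utilities such
		 * as sar(1M).
		 */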
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;
		sysinfo.updates++;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API. We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large. If it is, we put the largest value
			 * that we can use into avenrun[i]. This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[]
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t clk_hdlr, timer_hdlr, lbolt_hdlr;
	cyc_time_t clk_when, lbolt_when;
	int i, sz;
	intptr_t buf;

	/*
	 * Setup handler and timer for the clock cyclic.
	 */
	clk_hdlr.cyh_func = (cyc_func_t)clock;
	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
	clk_hdlr.cyh_arg = NULL;

	clk_when.cyt_when = 0;
	clk_when.cyt_interval = nsec_per_tick;

	/*
	 * cyclic_timer is dedicated to the ddi interface, which
	 * uses the same clock resolution as the system one.
	 */
	timer_hdlr.cyh_func = (cyc_func_t)cyclic_timer;
	timer_hdlr.cyh_level = CY_LOCK_LEVEL;
	timer_hdlr.cyh_arg = NULL;

	/*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
	 * interval to satisfy performance needs of the DDI lbolt consumers.
	 * It is off by default.
	 */
	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
	lbolt_hdlr.cyh_arg = NULL;

	lbolt_when.cyt_interval = nsec_per_tick;
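	/*
	 * lbolt_when.cyt_when is set further below, once the initial lbolt
	 * mode (event or cyclic driven) has been chosen.
	 */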

	/*
	 * Allocate cache line aligned space for the per CPU lbolt data and
	 * lbolt info structures, and initialize them with their default
	 * values. Note that these structures are also cache line sized.
	 */
	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

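	/*
	 * Scale the sampling interval used to decide when to switch lbolt
	 * modes so that it covers the same wall clock period regardless of
	 * the configured clock rate.
	 */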
	if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    hz/HZ_DEFAULT;
	else
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;

	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;

	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	for (i = 0; i < max_ncpus; i++)
		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;

	/*
	 * Install the softint used to switch between event and cyclic driven
	 * lbolt. We use a soft interrupt to make sure the context of the
	 * cyclic reprogram call is safe.
	 */
	lbolt_softint_add();

	/*
	 * The hybrid lbolt implementation is based on a hardware counter
	 * that is reset only at a hardware reboot, but we'd like the lbolt
	 * value to start at zero after both a hardware and a fast reboot.
	 * To that end, we record the number of clock ticks the system has
	 * been up in the lbi_debug_time field of the lbolt info structure;
	 * this value is subtracted from lbolt before returning it.
	 */
	lb_info->lbi_internal = lb_info->lbi_debug_time =
	    (gethrtime()/nsec_per_tick);

	/*
	 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
	 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed. Setting lbolt_hybrid
	 * to either lbolt_{cyclic,event}_driven here signals those code paths
	 * that the lbolt related structures can be used.
	 */
	if (lbolt_cyc_only) {
		lbolt_when.cyt_when = 0;
		lbolt_hybrid = lbolt_cyclic_driven;
	} else {
		lbolt_when.cyt_when = CY_INFINITY;
		lbolt_hybrid = lbolt_event_driven;
	}

	/*
	 * Grab cpu_lock and install all three cyclics.
	 */
	mutex_enter(&cpu_lock);

	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
	ddi_timer_cyclic = cyclic_add(&timer_hdlr, &clk_when);
	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);

	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

10630Sstevel@tonic-gate 	/* 10-second snapshot, calculate first position */
10640Sstevel@tonic-gate 	if (avgs->lg_len == 0) {
10650Sstevel@tonic-gate 		return (0);
10660Sstevel@tonic-gate 	}
10670Sstevel@tonic-gate 	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;
10680Sstevel@tonic-gate 
10690Sstevel@tonic-gate 	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
10700Sstevel@tonic-gate 	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
10710Sstevel@tonic-gate 	for (i = hr_avg = 0; i < slen; i++) {
10720Sstevel@tonic-gate 		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
10730Sstevel@tonic-gate 		hr_avg += avgs->lg_loads[cpos];
10740Sstevel@tonic-gate 	}
10750Sstevel@tonic-gate 
10760Sstevel@tonic-gate 	hr_avg = hr_avg / slen;
10770Sstevel@tonic-gate 	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
10780Sstevel@tonic-gate 
10790Sstevel@tonic-gate 	return (avg);
10800Sstevel@tonic-gate }
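
/*
 * Illustrative sketch (assumption, not part of the original source): the
 * value returned by genloadavg() above is a plain arithmetic mean over the
 * most recent slen samples of the circular lg_loads[] buffer, walking
 * backwards from the slot written last, roughly:
 *
 *	for (i = sum = 0; i < slen; i++)
 *		sum += lg_loads[(spos - i + S_LOADAVG_SZ) % S_LOADAVG_SZ];
 *	avg = (sum / slen) / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
 *
 * i.e. the averaged hrtime-based load scaled down by
 * NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX.
 */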
10810Sstevel@tonic-gate 
10820Sstevel@tonic-gate /*
10830Sstevel@tonic-gate  * Run every second from clock() to update the loadavg count available to the
10840Sstevel@tonic-gate  * system and cpu-partitions.
10850Sstevel@tonic-gate  *
10860Sstevel@tonic-gate  * This works by sampling the current usr, sys and wait times on each CPU,
10870Sstevel@tonic-gate  * computing a delta against the previous sample, and adding that delta to
10880Sstevel@tonic-gate  * the running system and cpu-partition totals.
10890Sstevel@tonic-gate  */
10900Sstevel@tonic-gate 
10910Sstevel@tonic-gate static void
10920Sstevel@tonic-gate loadavg_update()
10930Sstevel@tonic-gate {
10940Sstevel@tonic-gate 	cpu_t *cp;
10950Sstevel@tonic-gate 	cpupart_t *cpupart;
10960Sstevel@tonic-gate 	hrtime_t cpu_total;
10970Sstevel@tonic-gate 	int prev;
10980Sstevel@tonic-gate 
10990Sstevel@tonic-gate 	cp = cpu_list;
11000Sstevel@tonic-gate 	loadavg.lg_total = 0;
11010Sstevel@tonic-gate 
11020Sstevel@tonic-gate 	/*
11030Sstevel@tonic-gate 	 * first pass totals up per-cpu statistics for system and cpu
11040Sstevel@tonic-gate 	 * partitions
11050Sstevel@tonic-gate 	 */
11060Sstevel@tonic-gate 
11070Sstevel@tonic-gate 	do {
11080Sstevel@tonic-gate 		struct loadavg_s *lavg;
11090Sstevel@tonic-gate 
11100Sstevel@tonic-gate 		lavg = &cp->cpu_loadavg;
11110Sstevel@tonic-gate 
11120Sstevel@tonic-gate 		cpu_total = cp->cpu_acct[CMS_USER] +
11130Sstevel@tonic-gate 		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
11140Sstevel@tonic-gate 		/* compute delta against last total */
11150Sstevel@tonic-gate 		scalehrtime(&cpu_total);
11160Sstevel@tonic-gate 		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
11170Sstevel@tonic-gate 		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
11180Sstevel@tonic-gate 		if (lavg->lg_loads[prev] <= 0) {
11190Sstevel@tonic-gate 			lavg->lg_loads[lavg->lg_cur] = cpu_total;
11200Sstevel@tonic-gate 			cpu_total = 0;
11210Sstevel@tonic-gate 		} else {
11220Sstevel@tonic-gate 			lavg->lg_loads[lavg->lg_cur] = cpu_total;
11230Sstevel@tonic-gate 			cpu_total = cpu_total - lavg->lg_loads[prev];
11240Sstevel@tonic-gate 			if (cpu_total < 0)
11250Sstevel@tonic-gate 				cpu_total = 0;
11260Sstevel@tonic-gate 		}
11270Sstevel@tonic-gate 
11280Sstevel@tonic-gate 		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
11290Sstevel@tonic-gate 		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
11300Sstevel@tonic-gate 		    lavg->lg_len + 1 : S_LOADAVG_SZ;
11310Sstevel@tonic-gate 
11320Sstevel@tonic-gate 		loadavg.lg_total += cpu_total;
11330Sstevel@tonic-gate 		cp->cpu_part->cp_loadavg.lg_total += cpu_total;
11340Sstevel@tonic-gate 
11350Sstevel@tonic-gate 	} while ((cp = cp->cpu_next) != cpu_list);
11360Sstevel@tonic-gate 
11370Sstevel@tonic-gate 	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
11380Sstevel@tonic-gate 	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
11390Sstevel@tonic-gate 	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
11400Sstevel@tonic-gate 	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
11410Sstevel@tonic-gate 	/*
11420Sstevel@tonic-gate 	 * Second pass updates counts
11430Sstevel@tonic-gate 	 */
11440Sstevel@tonic-gate 	cpupart = cp_list_head;
11450Sstevel@tonic-gate 
11460Sstevel@tonic-gate 	do {
11470Sstevel@tonic-gate 		struct loadavg_s *lavg;
11480Sstevel@tonic-gate 
11490Sstevel@tonic-gate 		lavg = &cpupart->cp_loadavg;
11500Sstevel@tonic-gate 		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
11510Sstevel@tonic-gate 		lavg->lg_total = 0;
11520Sstevel@tonic-gate 		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
11530Sstevel@tonic-gate 		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
11540Sstevel@tonic-gate 		    lavg->lg_len + 1 : S_LOADAVG_SZ;
11550Sstevel@tonic-gate 
11560Sstevel@tonic-gate 	} while ((cpupart = cpupart->cp_next) != cp_list_head);
11570Sstevel@tonic-gate 
11580Sstevel@tonic-gate }
11590Sstevel@tonic-gate 
11600Sstevel@tonic-gate /*
11610Sstevel@tonic-gate  * clock_update() - local clock update
11620Sstevel@tonic-gate  *
11630Sstevel@tonic-gate  * This routine is called by ntp_adjtime() to update the local clock
11640Sstevel@tonic-gate  * phase and frequency. The implementation is of an
11650Sstevel@tonic-gate  * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
11660Sstevel@tonic-gate  * routine computes new time and frequency offset estimates for each
11670Sstevel@tonic-gate  * call.  The PPS signal itself determines the new time offset,
11680Sstevel@tonic-gate  * instead of the calling argument.  Presumably, calls to
11690Sstevel@tonic-gate  * ntp_adjtime() occur only when the caller believes the local clock
11700Sstevel@tonic-gate  * is valid within some bound (+-128 ms with NTP). If the caller's
11710Sstevel@tonic-gate  * time is far different than the PPS time, an argument will ensue,
11720Sstevel@tonic-gate  * and it's not clear who will lose.
11730Sstevel@tonic-gate  *
11740Sstevel@tonic-gate  * For uncompensated quartz crystal oscillators and nominal update
11750Sstevel@tonic-gate  * intervals less than 1024 s, operation should be in phase-lock mode
11760Sstevel@tonic-gate  * (STA_FLL = 0), where the loop is disciplined to phase. For update
11770Sstevel@tonic-gate  * intervals greater than this, operation should be in frequency-lock
11780Sstevel@tonic-gate  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
11790Sstevel@tonic-gate  *
11800Sstevel@tonic-gate  * Note: mutex(&tod_lock) is in effect.
11810Sstevel@tonic-gate  */
11820Sstevel@tonic-gate void
11830Sstevel@tonic-gate clock_update(int offset)
11840Sstevel@tonic-gate {
11850Sstevel@tonic-gate 	int ltemp, mtemp, s;
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tod_lock));
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
11900Sstevel@tonic-gate 		return;
11910Sstevel@tonic-gate 	ltemp = offset;
11920Sstevel@tonic-gate 	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
11930Sstevel@tonic-gate 		ltemp = pps_offset;
11940Sstevel@tonic-gate 
11950Sstevel@tonic-gate 	/*
11960Sstevel@tonic-gate 	 * Scale the phase adjustment and clamp to the operating range.
11970Sstevel@tonic-gate 	 */
11980Sstevel@tonic-gate 	if (ltemp > MAXPHASE)
11990Sstevel@tonic-gate 		time_offset = MAXPHASE * SCALE_UPDATE;
12000Sstevel@tonic-gate 	else if (ltemp < -MAXPHASE)
12010Sstevel@tonic-gate 		time_offset = -(MAXPHASE * SCALE_UPDATE);
12020Sstevel@tonic-gate 	else
12030Sstevel@tonic-gate 		time_offset = ltemp * SCALE_UPDATE;
12040Sstevel@tonic-gate 
12050Sstevel@tonic-gate 	/*
12060Sstevel@tonic-gate 	 * Select whether the frequency is to be controlled and in which
12070Sstevel@tonic-gate 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
12080Sstevel@tonic-gate 	 * multiply/divide should be replaced someday.
12090Sstevel@tonic-gate 	 */
12100Sstevel@tonic-gate 	if (time_status & STA_FREQHOLD || time_reftime == 0)
12110Sstevel@tonic-gate 		time_reftime = hrestime.tv_sec;
12120Sstevel@tonic-gate 
12130Sstevel@tonic-gate 	mtemp = hrestime.tv_sec - time_reftime;
12140Sstevel@tonic-gate 	time_reftime = hrestime.tv_sec;
12150Sstevel@tonic-gate 
12160Sstevel@tonic-gate 	if (time_status & STA_FLL) {
12170Sstevel@tonic-gate 		if (mtemp >= MINSEC) {
12180Sstevel@tonic-gate 			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
12190Sstevel@tonic-gate 			    SCALE_UPDATE));
12200Sstevel@tonic-gate 			if (ltemp)
12210Sstevel@tonic-gate 				time_freq += ltemp / SCALE_KH;
12220Sstevel@tonic-gate 		}
12230Sstevel@tonic-gate 	} else {
12240Sstevel@tonic-gate 		if (mtemp < MAXSEC) {
12250Sstevel@tonic-gate 			ltemp *= mtemp;
12260Sstevel@tonic-gate 			if (ltemp)
12270Sstevel@tonic-gate 				time_freq += (int)(((int64_t)ltemp *
12280Sstevel@tonic-gate 				    SCALE_USEC) / SCALE_KF)
12290Sstevel@tonic-gate 				    / (1 << (time_constant * 2));
12300Sstevel@tonic-gate 		}
12310Sstevel@tonic-gate 	}
12320Sstevel@tonic-gate 	if (time_freq > time_tolerance)
12330Sstevel@tonic-gate 		time_freq = time_tolerance;
12340Sstevel@tonic-gate 	else if (time_freq < -time_tolerance)
12350Sstevel@tonic-gate 		time_freq = -time_tolerance;
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate 	s = hr_clock_lock();
12380Sstevel@tonic-gate 	tod_needsync = 1;
12390Sstevel@tonic-gate 	hr_clock_unlock(s);
12400Sstevel@tonic-gate }
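
/*
 * Worked example (illustrative only, assuming MAXPHASE is 128000 usec as
 * the "+-128 ms" note above suggests): a caller-supplied offset of
 * 200000 usec is clamped, so time_offset becomes MAXPHASE * SCALE_UPDATE,
 * while an offset of 50000 usec is simply scaled to 50000 * SCALE_UPDATE.
 * The frequency term is then nudged from that phase error either by
 * dividing by the elapsed update interval (FLL mode) or by multiplying by
 * it and attenuating with 1 << (time_constant * 2) (PLL mode), with the
 * result clamped to +-time_tolerance.
 */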
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate /*
12430Sstevel@tonic-gate  * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
12440Sstevel@tonic-gate  *
12450Sstevel@tonic-gate  * This routine is called at each PPS interrupt in order to discipline
12460Sstevel@tonic-gate  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
12470Sstevel@tonic-gate  * and leaves it in a handy spot for the clock() routine. It
12480Sstevel@tonic-gate  * integrates successive PPS phase differences and calculates the
12490Sstevel@tonic-gate  * frequency offset. This is used in clock() to discipline the CPU
12500Sstevel@tonic-gate  * clock oscillator so that intrinsic frequency error is cancelled out.
12510Sstevel@tonic-gate  * The code requires the caller to capture the time and hardware counter
12520Sstevel@tonic-gate  * value at the on-time PPS signal transition.
12530Sstevel@tonic-gate  *
12540Sstevel@tonic-gate  * Note that, on some Unix systems, this routine runs at an interrupt
12550Sstevel@tonic-gate  * priority level higher than the timer interrupt routine clock().
12560Sstevel@tonic-gate  * Therefore, the variables used are distinct from the clock()
12570Sstevel@tonic-gate  * variables, except for certain exceptions: The PPS frequency pps_freq
12580Sstevel@tonic-gate  * and phase pps_offset variables are determined by this routine and
12590Sstevel@tonic-gate  * updated atomically. The time_tolerance variable can be considered a
12600Sstevel@tonic-gate  * constant, since it is infrequently changed, and then only when the
12610Sstevel@tonic-gate  * PPS signal is disabled. The watchdog counter pps_valid is updated
12620Sstevel@tonic-gate  * once per second by clock() and is atomically cleared in this
12630Sstevel@tonic-gate  * routine.
12640Sstevel@tonic-gate  *
12650Sstevel@tonic-gate  * tvp is the time of the last tick; usec is a microsecond count since the
12660Sstevel@tonic-gate  * last tick.
12670Sstevel@tonic-gate  *
12680Sstevel@tonic-gate  * Note: In Solaris systems, the tick value is actually given by
12690Sstevel@tonic-gate  *       usec_per_tick.  This is called from the serial driver cdintr(),
12700Sstevel@tonic-gate  *	 or equivalent, at a high PIL.  Because the kernel keeps a
12710Sstevel@tonic-gate  *	 high-resolution time, the following code can accept either
12720Sstevel@tonic-gate  *	 the traditional argument pair, or the current highres timestamp
12730Sstevel@tonic-gate  *       in tvp and zero in usec.
12740Sstevel@tonic-gate  */
12750Sstevel@tonic-gate void
12760Sstevel@tonic-gate ddi_hardpps(struct timeval *tvp, int usec)
12770Sstevel@tonic-gate {
12780Sstevel@tonic-gate 	int u_usec, v_usec, bigtick;
12790Sstevel@tonic-gate 	time_t cal_sec;
12800Sstevel@tonic-gate 	int cal_usec;
12810Sstevel@tonic-gate 
12820Sstevel@tonic-gate 	/*
12830Sstevel@tonic-gate 	 * An occasional glitch can be produced when the PPS interrupt
12840Sstevel@tonic-gate 	 * occurs in the clock() routine before the time variable is
12850Sstevel@tonic-gate 	 * updated. Here the offset is discarded when the difference
12860Sstevel@tonic-gate 	 * between it and the last one is greater than tick/2, but not
12870Sstevel@tonic-gate 	 * if the interval since the first discard exceeds 30 s.
12880Sstevel@tonic-gate 	 */
12890Sstevel@tonic-gate 	time_status |= STA_PPSSIGNAL;
12900Sstevel@tonic-gate 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
12910Sstevel@tonic-gate 	pps_valid = 0;
12920Sstevel@tonic-gate 	u_usec = -tvp->tv_usec;
12930Sstevel@tonic-gate 	if (u_usec < -(MICROSEC/2))
12940Sstevel@tonic-gate 		u_usec += MICROSEC;
12950Sstevel@tonic-gate 	v_usec = pps_offset - u_usec;
12960Sstevel@tonic-gate 	if (v_usec < 0)
12970Sstevel@tonic-gate 		v_usec = -v_usec;
12980Sstevel@tonic-gate 	if (v_usec > (usec_per_tick >> 1)) {
12990Sstevel@tonic-gate 		if (pps_glitch > MAXGLITCH) {
13000Sstevel@tonic-gate 			pps_glitch = 0;
13010Sstevel@tonic-gate 			pps_tf[2] = u_usec;
13020Sstevel@tonic-gate 			pps_tf[1] = u_usec;
13030Sstevel@tonic-gate 		} else {
13040Sstevel@tonic-gate 			pps_glitch++;
13050Sstevel@tonic-gate 			u_usec = pps_offset;
13060Sstevel@tonic-gate 		}
13070Sstevel@tonic-gate 	} else
13080Sstevel@tonic-gate 		pps_glitch = 0;
13090Sstevel@tonic-gate 
13100Sstevel@tonic-gate 	/*
13110Sstevel@tonic-gate 	 * A three-stage median filter is used to help deglitch the pps
13120Sstevel@tonic-gate 	 * time. The median sample becomes the time offset estimate; the
13130Sstevel@tonic-gate 	 * difference between the other two samples becomes the time
13140Sstevel@tonic-gate 	 * dispersion (jitter) estimate.
13150Sstevel@tonic-gate 	 */
13160Sstevel@tonic-gate 	pps_tf[2] = pps_tf[1];
13170Sstevel@tonic-gate 	pps_tf[1] = pps_tf[0];
13180Sstevel@tonic-gate 	pps_tf[0] = u_usec;
13190Sstevel@tonic-gate 	if (pps_tf[0] > pps_tf[1]) {
13200Sstevel@tonic-gate 		if (pps_tf[1] > pps_tf[2]) {
13210Sstevel@tonic-gate 			pps_offset = pps_tf[1];		/* 0 1 2 */
13220Sstevel@tonic-gate 			v_usec = pps_tf[0] - pps_tf[2];
13230Sstevel@tonic-gate 		} else if (pps_tf[2] > pps_tf[0]) {
13240Sstevel@tonic-gate 			pps_offset = pps_tf[0];		/* 2 0 1 */
13250Sstevel@tonic-gate 			v_usec = pps_tf[2] - pps_tf[1];
13260Sstevel@tonic-gate 		} else {
13270Sstevel@tonic-gate 			pps_offset = pps_tf[2];		/* 0 2 1 */
13280Sstevel@tonic-gate 			v_usec = pps_tf[0] - pps_tf[1];
13290Sstevel@tonic-gate 		}
13300Sstevel@tonic-gate 	} else {
13310Sstevel@tonic-gate 		if (pps_tf[1] < pps_tf[2]) {
13320Sstevel@tonic-gate 			pps_offset = pps_tf[1];		/* 2 1 0 */
13330Sstevel@tonic-gate 			v_usec = pps_tf[2] - pps_tf[0];
13340Sstevel@tonic-gate 		} else  if (pps_tf[2] < pps_tf[0]) {
13350Sstevel@tonic-gate 			pps_offset = pps_tf[0];		/* 1 0 2 */
13360Sstevel@tonic-gate 			v_usec = pps_tf[1] - pps_tf[2];
13370Sstevel@tonic-gate 		} else {
13380Sstevel@tonic-gate 			pps_offset = pps_tf[2];		/* 1 2 0 */
13390Sstevel@tonic-gate 			v_usec = pps_tf[1] - pps_tf[0];
13400Sstevel@tonic-gate 		}
13410Sstevel@tonic-gate 	}
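
	/*
	 * Worked example (illustrative, not in the original source): with
	 * pps_tf[0] = 30, pps_tf[1] = 10 and pps_tf[2] = 20 usec, the
	 * "0 2 1" branch above is taken, so pps_offset becomes 20 (the
	 * median sample) and v_usec becomes 30 - 10 = 20 (the spread of the
	 * other two samples, used below as the jitter estimate).
	 */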
13420Sstevel@tonic-gate 	if (v_usec > MAXTIME)
13430Sstevel@tonic-gate 		pps_jitcnt++;
13440Sstevel@tonic-gate 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
13450Sstevel@tonic-gate 	pps_jitter += v_usec / (1 << PPS_AVG);
13460Sstevel@tonic-gate 	if (pps_jitter > (MAXTIME >> 1))
13470Sstevel@tonic-gate 		time_status |= STA_PPSJITTER;
13480Sstevel@tonic-gate 
13490Sstevel@tonic-gate 	/*
13500Sstevel@tonic-gate 	 * During the calibration interval adjust the starting time when
13510Sstevel@tonic-gate 	 * the tick overflows. At the end of the interval compute the
13520Sstevel@tonic-gate 	 * duration of the interval and the difference of the hardware
13530Sstevel@tonic-gate 	 * counters at the beginning and end of the interval. This code
13540Sstevel@tonic-gate 	 * is deliciously complicated by the fact that valid differences may
13550Sstevel@tonic-gate 	 * exceed the value of tick when using long calibration
13560Sstevel@tonic-gate 	 * intervals and small ticks. Note that the counter can be
13570Sstevel@tonic-gate 	 * greater than tick if caught at just the wrong instant, but
13580Sstevel@tonic-gate 	 * the values returned and used here are correct.
13590Sstevel@tonic-gate 	 */
13600Sstevel@tonic-gate 	bigtick = (int)usec_per_tick * SCALE_USEC;
13610Sstevel@tonic-gate 	pps_usec -= pps_freq;
13620Sstevel@tonic-gate 	if (pps_usec >= bigtick)
13630Sstevel@tonic-gate 		pps_usec -= bigtick;
13640Sstevel@tonic-gate 	if (pps_usec < 0)
13650Sstevel@tonic-gate 		pps_usec += bigtick;
13660Sstevel@tonic-gate 	pps_time.tv_sec++;
13670Sstevel@tonic-gate 	pps_count++;
13680Sstevel@tonic-gate 	if (pps_count < (1 << pps_shift))
13690Sstevel@tonic-gate 		return;
13700Sstevel@tonic-gate 	pps_count = 0;
13710Sstevel@tonic-gate 	pps_calcnt++;
13720Sstevel@tonic-gate 	u_usec = usec * SCALE_USEC;
13730Sstevel@tonic-gate 	v_usec = pps_usec - u_usec;
13740Sstevel@tonic-gate 	if (v_usec >= bigtick >> 1)
13750Sstevel@tonic-gate 		v_usec -= bigtick;
13760Sstevel@tonic-gate 	if (v_usec < -(bigtick >> 1))
13770Sstevel@tonic-gate 		v_usec += bigtick;
13780Sstevel@tonic-gate 	if (v_usec < 0)
13790Sstevel@tonic-gate 		v_usec = -(-v_usec >> pps_shift);
13800Sstevel@tonic-gate 	else
13810Sstevel@tonic-gate 		v_usec = v_usec >> pps_shift;
13820Sstevel@tonic-gate 	pps_usec = u_usec;
13830Sstevel@tonic-gate 	cal_sec = tvp->tv_sec;
13840Sstevel@tonic-gate 	cal_usec = tvp->tv_usec;
13850Sstevel@tonic-gate 	cal_sec -= pps_time.tv_sec;
13860Sstevel@tonic-gate 	cal_usec -= pps_time.tv_usec;
13870Sstevel@tonic-gate 	if (cal_usec < 0) {
13880Sstevel@tonic-gate 		cal_usec += MICROSEC;
13890Sstevel@tonic-gate 		cal_sec--;
13900Sstevel@tonic-gate 	}
13910Sstevel@tonic-gate 	pps_time = *tvp;
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 	/*
13940Sstevel@tonic-gate 	 * Check for lost interrupts, noise, excessive jitter and
13950Sstevel@tonic-gate 	 * excessive frequency error. The number of timer ticks during
13960Sstevel@tonic-gate 	 * the interval may vary +-1 tick. Add to this a margin of one
13970Sstevel@tonic-gate 	 * tick for the PPS signal jitter and maximum frequency
13980Sstevel@tonic-gate 	 * deviation. If the limits are exceeded, the calibration
13990Sstevel@tonic-gate 	 * interval is reset to the minimum and we start over.
14000Sstevel@tonic-gate 	 */
14010Sstevel@tonic-gate 	u_usec = (int)usec_per_tick << 1;
14020Sstevel@tonic-gate 	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
14030Sstevel@tonic-gate 	    (cal_sec == 0 && cal_usec < u_usec)) ||
14040Sstevel@tonic-gate 	    v_usec > time_tolerance || v_usec < -time_tolerance) {
14050Sstevel@tonic-gate 		pps_errcnt++;
14060Sstevel@tonic-gate 		pps_shift = PPS_SHIFT;
14070Sstevel@tonic-gate 		pps_intcnt = 0;
14080Sstevel@tonic-gate 		time_status |= STA_PPSERROR;
14090Sstevel@tonic-gate 		return;
14100Sstevel@tonic-gate 	}
14110Sstevel@tonic-gate 
14120Sstevel@tonic-gate 	/*
14130Sstevel@tonic-gate 	 * A three-stage median filter is used to help deglitch the pps
14140Sstevel@tonic-gate 	 * frequency. The median sample becomes the frequency offset
14150Sstevel@tonic-gate 	 * estimate; the difference between the other two samples
14160Sstevel@tonic-gate 	 * becomes the frequency dispersion (stability) estimate.
14170Sstevel@tonic-gate 	 */
14180Sstevel@tonic-gate 	pps_ff[2] = pps_ff[1];
14190Sstevel@tonic-gate 	pps_ff[1] = pps_ff[0];
14200Sstevel@tonic-gate 	pps_ff[0] = v_usec;
14210Sstevel@tonic-gate 	if (pps_ff[0] > pps_ff[1]) {
14220Sstevel@tonic-gate 		if (pps_ff[1] > pps_ff[2]) {
14230Sstevel@tonic-gate 			u_usec = pps_ff[1];		/* 0 1 2 */
14240Sstevel@tonic-gate 			v_usec = pps_ff[0] - pps_ff[2];
14250Sstevel@tonic-gate 		} else if (pps_ff[2] > pps_ff[0]) {
14260Sstevel@tonic-gate 			u_usec = pps_ff[0];		/* 2 0 1 */
14270Sstevel@tonic-gate 			v_usec = pps_ff[2] - pps_ff[1];
14280Sstevel@tonic-gate 		} else {
14290Sstevel@tonic-gate 			u_usec = pps_ff[2];		/* 0 2 1 */
14300Sstevel@tonic-gate 			v_usec = pps_ff[0] - pps_ff[1];
14310Sstevel@tonic-gate 		}
14320Sstevel@tonic-gate 	} else {
14330Sstevel@tonic-gate 		if (pps_ff[1] < pps_ff[2]) {
14340Sstevel@tonic-gate 			u_usec = pps_ff[1];		/* 2 1 0 */
14350Sstevel@tonic-gate 			v_usec = pps_ff[2] - pps_ff[0];
14360Sstevel@tonic-gate 		} else  if (pps_ff[2] < pps_ff[0]) {
14370Sstevel@tonic-gate 			u_usec = pps_ff[0];		/* 1 0 2 */
14380Sstevel@tonic-gate 			v_usec = pps_ff[1] - pps_ff[2];
14390Sstevel@tonic-gate 		} else {
14400Sstevel@tonic-gate 			u_usec = pps_ff[2];		/* 1 2 0 */
14410Sstevel@tonic-gate 			v_usec = pps_ff[1] - pps_ff[0];
14420Sstevel@tonic-gate 		}
14430Sstevel@tonic-gate 	}
14440Sstevel@tonic-gate 
14450Sstevel@tonic-gate 	/*
14460Sstevel@tonic-gate 	 * Here the frequency dispersion (stability) is updated. If it
14470Sstevel@tonic-gate 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
14480Sstevel@tonic-gate 	 * offset is updated as well, but clamped to the tolerance. It
14490Sstevel@tonic-gate 	 * will be processed later by the clock() routine.
14500Sstevel@tonic-gate 	 */
14510Sstevel@tonic-gate 	v_usec = (v_usec >> 1) - pps_stabil;
14520Sstevel@tonic-gate 	if (v_usec < 0)
14530Sstevel@tonic-gate 		pps_stabil -= -v_usec >> PPS_AVG;
14540Sstevel@tonic-gate 	else
14550Sstevel@tonic-gate 		pps_stabil += v_usec >> PPS_AVG;
14560Sstevel@tonic-gate 	if (pps_stabil > MAXFREQ >> 2) {
14570Sstevel@tonic-gate 		pps_stbcnt++;
14580Sstevel@tonic-gate 		time_status |= STA_PPSWANDER;
14590Sstevel@tonic-gate 		return;
14600Sstevel@tonic-gate 	}
14610Sstevel@tonic-gate 	if (time_status & STA_PPSFREQ) {
14620Sstevel@tonic-gate 		if (u_usec < 0) {
14630Sstevel@tonic-gate 			pps_freq -= -u_usec >> PPS_AVG;
14640Sstevel@tonic-gate 			if (pps_freq < -time_tolerance)
14650Sstevel@tonic-gate 				pps_freq = -time_tolerance;
14660Sstevel@tonic-gate 			u_usec = -u_usec;
14670Sstevel@tonic-gate 		} else {
14680Sstevel@tonic-gate 			pps_freq += u_usec >> PPS_AVG;
14690Sstevel@tonic-gate 			if (pps_freq > time_tolerance)
14700Sstevel@tonic-gate 				pps_freq = time_tolerance;
14710Sstevel@tonic-gate 		}
14720Sstevel@tonic-gate 	}
14730Sstevel@tonic-gate 
14740Sstevel@tonic-gate 	/*
14750Sstevel@tonic-gate 	 * Here the calibration interval is adjusted. If the maximum
14760Sstevel@tonic-gate 	 * time difference is greater than tick / 4, reduce the interval
14770Sstevel@tonic-gate 	 * by half. If this is not the case for four consecutive
14780Sstevel@tonic-gate 	 * intervals, double the interval.
14790Sstevel@tonic-gate 	 */
14800Sstevel@tonic-gate 	if (u_usec << pps_shift > bigtick >> 2) {
14810Sstevel@tonic-gate 		pps_intcnt = 0;
14820Sstevel@tonic-gate 		if (pps_shift > PPS_SHIFT)
14830Sstevel@tonic-gate 			pps_shift--;
14840Sstevel@tonic-gate 	} else if (pps_intcnt >= 4) {
14850Sstevel@tonic-gate 		pps_intcnt = 0;
14860Sstevel@tonic-gate 		if (pps_shift < PPS_SHIFTMAX)
14870Sstevel@tonic-gate 			pps_shift++;
14880Sstevel@tonic-gate 	} else
14890Sstevel@tonic-gate 		pps_intcnt++;
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate 	/*
14920Sstevel@tonic-gate 	 * If recovering from kmdb, then make sure the tod chip gets resynced.
14930Sstevel@tonic-gate 	 * If we took an early exit above, then we don't yet have a stable
14940Sstevel@tonic-gate 	 * calibration signal to lock onto, so don't mark the tod for sync
14950Sstevel@tonic-gate 	 * until we get all the way here.
14960Sstevel@tonic-gate 	 */
14970Sstevel@tonic-gate 	{
14980Sstevel@tonic-gate 		int s = hr_clock_lock();
14990Sstevel@tonic-gate 
15000Sstevel@tonic-gate 		tod_needsync = 1;
15010Sstevel@tonic-gate 		hr_clock_unlock(s);
15020Sstevel@tonic-gate 	}
15030Sstevel@tonic-gate }
15040Sstevel@tonic-gate 
15050Sstevel@tonic-gate /*
15060Sstevel@tonic-gate  * Handle clock tick processing for a thread.
15070Sstevel@tonic-gate  * Check for timer action, enforce CPU rlimit, do profiling etc.
15080Sstevel@tonic-gate  */
15090Sstevel@tonic-gate void
15105788Smv143129 clock_tick(kthread_t *t, int pending)
15110Sstevel@tonic-gate {
15120Sstevel@tonic-gate 	struct proc *pp;
15130Sstevel@tonic-gate 	klwp_id_t    lwp;
15140Sstevel@tonic-gate 	struct as *as;
15155788Smv143129 	clock_t	ticks;
15160Sstevel@tonic-gate 	int	poke = 0;		/* notify another CPU */
15170Sstevel@tonic-gate 	int	user_mode;
15180Sstevel@tonic-gate 	size_t	 rss;
15195788Smv143129 	int i, total_usec, usec;
15205788Smv143129 	rctl_qty_t secs;
15215788Smv143129 
15225788Smv143129 	ASSERT(pending > 0);
15230Sstevel@tonic-gate 
15240Sstevel@tonic-gate 	/* Must be operating on a lwp/thread */
15250Sstevel@tonic-gate 	if ((lwp = ttolwp(t)) == NULL) {
15260Sstevel@tonic-gate 		panic("clock_tick: no lwp");
15270Sstevel@tonic-gate 		/*NOTREACHED*/
15280Sstevel@tonic-gate 	}
15290Sstevel@tonic-gate 
15305788Smv143129 	for (i = 0; i < pending; i++) {
15315788Smv143129 		CL_TICK(t);	/* Class specific tick processing */
15325788Smv143129 		DTRACE_SCHED1(tick, kthread_t *, t);
15335788Smv143129 	}
15340Sstevel@tonic-gate 
15350Sstevel@tonic-gate 	pp = ttoproc(t);
15360Sstevel@tonic-gate 
15370Sstevel@tonic-gate 	/* pp->p_lock makes sure that the thread does not exit */
15380Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&pp->p_lock));
15390Sstevel@tonic-gate 
15400Sstevel@tonic-gate 	user_mode = (lwp->lwp_state == LWP_USER);
15410Sstevel@tonic-gate 
15425788Smv143129 	ticks = (pp->p_utime + pp->p_stime) % hz;
15430Sstevel@tonic-gate 	/*
15440Sstevel@tonic-gate 	 * Update process times. Should use high res clock and state
15450Sstevel@tonic-gate 	 * changes instead of statistical sampling method. XXX
15460Sstevel@tonic-gate 	 */
15470Sstevel@tonic-gate 	if (user_mode) {
15485788Smv143129 		pp->p_utime += pending;
15490Sstevel@tonic-gate 	} else {
15505788Smv143129 		pp->p_stime += pending;
15510Sstevel@tonic-gate 	}
15525788Smv143129 
15535788Smv143129 	pp->p_ttime += pending;
15540Sstevel@tonic-gate 	as = pp->p_as;
15550Sstevel@tonic-gate 
15560Sstevel@tonic-gate 	/*
15570Sstevel@tonic-gate 	 * Update user profiling statistics. Get the pc from the
15580Sstevel@tonic-gate 	 * lwp when the AST happens.
15590Sstevel@tonic-gate 	 */
15600Sstevel@tonic-gate 	if (pp->p_prof.pr_scale) {
15615788Smv143129 		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
15620Sstevel@tonic-gate 		if (user_mode) {
15630Sstevel@tonic-gate 			poke = 1;
15640Sstevel@tonic-gate 			aston(t);
15650Sstevel@tonic-gate 		}
15660Sstevel@tonic-gate 	}
15670Sstevel@tonic-gate 
15685788Smv143129 	/*
15695788Smv143129 	 * If CPU was in user state, process lwp-virtual time
15705788Smv143129 	 * interval timer. The value passed to itimerdecr() has to be
15715788Smv143129 	 * in microseconds and has to be less than one second. Hence
15725788Smv143129 	 * this loop.
15735788Smv143129 	 */
15745788Smv143129 	total_usec = usec_per_tick * pending;
15755788Smv143129 	while (total_usec > 0) {
15765788Smv143129 		usec = MIN(total_usec, (MICROSEC - 1));
15775788Smv143129 		if (user_mode &&
15785788Smv143129 		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
15795788Smv143129 		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
15805788Smv143129 			poke = 1;
15815788Smv143129 			sigtoproc(pp, t, SIGVTALRM);
15825788Smv143129 		}
15835788Smv143129 		total_usec -= usec;
15845788Smv143129 	}
15850Sstevel@tonic-gate 
15860Sstevel@tonic-gate 	/*
15875788Smv143129 	 * If CPU was in user state, process lwp-profile
15880Sstevel@tonic-gate 	 * interval timer.
15890Sstevel@tonic-gate 	 */
15905788Smv143129 	total_usec = usec_per_tick * pending;
15915788Smv143129 	while (total_usec > 0) {
15925788Smv143129 		usec = MIN(total_usec, (MICROSEC - 1));
15935788Smv143129 		if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
15945788Smv143129 		    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
15955788Smv143129 			poke = 1;
15965788Smv143129 			sigtoproc(pp, t, SIGPROF);
15975788Smv143129 		}
15985788Smv143129 		total_usec -= usec;
15990Sstevel@tonic-gate 	}
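
	/*
	 * Example (illustrative): with usec_per_tick = 10000 (hz = 100) and
	 * pending = 150, total_usec is 1500000, so each of the two loops
	 * above hands itimerdecr() one chunk of 999999 usec followed by one
	 * of 500001 usec, keeping every call under the one-second limit.
	 */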
16000Sstevel@tonic-gate 
16010Sstevel@tonic-gate 	/*
16020Sstevel@tonic-gate 	 * Enforce CPU resource controls:
16030Sstevel@tonic-gate 	 *   (a) process.max-cpu-time resource control
16045788Smv143129 	 *
16055788Smv143129 	 * Perform the check only if we have accumulated more than a second.
16060Sstevel@tonic-gate 	 */
16075788Smv143129 	if ((ticks + pending) >= hz) {
16085788Smv143129 		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
16095788Smv143129 		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
16105788Smv143129 	}
16110Sstevel@tonic-gate 
16120Sstevel@tonic-gate 	/*
16130Sstevel@tonic-gate 	 *   (b) task.max-cpu-time resource control
16145788Smv143129 	 *
16155788Smv143129 	 * If we have accumulated enough ticks, increment the task CPU
16165788Smv143129 	 * time usage and test for the resource limit. This minimizes the
16175788Smv143129 	 * number of calls to rctl_test(). The task CPU time mutex
16185788Smv143129 	 * is highly contended as many processes can be sharing a task.
16190Sstevel@tonic-gate 	 */
16205788Smv143129 	if (pp->p_ttime >= clock_tick_proc_max) {
16215788Smv143129 		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
16225788Smv143129 		pp->p_ttime = 0;
16235788Smv143129 		if (secs) {
16245788Smv143129 			(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
16255788Smv143129 			    pp, secs, RCA_UNSAFE_SIGINFO);
16265788Smv143129 		}
16275788Smv143129 	}
16280Sstevel@tonic-gate 
16290Sstevel@tonic-gate 	/*
16300Sstevel@tonic-gate 	 * Update memory usage for the currently running process.
16310Sstevel@tonic-gate 	 */
16320Sstevel@tonic-gate 	rss = rm_asrss(as);
16330Sstevel@tonic-gate 	PTOU(pp)->u_mem += rss;
16340Sstevel@tonic-gate 	if (rss > PTOU(pp)->u_mem_max)
16350Sstevel@tonic-gate 		PTOU(pp)->u_mem_max = rss;
16360Sstevel@tonic-gate 
16370Sstevel@tonic-gate 	/*
16380Sstevel@tonic-gate 	 * Notify the CPU the thread is running on.
16390Sstevel@tonic-gate 	 */
16400Sstevel@tonic-gate 	if (poke && t->t_cpu != CPU)
16410Sstevel@tonic-gate 		poke_cpu(t->t_cpu->cpu_id);
16420Sstevel@tonic-gate }
16430Sstevel@tonic-gate 
16440Sstevel@tonic-gate void
16450Sstevel@tonic-gate profil_tick(uintptr_t upc)
16460Sstevel@tonic-gate {
16470Sstevel@tonic-gate 	int ticks;
16480Sstevel@tonic-gate 	proc_t *p = ttoproc(curthread);
16490Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
16500Sstevel@tonic-gate 	struct prof *pr = &p->p_prof;
16510Sstevel@tonic-gate 
16520Sstevel@tonic-gate 	do {
16530Sstevel@tonic-gate 		ticks = lwp->lwp_oweupc;
16540Sstevel@tonic-gate 	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);
16550Sstevel@tonic-gate 
16560Sstevel@tonic-gate 	mutex_enter(&p->p_pflock);
16570Sstevel@tonic-gate 	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
16580Sstevel@tonic-gate 		/*
16590Sstevel@tonic-gate 		 * Old-style profiling
16600Sstevel@tonic-gate 		 */
16610Sstevel@tonic-gate 		uint16_t *slot = pr->pr_base;
16620Sstevel@tonic-gate 		uint16_t old, new;
16630Sstevel@tonic-gate 		if (pr->pr_scale != 2) {
16640Sstevel@tonic-gate 			uintptr_t delta = upc - pr->pr_off;
16650Sstevel@tonic-gate 			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
16660Sstevel@tonic-gate 			    (((delta & 0xffff) * pr->pr_scale) >> 16);
16670Sstevel@tonic-gate 			if (byteoff >= (uintptr_t)pr->pr_size) {
16680Sstevel@tonic-gate 				mutex_exit(&p->p_pflock);
16690Sstevel@tonic-gate 				return;
16700Sstevel@tonic-gate 			}
16710Sstevel@tonic-gate 			slot += byteoff / sizeof (uint16_t);
16720Sstevel@tonic-gate 		}
16730Sstevel@tonic-gate 		if (fuword16(slot, &old) < 0 ||
16740Sstevel@tonic-gate 		    (new = old + ticks) > SHRT_MAX ||
16750Sstevel@tonic-gate 		    suword16(slot, new) < 0) {
16760Sstevel@tonic-gate 			pr->pr_scale = 0;
16770Sstevel@tonic-gate 		}
16780Sstevel@tonic-gate 	} else if (pr->pr_scale == 1) {
16790Sstevel@tonic-gate 		/*
16800Sstevel@tonic-gate 		 * PC Sampling
16810Sstevel@tonic-gate 		 */
16820Sstevel@tonic-gate 		model_t model = lwp_getdatamodel(lwp);
16830Sstevel@tonic-gate 		int result;
16840Sstevel@tonic-gate #ifdef __lint
16850Sstevel@tonic-gate 		model = model;
16860Sstevel@tonic-gate #endif
16870Sstevel@tonic-gate 		while (ticks-- > 0) {
16880Sstevel@tonic-gate 			if (pr->pr_samples == pr->pr_size) {
16890Sstevel@tonic-gate 				/* buffer full, turn off sampling */
16900Sstevel@tonic-gate 				pr->pr_scale = 0;
16910Sstevel@tonic-gate 				break;
16920Sstevel@tonic-gate 			}
16930Sstevel@tonic-gate 			switch (SIZEOF_PTR(model)) {
16940Sstevel@tonic-gate 			case sizeof (uint32_t):
16950Sstevel@tonic-gate 				result = suword32(pr->pr_base, (uint32_t)upc);
16960Sstevel@tonic-gate 				break;
16970Sstevel@tonic-gate #ifdef _LP64
16980Sstevel@tonic-gate 			case sizeof (uint64_t):
16990Sstevel@tonic-gate 				result = suword64(pr->pr_base, (uint64_t)upc);
17000Sstevel@tonic-gate 				break;
17010Sstevel@tonic-gate #endif
17020Sstevel@tonic-gate 			default:
17030Sstevel@tonic-gate 				cmn_err(CE_WARN, "profil_tick: unexpected "
17040Sstevel@tonic-gate 				    "data model");
17050Sstevel@tonic-gate 				result = -1;
17060Sstevel@tonic-gate 				break;
17070Sstevel@tonic-gate 			}
17080Sstevel@tonic-gate 			if (result != 0) {
17090Sstevel@tonic-gate 				pr->pr_scale = 0;
17100Sstevel@tonic-gate 				break;
17110Sstevel@tonic-gate 			}
17120Sstevel@tonic-gate 			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
17130Sstevel@tonic-gate 			pr->pr_samples++;
17140Sstevel@tonic-gate 		}
17150Sstevel@tonic-gate 	}
17160Sstevel@tonic-gate 	mutex_exit(&p->p_pflock);
17170Sstevel@tonic-gate }
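
/*
 * Worked example (illustrative): in the old-style profiling path above,
 * pr_scale acts as a 16.16 fixed-point fraction of the pc offset, i.e.
 * byteoff is effectively (delta * pr_scale) >> 16.  With pr_off = 0x10000,
 * upc = 0x10008 and pr_scale = 0x8000, delta is 8 and byteoff is
 * (8 * 0x8000) >> 16 = 4, so the 16-bit counter at index 2 of the user's
 * profiling buffer is incremented by the pending tick count.
 */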
17180Sstevel@tonic-gate 
17190Sstevel@tonic-gate static void
17200Sstevel@tonic-gate delay_wakeup(void *arg)
17210Sstevel@tonic-gate {
172210696SDavid.Hollister@Sun.COM 	kthread_t	*t = arg;
17230Sstevel@tonic-gate 
17240Sstevel@tonic-gate 	mutex_enter(&t->t_delay_lock);
17250Sstevel@tonic-gate 	cv_signal(&t->t_delay_cv);
17260Sstevel@tonic-gate 	mutex_exit(&t->t_delay_lock);
17270Sstevel@tonic-gate }
17280Sstevel@tonic-gate 
172910696SDavid.Hollister@Sun.COM /*
173010696SDavid.Hollister@Sun.COM  * The delay(9F) man page indicates that it can only be called from user or
173110696SDavid.Hollister@Sun.COM  * kernel context - detect and diagnose bad calls. The following macro will
173210696SDavid.Hollister@Sun.COM  * produce a limited number of messages identifying bad callers.  This is done
173310696SDavid.Hollister@Sun.COM  * in a macro so that caller() is meaningful. When a bad caller is identified,
173410696SDavid.Hollister@Sun.COM  * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
173510696SDavid.Hollister@Sun.COM  */
173610696SDavid.Hollister@Sun.COM #define	DELAY_CONTEXT_CHECK()	{					\
173710696SDavid.Hollister@Sun.COM 	uint32_t	m;						\
173810696SDavid.Hollister@Sun.COM 	char		*f;						\
173910696SDavid.Hollister@Sun.COM 	ulong_t		off;						\
174010696SDavid.Hollister@Sun.COM 									\
174110696SDavid.Hollister@Sun.COM 	m = delay_from_interrupt_msg;					\
174210696SDavid.Hollister@Sun.COM 	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
174310696SDavid.Hollister@Sun.COM 	    !panicstr && !devinfo_freeze &&				\
174410696SDavid.Hollister@Sun.COM 	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
174510696SDavid.Hollister@Sun.COM 		f = modgetsymname((uintptr_t)caller(), &off);		\
174610696SDavid.Hollister@Sun.COM 		cmn_err(CE_WARN, "delay(9F) called from "		\
174710696SDavid.Hollister@Sun.COM 		    "interrupt context: %s`%s",				\
174810696SDavid.Hollister@Sun.COM 		    mod_containing_pc(caller()), f ? f : "...");	\
174910696SDavid.Hollister@Sun.COM 	}								\
175010696SDavid.Hollister@Sun.COM }
175110696SDavid.Hollister@Sun.COM 
175210696SDavid.Hollister@Sun.COM /*
175310696SDavid.Hollister@Sun.COM  * delay_common: common delay code.
175410696SDavid.Hollister@Sun.COM  */
175510696SDavid.Hollister@Sun.COM static void
175610696SDavid.Hollister@Sun.COM delay_common(clock_t ticks)
175710696SDavid.Hollister@Sun.COM {
175810696SDavid.Hollister@Sun.COM 	kthread_t	*t = curthread;
175910696SDavid.Hollister@Sun.COM 	clock_t		deadline;
176010696SDavid.Hollister@Sun.COM 	clock_t		timeleft;
176110696SDavid.Hollister@Sun.COM 	callout_id_t	id;
176210696SDavid.Hollister@Sun.COM 
176310696SDavid.Hollister@Sun.COM 	/* If timeouts aren't running all we can do is spin. */
176410696SDavid.Hollister@Sun.COM 	if (panicstr || devinfo_freeze) {
176510696SDavid.Hollister@Sun.COM 		/* Convert delay(9F) call into drv_usecwait(9F) call. */
176610696SDavid.Hollister@Sun.COM 		if (ticks > 0)
176710696SDavid.Hollister@Sun.COM 			drv_usecwait(TICK_TO_USEC(ticks));
176810696SDavid.Hollister@Sun.COM 		return;
176910696SDavid.Hollister@Sun.COM 	}
177010696SDavid.Hollister@Sun.COM 
177111066Srafael.vanoni@sun.com 	deadline = ddi_get_lbolt() + ticks;
177211066Srafael.vanoni@sun.com 	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
177310696SDavid.Hollister@Sun.COM 		mutex_enter(&t->t_delay_lock);
177410696SDavid.Hollister@Sun.COM 		id = timeout_default(delay_wakeup, t, timeleft);
177510696SDavid.Hollister@Sun.COM 		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
177610696SDavid.Hollister@Sun.COM 		mutex_exit(&t->t_delay_lock);
177710696SDavid.Hollister@Sun.COM 		(void) untimeout_default(id, 0);
177810696SDavid.Hollister@Sun.COM 	}
177910696SDavid.Hollister@Sun.COM }
178010696SDavid.Hollister@Sun.COM 
178110696SDavid.Hollister@Sun.COM /*
178210696SDavid.Hollister@Sun.COM  * Delay specified number of clock ticks.
178310696SDavid.Hollister@Sun.COM  */
17840Sstevel@tonic-gate void
17850Sstevel@tonic-gate delay(clock_t ticks)
17860Sstevel@tonic-gate {
178710696SDavid.Hollister@Sun.COM 	DELAY_CONTEXT_CHECK();
178810696SDavid.Hollister@Sun.COM 
178910696SDavid.Hollister@Sun.COM 	delay_common(ticks);
179010696SDavid.Hollister@Sun.COM }
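
/*
 * Usage sketch (illustrative, not from the original source): a driver
 * running in kernel thread context that wants to block for roughly 50 ms
 * could call
 *
 *	delay(drv_usectohz(50000));
 *
 * whereas code that may run in interrupt context should busy-wait with
 * drv_usecwait(50000) instead, as the DELAY_CONTEXT_CHECK() warning above
 * suggests.
 */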
17910Sstevel@tonic-gate 
179210696SDavid.Hollister@Sun.COM /*
179310696SDavid.Hollister@Sun.COM  * Delay a random number of clock ticks between 1 and ticks.
179410696SDavid.Hollister@Sun.COM  */
179510696SDavid.Hollister@Sun.COM void
179610696SDavid.Hollister@Sun.COM delay_random(clock_t ticks)
179710696SDavid.Hollister@Sun.COM {
179810696SDavid.Hollister@Sun.COM 	int	r;
17990Sstevel@tonic-gate 
180010696SDavid.Hollister@Sun.COM 	DELAY_CONTEXT_CHECK();
180110696SDavid.Hollister@Sun.COM 
180210696SDavid.Hollister@Sun.COM 	(void) random_get_pseudo_bytes((void *)&r, sizeof (r));
180310696SDavid.Hollister@Sun.COM 	if (ticks == 0)
180410696SDavid.Hollister@Sun.COM 		ticks = 1;
180510696SDavid.Hollister@Sun.COM 	ticks = (r % ticks) + 1;
180610696SDavid.Hollister@Sun.COM 	delay_common(ticks);
18070Sstevel@tonic-gate }
18080Sstevel@tonic-gate 
18090Sstevel@tonic-gate /*
18100Sstevel@tonic-gate  * Like delay, but interruptible by a signal.
18110Sstevel@tonic-gate  */
18120Sstevel@tonic-gate int
18130Sstevel@tonic-gate delay_sig(clock_t ticks)
18140Sstevel@tonic-gate {
181510696SDavid.Hollister@Sun.COM 	kthread_t	*t = curthread;
181610696SDavid.Hollister@Sun.COM 	clock_t		deadline;
181710696SDavid.Hollister@Sun.COM 	clock_t		rc;
18180Sstevel@tonic-gate 
181910696SDavid.Hollister@Sun.COM 	/* If timeouts aren't running all we can do is spin. */
182010696SDavid.Hollister@Sun.COM 	if (panicstr || devinfo_freeze) {
182110696SDavid.Hollister@Sun.COM 		if (ticks > 0)
182210696SDavid.Hollister@Sun.COM 			drv_usecwait(TICK_TO_USEC(ticks));
182310696SDavid.Hollister@Sun.COM 		return (0);
182410696SDavid.Hollister@Sun.COM 	}
182510696SDavid.Hollister@Sun.COM 
182611066Srafael.vanoni@sun.com 	deadline = ddi_get_lbolt() + ticks;
182710696SDavid.Hollister@Sun.COM 	mutex_enter(&t->t_delay_lock);
18280Sstevel@tonic-gate 	do {
182910696SDavid.Hollister@Sun.COM 		rc = cv_timedwait_sig(&t->t_delay_cv,
183010696SDavid.Hollister@Sun.COM 		    &t->t_delay_lock, deadline);
183110696SDavid.Hollister@Sun.COM 		/* loop until past deadline or signaled */
18320Sstevel@tonic-gate 	} while (rc > 0);
183310696SDavid.Hollister@Sun.COM 	mutex_exit(&t->t_delay_lock);
18340Sstevel@tonic-gate 	if (rc == 0)
18350Sstevel@tonic-gate 		return (EINTR);
18360Sstevel@tonic-gate 	return (0);
18370Sstevel@tonic-gate }
18380Sstevel@tonic-gate 
183910696SDavid.Hollister@Sun.COM 
18400Sstevel@tonic-gate #define	SECONDS_PER_DAY 86400
18410Sstevel@tonic-gate 
18420Sstevel@tonic-gate /*
18430Sstevel@tonic-gate  * Initialize the system time based on the TOD chip.  approx is used as
18440Sstevel@tonic-gate  * an approximation of time (e.g. from the filesystem) in the event that
18450Sstevel@tonic-gate  * the TOD chip has been cleared or is unresponsive.  An approx of -1
18460Sstevel@tonic-gate  * means the filesystem doesn't keep time.
18470Sstevel@tonic-gate  */
18480Sstevel@tonic-gate void
18490Sstevel@tonic-gate clkset(time_t approx)
18500Sstevel@tonic-gate {
18510Sstevel@tonic-gate 	timestruc_t ts;
18520Sstevel@tonic-gate 	int spl;
18530Sstevel@tonic-gate 	int set_clock = 0;
18540Sstevel@tonic-gate 
18550Sstevel@tonic-gate 	mutex_enter(&tod_lock);
18560Sstevel@tonic-gate 	ts = tod_get();
18570Sstevel@tonic-gate 
18580Sstevel@tonic-gate 	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
18590Sstevel@tonic-gate 		/*
18600Sstevel@tonic-gate 		 * If the TOD chip is reporting some time after 1971,
18610Sstevel@tonic-gate 		 * then it probably didn't lose power or become otherwise
18620Sstevel@tonic-gate 		 * cleared in the recent past;  check to assure that
18630Sstevel@tonic-gate 		 * the time coming from the filesystem isn't in the future
18640Sstevel@tonic-gate 		 * according to the TOD chip.
18650Sstevel@tonic-gate 		 */
18660Sstevel@tonic-gate 		if (approx != -1 && approx > ts.tv_sec) {
18670Sstevel@tonic-gate 			cmn_err(CE_WARN, "Last shutdown is later "
18680Sstevel@tonic-gate 			    "than time on time-of-day chip; check date.");
18690Sstevel@tonic-gate 		}
18700Sstevel@tonic-gate 	} else {
18710Sstevel@tonic-gate 		/*
18729158SKrishnendu.Sadhukhan@Sun.COM 		 * If the TOD chip isn't giving correct time, set it to the
18739158SKrishnendu.Sadhukhan@Sun.COM 		 * greater of i) approx and ii) 1987. That way if approx
18749158SKrishnendu.Sadhukhan@Sun.COM 		 * is negative or is earlier than 1987, we set the clock
18759158SKrishnendu.Sadhukhan@Sun.COM 		 * back to a time when Oliver North, ALF and Dire Straits
18769158SKrishnendu.Sadhukhan@Sun.COM 		 * were all on the collective brain:  1987.
18770Sstevel@tonic-gate 		 */
18780Sstevel@tonic-gate 		timestruc_t tmp;
18799158SKrishnendu.Sadhukhan@Sun.COM 		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
18809158SKrishnendu.Sadhukhan@Sun.COM 		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
18810Sstevel@tonic-gate 		ts.tv_nsec = 0;
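
		/*
		 * For reference (illustrative): (1987 - 1970) * 365 *
		 * SECONDS_PER_DAY is 536,112,000 seconds, i.e. roughly the
		 * start of 1987 when leap days are ignored.
		 */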
18820Sstevel@tonic-gate 
18830Sstevel@tonic-gate 		/*
18840Sstevel@tonic-gate 		 * Attempt to write the new time to the TOD chip.  Set spl high
18850Sstevel@tonic-gate 		 * to avoid getting preempted between the tod_set and tod_get.
18860Sstevel@tonic-gate 		 */
18870Sstevel@tonic-gate 		spl = splhi();
18880Sstevel@tonic-gate 		tod_set(ts);
18890Sstevel@tonic-gate 		tmp = tod_get();
18900Sstevel@tonic-gate 		splx(spl);
18910Sstevel@tonic-gate 
18920Sstevel@tonic-gate 		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
18930Sstevel@tonic-gate 			tod_broken = 1;
18940Sstevel@tonic-gate 			dosynctodr = 0;
18959158SKrishnendu.Sadhukhan@Sun.COM 			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
18960Sstevel@tonic-gate 		} else {
18970Sstevel@tonic-gate 			cmn_err(CE_WARN, "Time-of-day chip had "
18980Sstevel@tonic-gate 			    "incorrect date; check and reset.");
18990Sstevel@tonic-gate 		}
19000Sstevel@tonic-gate 		set_clock = 1;
19010Sstevel@tonic-gate 	}
19020Sstevel@tonic-gate 
19030Sstevel@tonic-gate 	if (!boot_time) {
19040Sstevel@tonic-gate 		boot_time = ts.tv_sec;
19050Sstevel@tonic-gate 		set_clock = 1;
19060Sstevel@tonic-gate 	}
19070Sstevel@tonic-gate 
19080Sstevel@tonic-gate 	if (set_clock)
19090Sstevel@tonic-gate 		set_hrestime(&ts);
19100Sstevel@tonic-gate 
19110Sstevel@tonic-gate 	mutex_exit(&tod_lock);
19120Sstevel@tonic-gate }
19130Sstevel@tonic-gate 
19144123Sdm120769 int	timechanged;	/* for testing if the system time has been reset */
19150Sstevel@tonic-gate 
19160Sstevel@tonic-gate void
19170Sstevel@tonic-gate set_hrestime(timestruc_t *ts)
19180Sstevel@tonic-gate {
19190Sstevel@tonic-gate 	int spl = hr_clock_lock();
19200Sstevel@tonic-gate 	hrestime = *ts;
19214123Sdm120769 	membar_enter();	/* hrestime must be visible before timechanged++ */
19220Sstevel@tonic-gate 	timedelta = 0;
19234123Sdm120769 	timechanged++;
19240Sstevel@tonic-gate 	hr_clock_unlock(spl);
19258048SMadhavan.Venkataraman@Sun.COM 	callout_hrestime();
19260Sstevel@tonic-gate }
19270Sstevel@tonic-gate 
19280Sstevel@tonic-gate static uint_t deadman_seconds;
19290Sstevel@tonic-gate static uint32_t deadman_panics;
19300Sstevel@tonic-gate static int deadman_enabled = 0;
19310Sstevel@tonic-gate static int deadman_panic_timers = 1;
19320Sstevel@tonic-gate 
19330Sstevel@tonic-gate static void
19340Sstevel@tonic-gate deadman(void)
19350Sstevel@tonic-gate {
19360Sstevel@tonic-gate 	if (panicstr) {
19370Sstevel@tonic-gate 		/*
19380Sstevel@tonic-gate 		 * During panic, other CPUs besides the panic
19390Sstevel@tonic-gate 		 * master continue to handle cyclics and some other
19400Sstevel@tonic-gate 		 * interrupts.  The code below is intended to be
19410Sstevel@tonic-gate 		 * single threaded, so any CPU other than the master
19420Sstevel@tonic-gate 		 * must keep out.
19430Sstevel@tonic-gate 		 */
19440Sstevel@tonic-gate 		if (CPU->cpu_id != panic_cpu.cpu_id)
19450Sstevel@tonic-gate 			return;
19460Sstevel@tonic-gate 
19470Sstevel@tonic-gate 		if (!deadman_panic_timers)
19480Sstevel@tonic-gate 			return; /* allow all timers to be manually disabled */
19490Sstevel@tonic-gate 
19500Sstevel@tonic-gate 		/*
19510Sstevel@tonic-gate 		 * If we are generating a crash dump or syncing filesystems and
19520Sstevel@tonic-gate 		 * the corresponding timer is set, decrement it and re-enter
19530Sstevel@tonic-gate 		 * the panic code to abort it and advance to the next state.
19540Sstevel@tonic-gate 		 * The panic states and triggers are explained in panic.c.
19550Sstevel@tonic-gate 		 */
19560Sstevel@tonic-gate 		if (panic_dump) {
19570Sstevel@tonic-gate 			if (dump_timeleft && (--dump_timeleft == 0)) {
19580Sstevel@tonic-gate 				panic("panic dump timeout");
19590Sstevel@tonic-gate 				/*NOTREACHED*/
19600Sstevel@tonic-gate 			}
19610Sstevel@tonic-gate 		} else if (panic_sync) {
19620Sstevel@tonic-gate 			if (sync_timeleft && (--sync_timeleft == 0)) {
19630Sstevel@tonic-gate 				panic("panic sync timeout");
19640Sstevel@tonic-gate 				/*NOTREACHED*/
19650Sstevel@tonic-gate 			}
19660Sstevel@tonic-gate 		}
19670Sstevel@tonic-gate 
19680Sstevel@tonic-gate 		return;
19690Sstevel@tonic-gate 	}
19700Sstevel@tonic-gate 
197111066Srafael.vanoni@sun.com 	if (deadman_counter != CPU->cpu_deadman_counter) {
197211066Srafael.vanoni@sun.com 		CPU->cpu_deadman_counter = deadman_counter;
19730Sstevel@tonic-gate 		CPU->cpu_deadman_countdown = deadman_seconds;
19740Sstevel@tonic-gate 		return;
19750Sstevel@tonic-gate 	}
19760Sstevel@tonic-gate 
19776054Svb160487 	if (--CPU->cpu_deadman_countdown > 0)
19780Sstevel@tonic-gate 		return;
19790Sstevel@tonic-gate 
19800Sstevel@tonic-gate 	/*
19810Sstevel@tonic-gate 	 * Regardless of whether or not we actually bring the system down,
19820Sstevel@tonic-gate 	 * bump the deadman_panics variable.
19830Sstevel@tonic-gate 	 *
19840Sstevel@tonic-gate 	 * N.B. deadman_panics is incremented once for each CPU that
19850Sstevel@tonic-gate 	 * passes through here.  It's expected that all the CPUs will
19860Sstevel@tonic-gate 	 * detect this condition within one second of each other, so
19870Sstevel@tonic-gate 	 * when deadman_enabled is off, deadman_panics will
19880Sstevel@tonic-gate 	 * typically be a multiple of the total number of CPUs in
19890Sstevel@tonic-gate 	 * the system.
19900Sstevel@tonic-gate 	 */
19910Sstevel@tonic-gate 	atomic_add_32(&deadman_panics, 1);
19920Sstevel@tonic-gate 
19930Sstevel@tonic-gate 	if (!deadman_enabled) {
19940Sstevel@tonic-gate 		CPU->cpu_deadman_countdown = deadman_seconds;
19950Sstevel@tonic-gate 		return;
19960Sstevel@tonic-gate 	}
19970Sstevel@tonic-gate 
19980Sstevel@tonic-gate 	/*
19990Sstevel@tonic-gate 	 * If we're here, we want to bring the system down.
20000Sstevel@tonic-gate 	 */
20010Sstevel@tonic-gate 	panic("deadman: timed out after %d seconds of clock "
20020Sstevel@tonic-gate 	    "inactivity", deadman_seconds);
20030Sstevel@tonic-gate 	/*NOTREACHED*/
20040Sstevel@tonic-gate }
20050Sstevel@tonic-gate 
20060Sstevel@tonic-gate /*ARGSUSED*/
20070Sstevel@tonic-gate static void
20080Sstevel@tonic-gate deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
20090Sstevel@tonic-gate {
201011066Srafael.vanoni@sun.com 	cpu->cpu_deadman_counter = 0;
20110Sstevel@tonic-gate 	cpu->cpu_deadman_countdown = deadman_seconds;
20120Sstevel@tonic-gate 
20130Sstevel@tonic-gate 	hdlr->cyh_func = (cyc_func_t)deadman;
20140Sstevel@tonic-gate 	hdlr->cyh_level = CY_HIGH_LEVEL;
20150Sstevel@tonic-gate 	hdlr->cyh_arg = NULL;
20160Sstevel@tonic-gate 
20170Sstevel@tonic-gate 	/*
20180Sstevel@tonic-gate 	 * Stagger the CPUs so that they don't all run deadman() at
20190Sstevel@tonic-gate 	 * the same time.  Simplest reason to do this is to make it
20200Sstevel@tonic-gate 	 * more likely that only one CPU will panic in case of a
20210Sstevel@tonic-gate 	 * timeout.  This is (strictly speaking) an aesthetic, not a
20220Sstevel@tonic-gate 	 * technical consideration.
20230Sstevel@tonic-gate 	 */
20240Sstevel@tonic-gate 	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
20250Sstevel@tonic-gate 	when->cyt_interval = NANOSEC;
20260Sstevel@tonic-gate }
20270Sstevel@tonic-gate 
20280Sstevel@tonic-gate 
20290Sstevel@tonic-gate void
20300Sstevel@tonic-gate deadman_init(void)
20310Sstevel@tonic-gate {
20320Sstevel@tonic-gate 	cyc_omni_handler_t hdlr;
20330Sstevel@tonic-gate 
20340Sstevel@tonic-gate 	if (deadman_seconds == 0)
20350Sstevel@tonic-gate 		deadman_seconds = snoop_interval / MICROSEC;
20360Sstevel@tonic-gate 
20370Sstevel@tonic-gate 	if (snooping)
20380Sstevel@tonic-gate 		deadman_enabled = 1;
20390Sstevel@tonic-gate 
20400Sstevel@tonic-gate 	hdlr.cyo_online = deadman_online;
20410Sstevel@tonic-gate 	hdlr.cyo_offline = NULL;
20420Sstevel@tonic-gate 	hdlr.cyo_arg = NULL;
20430Sstevel@tonic-gate 
20440Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
20450Sstevel@tonic-gate 	deadman_cyclic = cyclic_add_omni(&hdlr);
20460Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
20470Sstevel@tonic-gate }
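
/*
 * Note (illustrative, based on the variables above): the deadman is armed
 * only when 'snooping' is set (e.g. via /etc/system), in which case each
 * CPU's cyclic panics the system after deadman_seconds consecutive seconds
 * during which the clock-driven deadman_counter has not advanced.
 */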
20480Sstevel@tonic-gate 
20490Sstevel@tonic-gate /*
20500Sstevel@tonic-gate  * tod_fault() is for updating tod validate mechanism state:
20510Sstevel@tonic-gate  * (1) TOD_NOFAULT: for resetting the state to 'normal'.
20520Sstevel@tonic-gate  *     currently used for debugging only
20530Sstevel@tonic-gate  * (2) The following four cases detected by tod validate mechanism:
20540Sstevel@tonic-gate  *       TOD_REVERSED: current tod value is less than previous value.
20550Sstevel@tonic-gate  *       TOD_STALLED: current tod value hasn't advanced.
20560Sstevel@tonic-gate  *       TOD_JUMPED: current tod value advanced too far from previous value.
20570Sstevel@tonic-gate  *       TOD_RATECHANGED: the ratio between average tod delta and
20580Sstevel@tonic-gate  *       average tick delta has changed.
20595084Sjohnlev  * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
20605084Sjohnlev  *     a virtual TOD provided by a hypervisor.
20610Sstevel@tonic-gate  */
20620Sstevel@tonic-gate enum tod_fault_type
20630Sstevel@tonic-gate tod_fault(enum tod_fault_type ftype, int off)
20640Sstevel@tonic-gate {
20650Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tod_lock));
20660Sstevel@tonic-gate 
20670Sstevel@tonic-gate 	if (tod_faulted != ftype) {
20680Sstevel@tonic-gate 		switch (ftype) {
20690Sstevel@tonic-gate 		case TOD_NOFAULT:
207078Sae112802 			plat_tod_fault(TOD_NOFAULT);
20710Sstevel@tonic-gate 			cmn_err(CE_NOTE, "Restarted tracking "
20725076Smishra 			    "Time of Day clock.");
20730Sstevel@tonic-gate 			tod_faulted = ftype;
20740Sstevel@tonic-gate 			break;
20750Sstevel@tonic-gate 		case TOD_REVERSED:
20760Sstevel@tonic-gate 		case TOD_JUMPED:
20770Sstevel@tonic-gate 			if (tod_faulted == TOD_NOFAULT) {
207878Sae112802 				plat_tod_fault(ftype);
20790Sstevel@tonic-gate 				cmn_err(CE_WARN, "Time of Day clock error: "
20800Sstevel@tonic-gate 				    "reason [%s by 0x%x]. -- "
20810Sstevel@tonic-gate 				    " Stopped tracking Time Of Day clock.",
20820Sstevel@tonic-gate 				    tod_fault_table[ftype], off);
20830Sstevel@tonic-gate 				tod_faulted = ftype;
20840Sstevel@tonic-gate 			}
20850Sstevel@tonic-gate 			break;
20860Sstevel@tonic-gate 		case TOD_STALLED:
20870Sstevel@tonic-gate 		case TOD_RATECHANGED:
20880Sstevel@tonic-gate 			if (tod_faulted == TOD_NOFAULT) {
208978Sae112802 				plat_tod_fault(ftype);
20900Sstevel@tonic-gate 				cmn_err(CE_WARN, "Time of Day clock error: "
20910Sstevel@tonic-gate 				    "reason [%s]. -- "
20920Sstevel@tonic-gate 				    " Stopped tracking Time Of Day clock.",
20930Sstevel@tonic-gate 				    tod_fault_table[ftype]);
20940Sstevel@tonic-gate 				tod_faulted = ftype;
20950Sstevel@tonic-gate 			}
20960Sstevel@tonic-gate 			break;
20975084Sjohnlev 		case TOD_RDONLY:
20985084Sjohnlev 			if (tod_faulted == TOD_NOFAULT) {
20995084Sjohnlev 				plat_tod_fault(ftype);
21005084Sjohnlev 				cmn_err(CE_NOTE, "!Time of Day clock is "
21015084Sjohnlev 				    "Read-Only; set of Date/Time will not "
21025084Sjohnlev 				    "persist across reboot.");
21035084Sjohnlev 				tod_faulted = ftype;
21045084Sjohnlev 			}
21055084Sjohnlev 			break;
21060Sstevel@tonic-gate 		default:
21070Sstevel@tonic-gate 			break;
21080Sstevel@tonic-gate 		}
21090Sstevel@tonic-gate 	}
21100Sstevel@tonic-gate 	return (tod_faulted);
21110Sstevel@tonic-gate }
21120Sstevel@tonic-gate 
21130Sstevel@tonic-gate void
21140Sstevel@tonic-gate tod_fault_reset()
21150Sstevel@tonic-gate {
21160Sstevel@tonic-gate 	tod_fault_reset_flag = 1;
21170Sstevel@tonic-gate }
21180Sstevel@tonic-gate 
21190Sstevel@tonic-gate 
21200Sstevel@tonic-gate /*
21210Sstevel@tonic-gate  * tod_validate() is used for checking values returned by tod_get().
21220Sstevel@tonic-gate  * Four error cases can be detected by this routine:
21230Sstevel@tonic-gate  *   TOD_REVERSED: current tod value is less than previous.
21240Sstevel@tonic-gate  *   TOD_STALLED: current tod value hasn't advanced.
21250Sstevel@tonic-gate  *   TOD_JUMPED: current tod value advanced too far from previous value.
21260Sstevel@tonic-gate  *   TOD_RATECHANGED: the ratio between average tod delta and
21270Sstevel@tonic-gate  *   average tick delta has changed.
21280Sstevel@tonic-gate  */
21290Sstevel@tonic-gate time_t
21300Sstevel@tonic-gate tod_validate(time_t tod)
21310Sstevel@tonic-gate {
21320Sstevel@tonic-gate 	time_t diff_tod;
21330Sstevel@tonic-gate 	hrtime_t diff_tick;
21340Sstevel@tonic-gate 
21350Sstevel@tonic-gate 	long dtick;
21360Sstevel@tonic-gate 	int dtick_delta;
21370Sstevel@tonic-gate 
21380Sstevel@tonic-gate 	int off = 0;
21390Sstevel@tonic-gate 	enum tod_fault_type tod_bad = TOD_NOFAULT;
21400Sstevel@tonic-gate 
21410Sstevel@tonic-gate 	static int firsttime = 1;
21420Sstevel@tonic-gate 
21430Sstevel@tonic-gate 	static time_t prev_tod = 0;
21440Sstevel@tonic-gate 	static hrtime_t prev_tick = 0;
21450Sstevel@tonic-gate 	static long dtick_avg = TOD_REF_FREQ;
21460Sstevel@tonic-gate 
21470Sstevel@tonic-gate 	hrtime_t tick = gethrtime();
21480Sstevel@tonic-gate 
21490Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tod_lock));
21500Sstevel@tonic-gate 
21510Sstevel@tonic-gate 	/*
21520Sstevel@tonic-gate 	 * tod_validate_enable is patchable via /etc/system.
2153950Ssethg 	 * If TOD is already faulted, or if TOD validation is deferred,
2154950Ssethg 	 * there is nothing to do.
21550Sstevel@tonic-gate 	 */
2156950Ssethg 	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2157950Ssethg 	    tod_validate_deferred) {
21580Sstevel@tonic-gate 		return (tod);
21590Sstevel@tonic-gate 	}
21600Sstevel@tonic-gate 
21610Sstevel@tonic-gate 	/*
21620Sstevel@tonic-gate 	 * Update prev_tod and prev_tick values for first run
21630Sstevel@tonic-gate 	 */
21640Sstevel@tonic-gate 	if (firsttime) {
21650Sstevel@tonic-gate 		firsttime = 0;
21660Sstevel@tonic-gate 		prev_tod = tod;
21670Sstevel@tonic-gate 		prev_tick = tick;
21680Sstevel@tonic-gate 		return (tod);
21690Sstevel@tonic-gate 	}
21700Sstevel@tonic-gate 
21710Sstevel@tonic-gate 	/*
21720Sstevel@tonic-gate 	 * For either of these conditions we need to reset ourselves
21730Sstevel@tonic-gate 	 * and start validation from zero, since each condition
21740Sstevel@tonic-gate 	 * indicates that the TOD will be updated with a new value.
21750Sstevel@tonic-gate 	 * Also note that tod_needsync will be reset in clock().
21760Sstevel@tonic-gate 	 */
21770Sstevel@tonic-gate 	if (tod_needsync || tod_fault_reset_flag) {
21780Sstevel@tonic-gate 		firsttime = 1;
21790Sstevel@tonic-gate 		prev_tod = 0;
21800Sstevel@tonic-gate 		prev_tick = 0;
21810Sstevel@tonic-gate 		dtick_avg = TOD_REF_FREQ;
21820Sstevel@tonic-gate 
21830Sstevel@tonic-gate 		if (tod_fault_reset_flag)
21840Sstevel@tonic-gate 			tod_fault_reset_flag = 0;
21850Sstevel@tonic-gate 
21860Sstevel@tonic-gate 		return (tod);
21870Sstevel@tonic-gate 	}
21880Sstevel@tonic-gate 
21890Sstevel@tonic-gate 	/* test hook */
21900Sstevel@tonic-gate 	switch (tod_unit_test) {
21910Sstevel@tonic-gate 	case 1: /* for testing jumping tod */
21920Sstevel@tonic-gate 		tod += tod_test_injector;
21930Sstevel@tonic-gate 		tod_unit_test = 0;
21940Sstevel@tonic-gate 		break;
21950Sstevel@tonic-gate 	case 2:	/* for testing stuck tod bit */
21960Sstevel@tonic-gate 		tod |= 1 << tod_test_injector;
21970Sstevel@tonic-gate 		tod_unit_test = 0;
21980Sstevel@tonic-gate 		break;
21990Sstevel@tonic-gate 	case 3:	/* for testing stalled tod */
22000Sstevel@tonic-gate 		tod = prev_tod;
22010Sstevel@tonic-gate 		tod_unit_test = 0;
22020Sstevel@tonic-gate 		break;
22030Sstevel@tonic-gate 	case 4:	/* reset tod fault status */
22040Sstevel@tonic-gate 		(void) tod_fault(TOD_NOFAULT, 0);
22050Sstevel@tonic-gate 		tod_unit_test = 0;
22060Sstevel@tonic-gate 		break;
22070Sstevel@tonic-gate 	default:
22080Sstevel@tonic-gate 		break;
22090Sstevel@tonic-gate 	}
22100Sstevel@tonic-gate 
22110Sstevel@tonic-gate 	diff_tod = tod - prev_tod;
22120Sstevel@tonic-gate 	diff_tick = tick - prev_tick;
22130Sstevel@tonic-gate 
22140Sstevel@tonic-gate 	ASSERT(diff_tick >= 0);
22150Sstevel@tonic-gate 
22160Sstevel@tonic-gate 	if (diff_tod < 0) {
22170Sstevel@tonic-gate 		/* ERROR - tod reversed */
22180Sstevel@tonic-gate 		tod_bad = TOD_REVERSED;
22190Sstevel@tonic-gate 		off = (int)(prev_tod - tod);
22200Sstevel@tonic-gate 	} else if (diff_tod == 0) {
22210Sstevel@tonic-gate 		/* tod did not advance */
22220Sstevel@tonic-gate 		if (diff_tick > TOD_STALL_THRESHOLD) {
22230Sstevel@tonic-gate 			/* ERROR - tod stalled */
22240Sstevel@tonic-gate 			tod_bad = TOD_STALLED;
22250Sstevel@tonic-gate 		} else {
22260Sstevel@tonic-gate 			/*
22270Sstevel@tonic-gate 			 * Don't update prev_tick here, so that
22280Sstevel@tonic-gate 			 * diff_tick keeps being measured from the
22290Sstevel@tonic-gate 			 * first time diff_tod was seen to be zero.
22300Sstevel@tonic-gate 			 */
22310Sstevel@tonic-gate 			return (tod);
22320Sstevel@tonic-gate 		}
22330Sstevel@tonic-gate 	} else {
22340Sstevel@tonic-gate 		/* calculate dtick */
22350Sstevel@tonic-gate 		dtick = diff_tick / diff_tod;
22360Sstevel@tonic-gate 
22370Sstevel@tonic-gate 		/* update dtick averages */
22380Sstevel@tonic-gate 		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);
22390Sstevel@tonic-gate 
22400Sstevel@tonic-gate 		/*
22410Sstevel@tonic-gate 		 * Calculate dtick_delta as the variation from the
22420Sstevel@tonic-gate 		 * reference frequency, in quarters of that frequency.
22430Sstevel@tonic-gate 		 */
22440Sstevel@tonic-gate 		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
22455076Smishra 		    (TOD_REF_FREQ >> 2);
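		/*
		 * In other words, dtick_delta only becomes non-zero once
		 * dtick_avg has moved at least a quarter of TOD_REF_FREQ
		 * away from the reference, i.e. the averaged TOD rate
		 * differs from the reference frequency by 25% or more.
		 */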
22460Sstevel@tonic-gate 
22470Sstevel@tonic-gate 		/*
22480Sstevel@tonic-gate 		 * Even with a perfectly functioning TOD device,
22490Sstevel@tonic-gate 		 * the rate calculated above is inaccurate when the
22500Sstevel@tonic-gate 		 * number of elapsed seconds is low (less than 5
22510Sstevel@tonic-gate 		 * seconds) and can fall outside tolerance, causing
22520Sstevel@tonic-gate 		 * a spurious error.  Only apply the checks once
22530Sstevel@tonic-gate 		 * enough time has elapsed.
22540Sstevel@tonic-gate 		 */
22550Sstevel@tonic-gate 		if (diff_tod > 4) {
22560Sstevel@tonic-gate 			if (dtick < TOD_JUMP_THRESHOLD) {
22570Sstevel@tonic-gate 				/* ERROR - tod jumped */
22580Sstevel@tonic-gate 				tod_bad = TOD_JUMPED;
22590Sstevel@tonic-gate 				off = (int)diff_tod;
22600Sstevel@tonic-gate 			} else if (dtick_delta) {
22610Sstevel@tonic-gate 				/* ERROR - change in clock rate */
22620Sstevel@tonic-gate 				tod_bad = TOD_RATECHANGED;
22630Sstevel@tonic-gate 			}
22640Sstevel@tonic-gate 		}
22650Sstevel@tonic-gate 	}
22660Sstevel@tonic-gate 
22670Sstevel@tonic-gate 	if (tod_bad != TOD_NOFAULT) {
22680Sstevel@tonic-gate 		(void) tod_fault(tod_bad, off);
22690Sstevel@tonic-gate 
22700Sstevel@tonic-gate 		/*
22710Sstevel@tonic-gate 		 * Disable dosynctodr since we are going to fault
22720Sstevel@tonic-gate 		 * the TOD chip anyway here
22730Sstevel@tonic-gate 		 */
22740Sstevel@tonic-gate 		dosynctodr = 0;
22750Sstevel@tonic-gate 
22760Sstevel@tonic-gate 		/*
22770Sstevel@tonic-gate 		 * Set tod to the correct value from hrestime
22780Sstevel@tonic-gate 		 */
22790Sstevel@tonic-gate 		tod = hrestime.tv_sec;
22800Sstevel@tonic-gate 	}
22810Sstevel@tonic-gate 
22820Sstevel@tonic-gate 	prev_tod = tod;
22830Sstevel@tonic-gate 	prev_tick = tick;
22840Sstevel@tonic-gate 	return (tod);
22850Sstevel@tonic-gate }
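
/*
 * Illustrative sketch only (not compiled as part of this file): a
 * platform's tod_get() implementation is expected to run the value it
 * reads from the TOD hardware through tod_validate() while holding
 * tod_lock, roughly as below.  The example_* names are hypothetical.
 *
 *	timestruc_t
 *	example_tod_get(void)
 *	{
 *		timestruc_t ts;
 *
 *		ASSERT(MUTEX_HELD(&tod_lock));
 *		ts.tv_sec = tod_validate(example_read_tod_hardware());
 *		ts.tv_nsec = 0;
 *		return (ts);
 *	}
 */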
22860Sstevel@tonic-gate 
22870Sstevel@tonic-gate static void
22880Sstevel@tonic-gate calcloadavg(int nrun, uint64_t *hp_ave)
22890Sstevel@tonic-gate {
22900Sstevel@tonic-gate 	static int64_t f[3] = { 135, 27, 9 };
22910Sstevel@tonic-gate 	uint_t i;
22920Sstevel@tonic-gate 	int64_t q, r;
22930Sstevel@tonic-gate 
22940Sstevel@tonic-gate 	/*
22950Sstevel@tonic-gate 	 * Compute load average over the last 1, 5, and 15 minutes
22960Sstevel@tonic-gate 	 * (60, 300, and 900 seconds).  The constants in f[3] are for
22970Sstevel@tonic-gate 	 * exponential decay:
22980Sstevel@tonic-gate 	 * (1 - exp(-1/60)) << 13 = 135,
22990Sstevel@tonic-gate 	 * (1 - exp(-1/300)) << 13 = 27,
23000Sstevel@tonic-gate 	 * (1 - exp(-1/900)) << 13 = 9.
23010Sstevel@tonic-gate 	 */
23020Sstevel@tonic-gate 
23030Sstevel@tonic-gate 	/*
23040Sstevel@tonic-gate 	 * a little hoop-jumping to avoid integer overflow
23050Sstevel@tonic-gate 	 */
23060Sstevel@tonic-gate 	for (i = 0; i < 3; i++) {
23070Sstevel@tonic-gate 		q = (hp_ave[i]  >> 16) << 7;
23080Sstevel@tonic-gate 		r = (hp_ave[i]  & 0xffff) << 7;
23090Sstevel@tonic-gate 		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
23100Sstevel@tonic-gate 	}
23110Sstevel@tonic-gate }
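
/*
 * The f[] constants above can be reproduced with the following
 * user-land sketch (not part of the kernel); it simply evaluates
 * (1 - exp(-1/T)) * 2^13 for T = 60, 300 and 900 seconds and prints
 * 135, 27 and 9:
 *
 *	#include <stdio.h>
 *	#include <math.h>
 *
 *	int
 *	main(void)
 *	{
 *		double T[3] = { 60.0, 300.0, 900.0 };
 *		int i;
 *
 *		for (i = 0; i < 3; i++)
 *			printf("%d\n",
 *			    (int)((1.0 - exp(-1.0 / T[i])) * 8192.0));
 *		return (0);
 *	}
 */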
231211066Srafael.vanoni@sun.com 
231311066Srafael.vanoni@sun.com /*
231411066Srafael.vanoni@sun.com  * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
231511066Srafael.vanoni@sun.com  * calculate the value of lbolt according to the current mode. In the event
231611066Srafael.vanoni@sun.com  * driven mode (the default), lbolt is calculated by dividing the current hires
231711066Srafael.vanoni@sun.com  * time by the number of nanoseconds per clock tick. In the cyclic driven mode
231811066Srafael.vanoni@sun.com  * an internal variable is incremented at each firing of the lbolt cyclic
231911066Srafael.vanoni@sun.com  * and returned by lbolt_cyclic_driven().
232011066Srafael.vanoni@sun.com  *
232111066Srafael.vanoni@sun.com  * The system will transition from event to cyclic driven mode when the number
232211066Srafael.vanoni@sun.com  * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
232311066Srafael.vanoni@sun.com  * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
232411066Srafael.vanoni@sun.com  * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
232511066Srafael.vanoni@sun.com  * causing enough activity to cross the thresholds.
232611066Srafael.vanoni@sun.com  */
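
/*
 * Conceptually, DDI consumers reach whichever mode is current through
 * the lbolt_hybrid function pointer.  A sketch of the entry points
 * (their actual definitions live with the DDI code, not in this file):
 *
 *	clock_t
 *	ddi_get_lbolt(void)
 *	{
 *		return ((clock_t)lbolt_hybrid());
 *	}
 *
 *	int64_t
 *	ddi_get_lbolt64(void)
 *	{
 *		return (lbolt_hybrid());
 *	}
 */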
232711226Srafael.vanoni@sun.com int64_t
232811066Srafael.vanoni@sun.com lbolt_bootstrap(void)
232911066Srafael.vanoni@sun.com {
233011066Srafael.vanoni@sun.com 	return (0);
233111066Srafael.vanoni@sun.com }
233211066Srafael.vanoni@sun.com 
233311066Srafael.vanoni@sun.com /* ARGSUSED */
233411066Srafael.vanoni@sun.com uint_t
233511066Srafael.vanoni@sun.com lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
233611066Srafael.vanoni@sun.com {
233711066Srafael.vanoni@sun.com 	hrtime_t ts, exp;
233811066Srafael.vanoni@sun.com 	int ret;
233911066Srafael.vanoni@sun.com 
234011066Srafael.vanoni@sun.com 	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);
234111066Srafael.vanoni@sun.com 
234211066Srafael.vanoni@sun.com 	kpreempt_disable();
234311066Srafael.vanoni@sun.com 
234411066Srafael.vanoni@sun.com 	ts = gethrtime();
234511066Srafael.vanoni@sun.com 	lb_info->lbi_internal = (ts/nsec_per_tick);
234611066Srafael.vanoni@sun.com 
234711066Srafael.vanoni@sun.com 	/*
234811066Srafael.vanoni@sun.com 	 * Align the next expiration to a clock tick boundary.
234911066Srafael.vanoni@sun.com 	 */
235011066Srafael.vanoni@sun.com 	exp = ts + nsec_per_tick - 1;
235111066Srafael.vanoni@sun.com 	exp = (exp/nsec_per_tick) * nsec_per_tick;
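	/*
	 * For instance, assuming a 100Hz clock (nsec_per_tick of
	 * 10,000,000ns), a ts of 25,000,001 gives exp = 35,000,000,
	 * which the division and multiplication above round down to
	 * the next tick boundary, 30,000,000.
	 */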
235211066Srafael.vanoni@sun.com 
235311151Srafael.vanoni@sun.com 	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
235411066Srafael.vanoni@sun.com 	ASSERT(ret);
235511066Srafael.vanoni@sun.com 
235611066Srafael.vanoni@sun.com 	lbolt_hybrid = lbolt_cyclic_driven;
235711066Srafael.vanoni@sun.com 	lb_info->lbi_cyc_deactivate = B_FALSE;
235811066Srafael.vanoni@sun.com 	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
235911066Srafael.vanoni@sun.com 
236011066Srafael.vanoni@sun.com 	kpreempt_enable();
236111066Srafael.vanoni@sun.com 
236211066Srafael.vanoni@sun.com 	ret = atomic_dec_32_nv(&lb_info->lbi_token);
236311066Srafael.vanoni@sun.com 	ASSERT(ret == 0);
236411066Srafael.vanoni@sun.com 
236511066Srafael.vanoni@sun.com 	return (1);
236611066Srafael.vanoni@sun.com }
236711066Srafael.vanoni@sun.com 
236811066Srafael.vanoni@sun.com int64_t
236911066Srafael.vanoni@sun.com lbolt_event_driven(void)
237011066Srafael.vanoni@sun.com {
237111066Srafael.vanoni@sun.com 	hrtime_t ts;
237211066Srafael.vanoni@sun.com 	int64_t lb;
237311066Srafael.vanoni@sun.com 	int ret, cpu = CPU->cpu_seqid;
237411066Srafael.vanoni@sun.com 
237511066Srafael.vanoni@sun.com 	ts = gethrtime();
237611066Srafael.vanoni@sun.com 	ASSERT(ts > 0);
237711066Srafael.vanoni@sun.com 
237811066Srafael.vanoni@sun.com 	ASSERT(nsec_per_tick > 0);
237911066Srafael.vanoni@sun.com 	lb = (ts/nsec_per_tick);
238011066Srafael.vanoni@sun.com 
238111066Srafael.vanoni@sun.com 	/*
238211066Srafael.vanoni@sun.com 	 * Switch to cyclic mode if the number of calls to this routine
238311066Srafael.vanoni@sun.com 	 * has reached the threshold within the interval.
238411066Srafael.vanoni@sun.com 	 */
238511066Srafael.vanoni@sun.com 	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
238611066Srafael.vanoni@sun.com 
238711066Srafael.vanoni@sun.com 		if (--lb_cpu[cpu].lbc_counter == 0) {
238811066Srafael.vanoni@sun.com 			/*
238911066Srafael.vanoni@sun.com 			 * Reached the threshold within the interval, reset
239011066Srafael.vanoni@sun.com 			 * the usage statistics.
239111066Srafael.vanoni@sun.com 			 */
239211066Srafael.vanoni@sun.com 			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
239311066Srafael.vanoni@sun.com 			lb_cpu[cpu].lbc_cnt_start = lb;
239411066Srafael.vanoni@sun.com 
239511066Srafael.vanoni@sun.com 			/*
239611066Srafael.vanoni@sun.com 			 * Make sure only one thread reprograms the
239711066Srafael.vanoni@sun.com 			 * lbolt cyclic and changes the mode.
239811066Srafael.vanoni@sun.com 			 */
239911066Srafael.vanoni@sun.com 			if (panicstr == NULL &&
240011066Srafael.vanoni@sun.com 			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
240111066Srafael.vanoni@sun.com 
240211066Srafael.vanoni@sun.com 				if (lbolt_hybrid == lbolt_cyclic_driven) {
240311066Srafael.vanoni@sun.com 					ret = atomic_dec_32_nv(
240411066Srafael.vanoni@sun.com 					    &lb_info->lbi_token);
240511066Srafael.vanoni@sun.com 					ASSERT(ret == 0);
240611496Srafael.vanoni@sun.com 				} else {
240711496Srafael.vanoni@sun.com 					lbolt_softint_post();
240811066Srafael.vanoni@sun.com 				}
240911066Srafael.vanoni@sun.com 			}
241011066Srafael.vanoni@sun.com 		}
241111066Srafael.vanoni@sun.com 	} else {
241211066Srafael.vanoni@sun.com 		/*
241311066Srafael.vanoni@sun.com 		 * Exceeded the interval, reset the usage statistics.
241411066Srafael.vanoni@sun.com 		 */
241511066Srafael.vanoni@sun.com 		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
241611066Srafael.vanoni@sun.com 		lb_cpu[cpu].lbc_cnt_start = lb;
241711066Srafael.vanoni@sun.com 	}
241811066Srafael.vanoni@sun.com 
241911066Srafael.vanoni@sun.com 	ASSERT(lb >= lb_info->lbi_debug_time);
242011066Srafael.vanoni@sun.com 
242111066Srafael.vanoni@sun.com 	return (lb - lb_info->lbi_debug_time);
242211066Srafael.vanoni@sun.com }
242311066Srafael.vanoni@sun.com 
242411066Srafael.vanoni@sun.com int64_t
242511066Srafael.vanoni@sun.com lbolt_cyclic_driven(void)
242611066Srafael.vanoni@sun.com {
242711066Srafael.vanoni@sun.com 	int64_t lb = lb_info->lbi_internal;
242811066Srafael.vanoni@sun.com 	int cpu = CPU->cpu_seqid;
242911066Srafael.vanoni@sun.com 
243011066Srafael.vanoni@sun.com 	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
243111066Srafael.vanoni@sun.com 
243211066Srafael.vanoni@sun.com 		if (lb_cpu[cpu].lbc_counter == 0)
243311066Srafael.vanoni@sun.com 			/*
243411066Srafael.vanoni@sun.com 			 * Reached the threshold within the interval,
243511066Srafael.vanoni@sun.com 			 * prevent the lbolt cyclic from turning itself
243611066Srafael.vanoni@sun.com 			 * off.
243711066Srafael.vanoni@sun.com 			 */
243811066Srafael.vanoni@sun.com 			lb_info->lbi_cyc_deactivate = B_FALSE;
243911066Srafael.vanoni@sun.com 		else
244011066Srafael.vanoni@sun.com 			lb_cpu[cpu].lbc_counter--;
244111066Srafael.vanoni@sun.com 	} else {
244211066Srafael.vanoni@sun.com 		/*
244311066Srafael.vanoni@sun.com 		 * Only reset the usage statistics when the interval
244411066Srafael.vanoni@sun.com 		 * has been exceeded.
244511066Srafael.vanoni@sun.com 		 */
244611066Srafael.vanoni@sun.com 		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
244711066Srafael.vanoni@sun.com 		lb_cpu[cpu].lbc_cnt_start = lb;
244811066Srafael.vanoni@sun.com 	}
244911066Srafael.vanoni@sun.com 
245011066Srafael.vanoni@sun.com 	ASSERT(lb >= lb_info->lbi_debug_time);
245111066Srafael.vanoni@sun.com 
245211066Srafael.vanoni@sun.com 	return (lb - lb_info->lbi_debug_time);
245311066Srafael.vanoni@sun.com }
245411066Srafael.vanoni@sun.com 
245511066Srafael.vanoni@sun.com /*
245611226Srafael.vanoni@sun.com  * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
245711066Srafael.vanoni@sun.com  * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
245811066Srafael.vanoni@sun.com  * It is inactive by default, and will be activated when switching from event
245911066Srafael.vanoni@sun.com  * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
246011066Srafael.vanoni@sun.com  * by lbolt_cyclic_driven().
246111066Srafael.vanoni@sun.com  */
246211066Srafael.vanoni@sun.com static void
246311066Srafael.vanoni@sun.com lbolt_cyclic(void)
246411066Srafael.vanoni@sun.com {
246511066Srafael.vanoni@sun.com 	int ret;
246611066Srafael.vanoni@sun.com 
246711066Srafael.vanoni@sun.com 	lb_info->lbi_internal++;
246811066Srafael.vanoni@sun.com 
246911066Srafael.vanoni@sun.com 	if (!lbolt_cyc_only) {
247011066Srafael.vanoni@sun.com 
247111066Srafael.vanoni@sun.com 		if (lb_info->lbi_cyc_deactivate) {
247211066Srafael.vanoni@sun.com 			/*
247311066Srafael.vanoni@sun.com 			 * Switching from cyclic to event driven mode.
247411066Srafael.vanoni@sun.com 			 */
247511066Srafael.vanoni@sun.com 			if (atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
247611066Srafael.vanoni@sun.com 
247711066Srafael.vanoni@sun.com 				if (lbolt_hybrid == lbolt_event_driven) {
247811066Srafael.vanoni@sun.com 					ret = atomic_dec_32_nv(
247911066Srafael.vanoni@sun.com 					    &lb_info->lbi_token);
248011066Srafael.vanoni@sun.com 					ASSERT(ret == 0);
248111066Srafael.vanoni@sun.com 					return;
248211066Srafael.vanoni@sun.com 				}
248311066Srafael.vanoni@sun.com 
248411066Srafael.vanoni@sun.com 				kpreempt_disable();
248511066Srafael.vanoni@sun.com 
248611066Srafael.vanoni@sun.com 				lbolt_hybrid = lbolt_event_driven;
248711151Srafael.vanoni@sun.com 				ret = cyclic_reprogram(
248811151Srafael.vanoni@sun.com 				    lb_info->id.lbi_cyclic_id,
248911066Srafael.vanoni@sun.com 				    CY_INFINITY);
249011066Srafael.vanoni@sun.com 				ASSERT(ret);
249111066Srafael.vanoni@sun.com 
249211066Srafael.vanoni@sun.com 				kpreempt_enable();
249311066Srafael.vanoni@sun.com 
249411066Srafael.vanoni@sun.com 				ret = atomic_dec_32_nv(&lb_info->lbi_token);
249511066Srafael.vanoni@sun.com 				ASSERT(ret == 0);
249611066Srafael.vanoni@sun.com 			}
249711066Srafael.vanoni@sun.com 		}
249811066Srafael.vanoni@sun.com 
249911066Srafael.vanoni@sun.com 		/*
250011066Srafael.vanoni@sun.com 		 * The lbolt cyclic should not try to deactivate itself before
250111066Srafael.vanoni@sun.com 		 * the sampling period has elapsed.
250211066Srafael.vanoni@sun.com 		 */
250311066Srafael.vanoni@sun.com 		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
250411066Srafael.vanoni@sun.com 		    lb_info->lbi_thresh_interval) {
250511066Srafael.vanoni@sun.com 			lb_info->lbi_cyc_deactivate = B_TRUE;
250611066Srafael.vanoni@sun.com 			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
250711066Srafael.vanoni@sun.com 		}
250811066Srafael.vanoni@sun.com 	}
250911066Srafael.vanoni@sun.com }
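
/*
 * For reference, a sketch (under assumed names and values) of how a
 * cyclic like the one above is registered with the cyclic subsystem.
 * The real lbolt cyclic is set up in clock_init(), with its initial
 * expiration left at CY_INFINITY so that it stays dormant until
 * lbolt_ev_to_cyclic() reprograms it:
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *
 *	hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
 *	hdlr.cyh_arg = NULL;
 *	hdlr.cyh_level = CY_LOCK_LEVEL;
 *
 *	when.cyt_when = CY_INFINITY;
 *	when.cyt_interval = nsec_per_tick;
 *
 *	mutex_enter(&cpu_lock);
 *	lb_info->id.lbi_cyclic_id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 */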
251011066Srafael.vanoni@sun.com 
251111066Srafael.vanoni@sun.com /*
251211066Srafael.vanoni@sun.com  * Since the lbolt service was historically cyclic driven, it must be 'stopped'
251311066Srafael.vanoni@sun.com  * when the system drops into the kernel debugger. lbolt_debug_entry() is
251411066Srafael.vanoni@sun.com  * called by the KDI system claim callbacks to record a hires timestamp at
251511066Srafael.vanoni@sun.com  * debug enter time. lbolt_debug_return() is called by the sistem release
251611066Srafael.vanoni@sun.com  * debug enter time. lbolt_debug_return() is called by the system release
251711066Srafael.vanoni@sun.com  * accumulated in the lb_info structure and used by lbolt_event_driven() and
251811066Srafael.vanoni@sun.com  * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
251911066Srafael.vanoni@sun.com  */
252011066Srafael.vanoni@sun.com void
252111066Srafael.vanoni@sun.com lbolt_debug_entry(void)
252211066Srafael.vanoni@sun.com {
252311195Srafael.vanoni@sun.com 	if (lbolt_hybrid != lbolt_bootstrap) {
252411195Srafael.vanoni@sun.com 		ASSERT(lb_info != NULL);
252511195Srafael.vanoni@sun.com 		lb_info->lbi_debug_ts = gethrtime();
252611195Srafael.vanoni@sun.com 	}
252711066Srafael.vanoni@sun.com }
252811066Srafael.vanoni@sun.com 
252911151Srafael.vanoni@sun.com /*
253011151Srafael.vanoni@sun.com  * Calculate the time spent in the debugger and add it to the lbolt info
253111151Srafael.vanoni@sun.com  * structure. We also update the internal lbolt value in case we were in
253211151Srafael.vanoni@sun.com  * cyclic driven mode going in.
253311151Srafael.vanoni@sun.com  */
253411066Srafael.vanoni@sun.com void
253511066Srafael.vanoni@sun.com lbolt_debug_return(void)
253611066Srafael.vanoni@sun.com {
253711151Srafael.vanoni@sun.com 	hrtime_t ts;
253811151Srafael.vanoni@sun.com 
253911195Srafael.vanoni@sun.com 	if (lbolt_hybrid != lbolt_bootstrap) {
254011195Srafael.vanoni@sun.com 		ASSERT(lb_info != NULL);
254111195Srafael.vanoni@sun.com 		ASSERT(nsec_per_tick > 0);
254211195Srafael.vanoni@sun.com 
254311151Srafael.vanoni@sun.com 		ts = gethrtime();
254411151Srafael.vanoni@sun.com 		lb_info->lbi_internal = (ts/nsec_per_tick);
254511066Srafael.vanoni@sun.com 		lb_info->lbi_debug_time +=
254611151Srafael.vanoni@sun.com 		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);
254711195Srafael.vanoni@sun.com 
254811195Srafael.vanoni@sun.com 		lb_info->lbi_debug_ts = 0;
254911151Srafael.vanoni@sun.com 	}
255011066Srafael.vanoni@sun.com }
2551