/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information. The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting). In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch
 * between states. Transitions from a sleeping state (LMS_SLEEP and
 * LMS_STOPPED) occur by calling restore_mstate(), which restores a thread
 * to its previously running state. This code is primarily executed by the
 * dispatcher in disp() before running a process that was put to sleep. If
 * the thread was not in a sleeping state, this call has little effect other
 * than to update the tally of time the thread has spent waiting on
 * run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads but it tracks user, system, and idle time for cpus. Cpu
 * microstate accounting does not track interrupt times as there is a
 * pre-existing interrupt accounting mechanism for this purpose. Cpu
 * microstate accounting tracks time that user threads have spent active,
 * idle, or in the system on a given cpu. Cpu microstate accounting has
 * fewer states, which allows it to have better-defined transitions. The
 * states transition in the following order:
 *
 *	CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go
 * through the system state, and vice-versa for the user state from idle.
 * The switching of the microstates from user to system is done as part of
 * the regular thread microstate accounting code, except for the idle state
 * which is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages). The routines which
 * handle the growing/shrinking and exponentiation of cpu percentages have
 * been moved here as it now makes more sense for them to be generated from
 * the microstate code. Cpu percentages are generated similarly to the way
 * they were before; however, now they are based upon high-resolution
 * timestamps and the timestamps are modified at various state changes
 * instead of during a clock() interrupt. This allows us to generate more
 * accurate cpu percentages which are also in-sync with microstate data.
 */
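/*
 * Illustrative sketch (not part of the original file; call sites are
 * assumptions based on the description above): a blocking thread typically
 * marks itself asleep before surrendering the cpu, and the dispatcher
 * later undoes that:
 *
 *	(void) new_mstate(curthread, LMS_SLEEP);  // about to block
 *	...                                       // thread sleeps, then wakes
 *	restore_mstate(t);                        // in disp(), before running t
 *
 * If t was never actually asleep, restore_mstate() only folds the time t
 * spent on a run queue into its LMS_WAIT_CPU bucket.
 */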
/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
	kthread_t	*t,
	int		init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_term = 0;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_waitrq = 0;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu.
 */
void
init_cpu_mstate(
	cpu_t	*cpu,
	int	init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	cpu->cpu_waitrq = 0;
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Sets cpu state to CMS_DISABLED (i.e., offline). We don't actually track
 * this time, but it serves as a useful placeholder state for when we're
 * not doing anything.
 */
void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}
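/*
 * Usage sketch (hedged; the call sites are assumptions, as they live
 * outside this file): an lwp's microstate is initialized once at creation,
 * and a cpu's when it enters or leaves service:
 *
 *	init_mstate(t, LMS_STOPPED);		// at lwp creation
 *	init_cpu_mstate(cp, CMS_SYSTEM);	// when a cpu starts up
 *	term_cpu_mstate(cp);			// when a cpu is retired
 */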
/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state)						\
	gen = cpu->cpu_mstate_gen;					\
	cpu->cpu_mstate_gen = 0;					\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state;					\
	cpu->cpu_mstate_start = curtime;				\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function cannot be re-entrant on a given CPU. As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked. Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */

	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor. i.e. no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU. Any locking added to this code path
	 * would seriously impact syscall performance. So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time. This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done. Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state. The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical. cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start. Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one would use membar_producer() calls to accomplish
	 * this. Unfortunately this routine is extremely performance
	 * critical (esp. in syscall_mstate below) and we cannot
	 * afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls. On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem. The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */

	NEW_CPU_MSTATE(cmstate);
}
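/*
 * Reader sketch (an illustrative assumption, not part of the original
 * file): a consumer of the per-cpu mstate data follows the generation
 * protocol described above, retrying until it sees the same non-zero
 * generation before and after all of its reads:
 *
 *	uint16_t gen;
 *	hrtime_t acct[NCMSTATES];
 *	int i;
 *
 *	do {
 *		gen = cpu->cpu_mstate_gen;
 *		membar_consumer();	// order the gen load before data loads
 *		for (i = 0; i < NCMSTATES; i++)
 *			acct[i] = cpu->cpu_acct[i];
 *		membar_consumer();	// order data loads before the re-check
 *	} while (gen == 0 || gen != cpu->cpu_mstate_gen);
 */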
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if ((mstate == LMS_USER || mstate == LMS_SYSTEM ||
	    mstate == LMS_TRAP) && (now > state_start)) {
		aggr_time += now - state_start;
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}
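/*
 * Caller sketch (hypothetical, hedged): per the ASSERT above, the thread
 * must be locked across the call:
 *
 *	thread_lock(t);
 *	onproc_ns = mstate_thread_onproc_time(t);
 *	thread_unlock(t);
 */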
/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time). This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}

/*
 * Per-lwp microstate transition at the user/system boundary. Charges the
 * time since the last transition to the state being left and, if needed,
 * flips the cpu microstate to match.
 */
void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE
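/*
 * Usage sketch (hedged; the call sites live in the syscall entry/exit
 * paths, not in this file): a system call flips the lwp between user and
 * system microstates at its boundaries:
 *
 *	syscall_mstate(LMS_USER, LMS_SYSTEM);	// on entry to the kernel
 *	...					// perform the system call
 *	syscall_mstate(LMS_SYSTEM, LMS_USER);	// on return to userland
 */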
/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp. The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * Return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif
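/*
 * Representation sketch (illustrative values, not from the original file):
 * with the binary point to the right of bit 31, a real value v in [0, 1]
 * is stored as the integer v * LSI_ONE. For example:
 *
 *	LSI_ONE			// 0x80000000 == 1.0
 *	LSI_ONE >> 1		// 0x40000000 == 0.5
 *	(uint32_t)(0.2 * LSI_ONE)	// ~0x1999999A == 0.2
 *
 * Multiplying two such values yields a product with the binary point at
 * bit 62, so each product is renormalized with ">> LSHIFT", as exp_x()
 * does below.
 */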
static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint32_t ui;

#ifdef DEBUG
	expx_cnt++;
#endif
	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * Short circuit:
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK. Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
#ifdef DEBUG
		expx_mul += i;	/* seldom happens */
#endif
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu: pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu: 1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}
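/*
 * Worked example (illustrative arithmetic only): take tau = 0.5 in the
 * scaled representation. exp_x() shifts x right twice (0.5 / 4 = 0.125,
 * now below 1/4), approximates exp(-0.125) as 1 - 0.125 = 0.875, and then
 * squares twice to undo the two halvings:
 *
 *	0.875^2 = 0.765625
 *	0.765625^2 ~= 0.5862	// true exp(-0.5) ~= 0.6065, within 4%
 *
 * cpu_decay() then scales the old percentage by this factor:
 *
 *	new_pct = (pct * exp_x(tau)) >> LSHIFT;
 */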
/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so this *has* to be
	 * done atomically. Holding locks here causes bad things to
	 * happen (read: deadlock).
	 */

	do {
		if (T_ONPROC(t) && t->t_waitrq == 0) {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (cas32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

	return (npctcpu);
}
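/*
 * Conversion sketch (hypothetical consumer, hedged): t_pctcpu uses the
 * same scaled-integer format described above, so a consumer such as the
 * /proc code can turn it into a conventional percentage with a single
 * multiply and shift:
 *
 *	uint_t pct = ((uint64_t)t->t_pctcpu * 100) >> LSHIFT;
 */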
/*
 * Change the microstate level for the LWP and update the
 * associated accounting information. Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	unsigned state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	klwp_t *lwp;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	/*
	 * Don't do microstate processing for threads without an lwp (kernel
	 * threads). Also, if we're an interrupt thread that is pinning another
	 * thread, our t_mstate hasn't been initialized. We'd be modifying the
	 * microstate of the underlying lwp which doesn't realize that it's
	 * pinned. In this case, also don't change the microstate.
	 */
	if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);

	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */

	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}
/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;

	/*
	 * Don't restore the mstate of threads without lwps (kernel threads).
	 *
	 * Threads with t_intr set shouldn't be in the dispatcher, so assert
	 * that nobody here has t_intr.
	 */
	ASSERT(t->t_intr == NULL);

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		if (waitrq == 0) {
			waitrq = curtime;
		}
		t->t_waitrq = 0;
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);

	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	ms->ms_acct[LMS_WAIT_CPU] += (curtime - waitrq);
	CPU->cpu_waitrq += (curtime - waitrq);
	ms->ms_state_start = curtime;
}
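/*
 * Timeline sketch (illustrative, with made-up timestamps): a thread sleeps
 * at time t0, is made runnable (t_waitrq set) at t1, and is chosen by
 * disp() at t2. restore_mstate() then charges [t0, t1) to the sleep bucket
 * and [t1, t2) to LMS_WAIT_CPU:
 *
 *	t0: new_mstate(t, LMS_SLEEP)	// ms_state_start = t0
 *	t1: setrun(t)			// t_waitrq = t1 (hedged call site)
 *	t2: restore_mstate(t)		// ms_acct[LMS_SLEEP] += t1 - t0
 *					// ms_acct[LMS_WAIT_CPU] += t2 - t1
 */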
/*
 * Copy lwp microstate accounting and resource usage information
 * to the process. (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt += lwp->lwp_ru.minflt;
	p->p_ru.majflt += lwp->lwp_ru.majflt;
	p->p_ru.nswap += lwp->lwp_ru.nswap;
	p->p_ru.inblock += lwp->lwp_ru.inblock;
	p->p_ru.oublock += lwp->lwp_ru.oublock;
	p->p_ru.msgsnd += lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv += lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw += lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw += lwp->lwp_ru.nivcsw;
	p->p_ru.sysc += lwp->lwp_ru.sysc;
	p->p_ru.ioch += lwp->lwp_ru.ioch;
	p->p_defunct++;
}
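/*
 * Caller sketch (hedged; the call site is in the lwp exit path, not in
 * this file): the exiting lwp's accounting is folded into the process
 * while p_lock is held, per the ASSERT above:
 *
 *	mutex_enter(&p->p_lock);
 *	term_mstate(t);
 *	...
 *	mutex_exit(&p->p_lock);
 */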