/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
230Sstevel@tonic-gate */ 240Sstevel@tonic-gate 250Sstevel@tonic-gate #include <sys/types.h> 260Sstevel@tonic-gate #include <sys/kstat.h> 270Sstevel@tonic-gate #include <sys/param.h> 280Sstevel@tonic-gate #include <sys/stack.h> 290Sstevel@tonic-gate #include <sys/regset.h> 300Sstevel@tonic-gate #include <sys/thread.h> 310Sstevel@tonic-gate #include <sys/proc.h> 320Sstevel@tonic-gate #include <sys/procfs_isa.h> 330Sstevel@tonic-gate #include <sys/kmem.h> 340Sstevel@tonic-gate #include <sys/cpuvar.h> 350Sstevel@tonic-gate #include <sys/systm.h> 360Sstevel@tonic-gate #include <sys/machpcb.h> 370Sstevel@tonic-gate #include <sys/machasi.h> 380Sstevel@tonic-gate #include <sys/vis.h> 390Sstevel@tonic-gate #include <sys/fpu/fpusystm.h> 400Sstevel@tonic-gate #include <sys/cpu_module.h> 410Sstevel@tonic-gate #include <sys/privregs.h> 420Sstevel@tonic-gate #include <sys/archsystm.h> 430Sstevel@tonic-gate #include <sys/atomic.h> 440Sstevel@tonic-gate #include <sys/cmn_err.h> 450Sstevel@tonic-gate #include <sys/time.h> 460Sstevel@tonic-gate #include <sys/clock.h> 470Sstevel@tonic-gate #include <sys/cmp.h> 480Sstevel@tonic-gate #include <sys/platform_module.h> 490Sstevel@tonic-gate #include <sys/bl.h> 500Sstevel@tonic-gate #include <sys/nvpair.h> 510Sstevel@tonic-gate #include <sys/kdi_impl.h> 520Sstevel@tonic-gate #include <sys/machsystm.h> 530Sstevel@tonic-gate #include <sys/sysmacros.h> 540Sstevel@tonic-gate #include <sys/promif.h> 550Sstevel@tonic-gate #include <sys/pool_pset.h> 563446Smrj #include <sys/mem.h> 573446Smrj #include <sys/dumphdr.h> 582005Selowe #include <vm/seg_kmem.h> 593446Smrj #include <sys/hold_page.h> 603446Smrj #include <sys/cpu.h> 6111066Srafael.vanoni@sun.com #include <sys/ivintr.h> 6211066Srafael.vanoni@sun.com #include <sys/clock_impl.h> 6312235Sprashanth.sreenivasa@oracle.com #include <sys/machclock.h> 640Sstevel@tonic-gate 650Sstevel@tonic-gate int maxphys = MMU_PAGESIZE * 16; /* 128k */ 660Sstevel@tonic-gate int klustsize = MMU_PAGESIZE * 16; /* 
128k */ 670Sstevel@tonic-gate 680Sstevel@tonic-gate /* 690Sstevel@tonic-gate * Initialize kernel thread's stack. 700Sstevel@tonic-gate */ 710Sstevel@tonic-gate caddr_t 720Sstevel@tonic-gate thread_stk_init(caddr_t stk) 730Sstevel@tonic-gate { 740Sstevel@tonic-gate kfpu_t *fp; 750Sstevel@tonic-gate ulong_t align; 760Sstevel@tonic-gate 770Sstevel@tonic-gate /* allocate extra space for floating point state */ 780Sstevel@tonic-gate stk -= SA(sizeof (kfpu_t) + GSR_SIZE); 790Sstevel@tonic-gate align = (uintptr_t)stk & 0x3f; 800Sstevel@tonic-gate stk -= align; /* force v9_fpu to be 16 byte aligned */ 810Sstevel@tonic-gate fp = (kfpu_t *)stk; 820Sstevel@tonic-gate fp->fpu_fprs = 0; 830Sstevel@tonic-gate 840Sstevel@tonic-gate stk -= SA(MINFRAME); 850Sstevel@tonic-gate return (stk); 860Sstevel@tonic-gate } 870Sstevel@tonic-gate 882005Selowe #define WIN32_SIZE (MAXWIN * sizeof (struct rwindow32)) 892005Selowe #define WIN64_SIZE (MAXWIN * sizeof (struct rwindow64)) 902005Selowe 912005Selowe kmem_cache_t *wbuf32_cache; 922005Selowe kmem_cache_t *wbuf64_cache; 932005Selowe 942005Selowe void 952005Selowe lwp_stk_cache_init(void) 962005Selowe { 972224Selowe /* 982224Selowe * Window buffers are allocated from the static arena 992224Selowe * because they are accessed at TL>0. We also must use 1002224Selowe * KMC_NOHASH to prevent them from straddling page 1012224Selowe * boundaries as they are accessed by physical address. 1022224Selowe */ 1032005Selowe wbuf32_cache = kmem_cache_create("wbuf32_cache", WIN32_SIZE, 1042224Selowe 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1052005Selowe wbuf64_cache = kmem_cache_create("wbuf64_cache", WIN64_SIZE, 1062224Selowe 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1072005Selowe } 1082005Selowe 1090Sstevel@tonic-gate /* 1100Sstevel@tonic-gate * Initialize lwp's kernel stack. 
1110Sstevel@tonic-gate * Note that now that the floating point register save area (kfpu_t) 1120Sstevel@tonic-gate * has been broken out from machpcb and aligned on a 64 byte boundary so that 1130Sstevel@tonic-gate * we can do block load/stores to/from it, there are a couple of potential 1140Sstevel@tonic-gate * optimizations to save stack space. 1. The floating point register save 1150Sstevel@tonic-gate * area could be aligned on a 16 byte boundary, and the floating point code 1160Sstevel@tonic-gate * changed to (a) check the alignment and (b) use different save/restore 1170Sstevel@tonic-gate * macros depending upon the alignment. 2. The lwp_stk_init code below 1180Sstevel@tonic-gate * could be changed to calculate if less space would be wasted if machpcb 1190Sstevel@tonic-gate * was first instead of second. However there is a REGOFF macro used in 1200Sstevel@tonic-gate * locore, syscall_trap, machdep and mlsetup that assumes that the saved 1210Sstevel@tonic-gate * register area is a fixed distance from the %sp, and would have to be 1220Sstevel@tonic-gate * changed to a pointer or something...JJ said later. 
1230Sstevel@tonic-gate */ 1240Sstevel@tonic-gate caddr_t 1250Sstevel@tonic-gate lwp_stk_init(klwp_t *lwp, caddr_t stk) 1260Sstevel@tonic-gate { 1270Sstevel@tonic-gate struct machpcb *mpcb; 1280Sstevel@tonic-gate kfpu_t *fp; 1290Sstevel@tonic-gate uintptr_t aln; 1300Sstevel@tonic-gate 1310Sstevel@tonic-gate stk -= SA(sizeof (kfpu_t) + GSR_SIZE); 1320Sstevel@tonic-gate aln = (uintptr_t)stk & 0x3F; 1330Sstevel@tonic-gate stk -= aln; 1340Sstevel@tonic-gate fp = (kfpu_t *)stk; 1350Sstevel@tonic-gate stk -= SA(sizeof (struct machpcb)); 1360Sstevel@tonic-gate mpcb = (struct machpcb *)stk; 1370Sstevel@tonic-gate bzero(mpcb, sizeof (struct machpcb)); 1380Sstevel@tonic-gate bzero(fp, sizeof (kfpu_t) + GSR_SIZE); 1390Sstevel@tonic-gate lwp->lwp_regs = (void *)&mpcb->mpcb_regs; 1400Sstevel@tonic-gate lwp->lwp_fpu = (void *)fp; 1410Sstevel@tonic-gate mpcb->mpcb_fpu = fp; 1420Sstevel@tonic-gate mpcb->mpcb_fpu->fpu_q = mpcb->mpcb_fpu_q; 1430Sstevel@tonic-gate mpcb->mpcb_thread = lwp->lwp_thread; 1440Sstevel@tonic-gate mpcb->mpcb_wbcnt = 0; 1450Sstevel@tonic-gate if (lwp->lwp_procp->p_model == DATAMODEL_ILP32) { 1460Sstevel@tonic-gate mpcb->mpcb_wstate = WSTATE_USER32; 1472005Selowe mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP); 1480Sstevel@tonic-gate } else { 1490Sstevel@tonic-gate mpcb->mpcb_wstate = WSTATE_USER64; 1502005Selowe mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP); 1510Sstevel@tonic-gate } 1520Sstevel@tonic-gate ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0); 1530Sstevel@tonic-gate mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf); 1540Sstevel@tonic-gate mpcb->mpcb_pa = va_to_pa(mpcb); 1550Sstevel@tonic-gate return (stk); 1560Sstevel@tonic-gate } 1570Sstevel@tonic-gate 1580Sstevel@tonic-gate void 1590Sstevel@tonic-gate lwp_stk_fini(klwp_t *lwp) 1600Sstevel@tonic-gate { 1610Sstevel@tonic-gate struct machpcb *mpcb = lwptompcb(lwp); 1620Sstevel@tonic-gate 1630Sstevel@tonic-gate /* 1640Sstevel@tonic-gate * there might be windows still in the wbuf due 
to unmapped 1650Sstevel@tonic-gate * stack, misaligned stack pointer, etc. We just free it. 1660Sstevel@tonic-gate */ 1670Sstevel@tonic-gate mpcb->mpcb_wbcnt = 0; 1680Sstevel@tonic-gate if (mpcb->mpcb_wstate == WSTATE_USER32) 1692005Selowe kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf); 1700Sstevel@tonic-gate else 1712005Selowe kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf); 1720Sstevel@tonic-gate mpcb->mpcb_wbuf = NULL; 1730Sstevel@tonic-gate mpcb->mpcb_wbuf_pa = -1; 1740Sstevel@tonic-gate } 1750Sstevel@tonic-gate 1760Sstevel@tonic-gate 1770Sstevel@tonic-gate /* 1780Sstevel@tonic-gate * Copy regs from parent to child. 1790Sstevel@tonic-gate */ 1800Sstevel@tonic-gate void 1810Sstevel@tonic-gate lwp_forkregs(klwp_t *lwp, klwp_t *clwp) 1820Sstevel@tonic-gate { 1830Sstevel@tonic-gate kthread_t *t, *pt = lwptot(lwp); 1840Sstevel@tonic-gate struct machpcb *mpcb = lwptompcb(clwp); 1850Sstevel@tonic-gate struct machpcb *pmpcb = lwptompcb(lwp); 1860Sstevel@tonic-gate kfpu_t *fp, *pfp = lwptofpu(lwp); 1870Sstevel@tonic-gate caddr_t wbuf; 1880Sstevel@tonic-gate uint_t wstate; 1890Sstevel@tonic-gate 1900Sstevel@tonic-gate t = mpcb->mpcb_thread; 1910Sstevel@tonic-gate /* 1920Sstevel@tonic-gate * remember child's fp and wbuf since they will get erased during 1930Sstevel@tonic-gate * the bcopy. 1940Sstevel@tonic-gate */ 1950Sstevel@tonic-gate fp = mpcb->mpcb_fpu; 1960Sstevel@tonic-gate wbuf = mpcb->mpcb_wbuf; 1970Sstevel@tonic-gate wstate = mpcb->mpcb_wstate; 1980Sstevel@tonic-gate /* 1990Sstevel@tonic-gate * Don't copy mpcb_frame since we hand-crafted it 2000Sstevel@tonic-gate * in thread_load(). 
2010Sstevel@tonic-gate */ 2020Sstevel@tonic-gate bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF); 2030Sstevel@tonic-gate mpcb->mpcb_thread = t; 2040Sstevel@tonic-gate mpcb->mpcb_fpu = fp; 2050Sstevel@tonic-gate fp->fpu_q = mpcb->mpcb_fpu_q; 2060Sstevel@tonic-gate 2070Sstevel@tonic-gate /* 2080Sstevel@tonic-gate * It is theoretically possibly for the lwp's wstate to 2090Sstevel@tonic-gate * be different from its value assigned in lwp_stk_init, 2100Sstevel@tonic-gate * since lwp_stk_init assumed the data model of the process. 2110Sstevel@tonic-gate * Here, we took on the data model of the cloned lwp. 2120Sstevel@tonic-gate */ 2130Sstevel@tonic-gate if (mpcb->mpcb_wstate != wstate) { 2140Sstevel@tonic-gate if (wstate == WSTATE_USER32) { 2152005Selowe kmem_cache_free(wbuf32_cache, wbuf); 2162005Selowe wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP); 2170Sstevel@tonic-gate wstate = WSTATE_USER64; 2180Sstevel@tonic-gate } else { 2192005Selowe kmem_cache_free(wbuf64_cache, wbuf); 2202005Selowe wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP); 2210Sstevel@tonic-gate wstate = WSTATE_USER32; 2220Sstevel@tonic-gate } 2230Sstevel@tonic-gate } 2240Sstevel@tonic-gate 2250Sstevel@tonic-gate mpcb->mpcb_pa = va_to_pa(mpcb); 2260Sstevel@tonic-gate mpcb->mpcb_wbuf = wbuf; 2270Sstevel@tonic-gate mpcb->mpcb_wbuf_pa = va_to_pa(wbuf); 2280Sstevel@tonic-gate 2290Sstevel@tonic-gate ASSERT(mpcb->mpcb_wstate == wstate); 2300Sstevel@tonic-gate 2310Sstevel@tonic-gate if (mpcb->mpcb_wbcnt != 0) { 2320Sstevel@tonic-gate bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf, 2330Sstevel@tonic-gate mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ? 
2340Sstevel@tonic-gate sizeof (struct rwindow32) : sizeof (struct rwindow64))); 2350Sstevel@tonic-gate } 2360Sstevel@tonic-gate 2370Sstevel@tonic-gate if (pt == curthread) 2380Sstevel@tonic-gate pfp->fpu_fprs = _fp_read_fprs(); 2390Sstevel@tonic-gate if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) { 2400Sstevel@tonic-gate if (pt == curthread && fpu_exists) { 2410Sstevel@tonic-gate save_gsr(clwp->lwp_fpu); 2420Sstevel@tonic-gate } else { 2430Sstevel@tonic-gate uint64_t gsr; 2440Sstevel@tonic-gate gsr = get_gsr(lwp->lwp_fpu); 2450Sstevel@tonic-gate set_gsr(gsr, clwp->lwp_fpu); 2460Sstevel@tonic-gate } 2470Sstevel@tonic-gate fp_fork(lwp, clwp); 2480Sstevel@tonic-gate } 2490Sstevel@tonic-gate } 2500Sstevel@tonic-gate 2510Sstevel@tonic-gate /* 2520Sstevel@tonic-gate * Free lwp fpu regs. 2530Sstevel@tonic-gate */ 2540Sstevel@tonic-gate void 2550Sstevel@tonic-gate lwp_freeregs(klwp_t *lwp, int isexec) 2560Sstevel@tonic-gate { 2570Sstevel@tonic-gate kfpu_t *fp = lwptofpu(lwp); 2580Sstevel@tonic-gate 2590Sstevel@tonic-gate if (lwptot(lwp) == curthread) 2600Sstevel@tonic-gate fp->fpu_fprs = _fp_read_fprs(); 2610Sstevel@tonic-gate if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) 2620Sstevel@tonic-gate fp_free(fp, isexec); 2630Sstevel@tonic-gate } 2640Sstevel@tonic-gate 2650Sstevel@tonic-gate /* 2666994Sedp * These function are currently unused on sparc. 
2672712Snn35248 */ 2682712Snn35248 /*ARGSUSED*/ 2692712Snn35248 void 2702712Snn35248 lwp_attach_brand_hdlrs(klwp_t *lwp) 2712712Snn35248 {} 2722712Snn35248 2736994Sedp /*ARGSUSED*/ 2746994Sedp void 2756994Sedp lwp_detach_brand_hdlrs(klwp_t *lwp) 2766994Sedp {} 2776994Sedp 2782712Snn35248 /* 2790Sstevel@tonic-gate * fill in the extra register state area specified with the 2800Sstevel@tonic-gate * specified lwp's platform-dependent non-floating-point extra 2810Sstevel@tonic-gate * register state information 2820Sstevel@tonic-gate */ 2830Sstevel@tonic-gate /* ARGSUSED */ 2840Sstevel@tonic-gate void 2850Sstevel@tonic-gate xregs_getgfiller(klwp_id_t lwp, caddr_t xrp) 2860Sstevel@tonic-gate { 2870Sstevel@tonic-gate /* for sun4u nothing to do here, added for symmetry */ 2880Sstevel@tonic-gate } 2890Sstevel@tonic-gate 2900Sstevel@tonic-gate /* 2910Sstevel@tonic-gate * fill in the extra register state area specified with the specified lwp's 2920Sstevel@tonic-gate * platform-dependent floating-point extra register state information. 2930Sstevel@tonic-gate * NOTE: 'lwp' might not correspond to 'curthread' since this is 2940Sstevel@tonic-gate * called from code in /proc to get the registers of another lwp. 
2950Sstevel@tonic-gate */ 2960Sstevel@tonic-gate void 2970Sstevel@tonic-gate xregs_getfpfiller(klwp_id_t lwp, caddr_t xrp) 2980Sstevel@tonic-gate { 2990Sstevel@tonic-gate prxregset_t *xregs = (prxregset_t *)xrp; 3000Sstevel@tonic-gate kfpu_t *fp = lwptofpu(lwp); 3010Sstevel@tonic-gate uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL); 3020Sstevel@tonic-gate uint64_t gsr; 3030Sstevel@tonic-gate 3040Sstevel@tonic-gate /* 3050Sstevel@tonic-gate * fp_fksave() does not flush the GSR register into 3060Sstevel@tonic-gate * the lwp area, so do it now 3070Sstevel@tonic-gate */ 3080Sstevel@tonic-gate kpreempt_disable(); 3090Sstevel@tonic-gate if (ttolwp(curthread) == lwp && fpu_exists) { 3100Sstevel@tonic-gate fp->fpu_fprs = _fp_read_fprs(); 3110Sstevel@tonic-gate if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) { 3120Sstevel@tonic-gate _fp_write_fprs(fprs); 3130Sstevel@tonic-gate fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs; 3140Sstevel@tonic-gate } 3150Sstevel@tonic-gate save_gsr(fp); 3160Sstevel@tonic-gate } 3170Sstevel@tonic-gate gsr = get_gsr(fp); 3180Sstevel@tonic-gate kpreempt_enable(); 3190Sstevel@tonic-gate PRXREG_GSR(xregs) = gsr; 3200Sstevel@tonic-gate } 3210Sstevel@tonic-gate 3220Sstevel@tonic-gate /* 3230Sstevel@tonic-gate * set the specified lwp's platform-dependent non-floating-point 3240Sstevel@tonic-gate * extra register state based on the specified input 3250Sstevel@tonic-gate */ 3260Sstevel@tonic-gate /* ARGSUSED */ 3270Sstevel@tonic-gate void 3280Sstevel@tonic-gate xregs_setgfiller(klwp_id_t lwp, caddr_t xrp) 3290Sstevel@tonic-gate { 3300Sstevel@tonic-gate /* for sun4u nothing to do here, added for symmetry */ 3310Sstevel@tonic-gate } 3320Sstevel@tonic-gate 3330Sstevel@tonic-gate /* 3340Sstevel@tonic-gate * set the specified lwp's platform-dependent floating-point 3350Sstevel@tonic-gate * extra register state based on the specified input 3360Sstevel@tonic-gate */ 3370Sstevel@tonic-gate void 3380Sstevel@tonic-gate xregs_setfpfiller(klwp_id_t lwp, caddr_t xrp) 
3390Sstevel@tonic-gate { 3400Sstevel@tonic-gate prxregset_t *xregs = (prxregset_t *)xrp; 3410Sstevel@tonic-gate kfpu_t *fp = lwptofpu(lwp); 3420Sstevel@tonic-gate uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL); 3430Sstevel@tonic-gate uint64_t gsr = PRXREG_GSR(xregs); 3440Sstevel@tonic-gate 3450Sstevel@tonic-gate kpreempt_disable(); 3460Sstevel@tonic-gate set_gsr(gsr, lwptofpu(lwp)); 3470Sstevel@tonic-gate 3480Sstevel@tonic-gate if ((lwp == ttolwp(curthread)) && fpu_exists) { 3490Sstevel@tonic-gate fp->fpu_fprs = _fp_read_fprs(); 3500Sstevel@tonic-gate if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) { 3510Sstevel@tonic-gate _fp_write_fprs(fprs); 3520Sstevel@tonic-gate fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs; 3530Sstevel@tonic-gate } 3540Sstevel@tonic-gate restore_gsr(lwptofpu(lwp)); 3550Sstevel@tonic-gate } 3560Sstevel@tonic-gate kpreempt_enable(); 3570Sstevel@tonic-gate } 3580Sstevel@tonic-gate 3590Sstevel@tonic-gate /* 3600Sstevel@tonic-gate * fill in the sun4u asrs, ie, the lwp's platform-dependent 3610Sstevel@tonic-gate * non-floating-point extra register state information 3620Sstevel@tonic-gate */ 3630Sstevel@tonic-gate /* ARGSUSED */ 3640Sstevel@tonic-gate void 3650Sstevel@tonic-gate getasrs(klwp_t *lwp, asrset_t asr) 3660Sstevel@tonic-gate { 3670Sstevel@tonic-gate /* for sun4u nothing to do here, added for symmetry */ 3680Sstevel@tonic-gate } 3690Sstevel@tonic-gate 3700Sstevel@tonic-gate /* 3710Sstevel@tonic-gate * fill in the sun4u asrs, ie, the lwp's platform-dependent 3720Sstevel@tonic-gate * floating-point extra register state information 3730Sstevel@tonic-gate */ 3740Sstevel@tonic-gate void 3750Sstevel@tonic-gate getfpasrs(klwp_t *lwp, asrset_t asr) 3760Sstevel@tonic-gate { 3770Sstevel@tonic-gate kfpu_t *fp = lwptofpu(lwp); 3780Sstevel@tonic-gate uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL); 3790Sstevel@tonic-gate 3800Sstevel@tonic-gate kpreempt_disable(); 3810Sstevel@tonic-gate if (ttolwp(curthread) == lwp) 3820Sstevel@tonic-gate fp->fpu_fprs = _fp_read_fprs(); 
3830Sstevel@tonic-gate if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) { 3840Sstevel@tonic-gate if (fpu_exists && ttolwp(curthread) == lwp) { 3850Sstevel@tonic-gate if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) { 3860Sstevel@tonic-gate _fp_write_fprs(fprs); 3870Sstevel@tonic-gate fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs; 3880Sstevel@tonic-gate } 3890Sstevel@tonic-gate save_gsr(fp); 3900Sstevel@tonic-gate } 3910Sstevel@tonic-gate asr[ASR_GSR] = (int64_t)get_gsr(fp); 3920Sstevel@tonic-gate } 3930Sstevel@tonic-gate kpreempt_enable(); 3940Sstevel@tonic-gate } 3950Sstevel@tonic-gate 3960Sstevel@tonic-gate /* 3970Sstevel@tonic-gate * set the sun4u asrs, ie, the lwp's platform-dependent 3980Sstevel@tonic-gate * non-floating-point extra register state information 3990Sstevel@tonic-gate */ 4000Sstevel@tonic-gate /* ARGSUSED */ 4010Sstevel@tonic-gate void 4020Sstevel@tonic-gate setasrs(klwp_t *lwp, asrset_t asr) 4030Sstevel@tonic-gate { 4040Sstevel@tonic-gate /* for sun4u nothing to do here, added for symmetry */ 4050Sstevel@tonic-gate } 4060Sstevel@tonic-gate 4070Sstevel@tonic-gate void 4080Sstevel@tonic-gate setfpasrs(klwp_t *lwp, asrset_t asr) 4090Sstevel@tonic-gate { 4100Sstevel@tonic-gate kfpu_t *fp = lwptofpu(lwp); 4110Sstevel@tonic-gate uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL); 4120Sstevel@tonic-gate 4130Sstevel@tonic-gate kpreempt_disable(); 4140Sstevel@tonic-gate if (ttolwp(curthread) == lwp) 4150Sstevel@tonic-gate fp->fpu_fprs = _fp_read_fprs(); 4160Sstevel@tonic-gate if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) { 4170Sstevel@tonic-gate set_gsr(asr[ASR_GSR], fp); 4180Sstevel@tonic-gate if (fpu_exists && ttolwp(curthread) == lwp) { 4190Sstevel@tonic-gate if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) { 4200Sstevel@tonic-gate _fp_write_fprs(fprs); 4210Sstevel@tonic-gate fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs; 4220Sstevel@tonic-gate } 4230Sstevel@tonic-gate restore_gsr(fp); 4240Sstevel@tonic-gate } 4250Sstevel@tonic-gate } 4260Sstevel@tonic-gate kpreempt_enable(); 
4270Sstevel@tonic-gate } 4280Sstevel@tonic-gate 4290Sstevel@tonic-gate /* 4300Sstevel@tonic-gate * Create interrupt kstats for this CPU. 4310Sstevel@tonic-gate */ 4320Sstevel@tonic-gate void 4330Sstevel@tonic-gate cpu_create_intrstat(cpu_t *cp) 4340Sstevel@tonic-gate { 4350Sstevel@tonic-gate int i; 4360Sstevel@tonic-gate kstat_t *intr_ksp; 4370Sstevel@tonic-gate kstat_named_t *knp; 4380Sstevel@tonic-gate char name[KSTAT_STRLEN]; 4390Sstevel@tonic-gate zoneid_t zoneid; 4400Sstevel@tonic-gate 4410Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 4420Sstevel@tonic-gate 4430Sstevel@tonic-gate if (pool_pset_enabled()) 4440Sstevel@tonic-gate zoneid = GLOBAL_ZONEID; 4450Sstevel@tonic-gate else 4460Sstevel@tonic-gate zoneid = ALL_ZONES; 4470Sstevel@tonic-gate 4480Sstevel@tonic-gate intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc", 4490Sstevel@tonic-gate KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid); 4500Sstevel@tonic-gate 4510Sstevel@tonic-gate /* 4520Sstevel@tonic-gate * Initialize each PIL's named kstat 4530Sstevel@tonic-gate */ 4540Sstevel@tonic-gate if (intr_ksp != NULL) { 4550Sstevel@tonic-gate intr_ksp->ks_update = cpu_kstat_intrstat_update; 4560Sstevel@tonic-gate knp = (kstat_named_t *)intr_ksp->ks_data; 4570Sstevel@tonic-gate intr_ksp->ks_private = cp; 4580Sstevel@tonic-gate for (i = 0; i < PIL_MAX; i++) { 4590Sstevel@tonic-gate (void) snprintf(name, KSTAT_STRLEN, "level-%d-time", 4600Sstevel@tonic-gate i + 1); 4610Sstevel@tonic-gate kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64); 4620Sstevel@tonic-gate (void) snprintf(name, KSTAT_STRLEN, "level-%d-count", 4630Sstevel@tonic-gate i + 1); 4640Sstevel@tonic-gate kstat_named_init(&knp[(i * 2) + 1], name, 4650Sstevel@tonic-gate KSTAT_DATA_UINT64); 4660Sstevel@tonic-gate } 4670Sstevel@tonic-gate kstat_install(intr_ksp); 4680Sstevel@tonic-gate } 4690Sstevel@tonic-gate } 4700Sstevel@tonic-gate 4710Sstevel@tonic-gate /* 4720Sstevel@tonic-gate * Delete interrupt kstats for this CPU. 
4730Sstevel@tonic-gate */ 4740Sstevel@tonic-gate void 4750Sstevel@tonic-gate cpu_delete_intrstat(cpu_t *cp) 4760Sstevel@tonic-gate { 4770Sstevel@tonic-gate kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES); 4780Sstevel@tonic-gate } 4790Sstevel@tonic-gate 4800Sstevel@tonic-gate /* 4810Sstevel@tonic-gate * Convert interrupt statistics from CPU ticks to nanoseconds and 4820Sstevel@tonic-gate * update kstat. 4830Sstevel@tonic-gate */ 4840Sstevel@tonic-gate int 4850Sstevel@tonic-gate cpu_kstat_intrstat_update(kstat_t *ksp, int rw) 4860Sstevel@tonic-gate { 4870Sstevel@tonic-gate kstat_named_t *knp = ksp->ks_data; 4880Sstevel@tonic-gate cpu_t *cpup = (cpu_t *)ksp->ks_private; 4890Sstevel@tonic-gate int i; 4900Sstevel@tonic-gate 4910Sstevel@tonic-gate if (rw == KSTAT_WRITE) 4920Sstevel@tonic-gate return (EACCES); 4930Sstevel@tonic-gate 4940Sstevel@tonic-gate /* 4950Sstevel@tonic-gate * We use separate passes to copy and convert the statistics to 4960Sstevel@tonic-gate * nanoseconds. This assures that the snapshot of the data is as 4970Sstevel@tonic-gate * self-consistent as possible. 
4980Sstevel@tonic-gate */ 4990Sstevel@tonic-gate 5000Sstevel@tonic-gate for (i = 0; i < PIL_MAX; i++) { 5010Sstevel@tonic-gate knp[i * 2].value.ui64 = cpup->cpu_m.intrstat[i + 1][0]; 5020Sstevel@tonic-gate knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i]; 5030Sstevel@tonic-gate } 5040Sstevel@tonic-gate 5050Sstevel@tonic-gate for (i = 0; i < PIL_MAX; i++) { 5060Sstevel@tonic-gate knp[i * 2].value.ui64 = 5070Sstevel@tonic-gate (uint64_t)tick2ns((hrtime_t)knp[i * 2].value.ui64, 5085084Sjohnlev cpup->cpu_id); 5090Sstevel@tonic-gate } 5100Sstevel@tonic-gate 5110Sstevel@tonic-gate return (0); 5120Sstevel@tonic-gate } 5130Sstevel@tonic-gate 5140Sstevel@tonic-gate /* 5150Sstevel@tonic-gate * Called by common/os/cpu.c for psrinfo(1m) kstats 5160Sstevel@tonic-gate */ 5170Sstevel@tonic-gate char * 5180Sstevel@tonic-gate cpu_fru_fmri(cpu_t *cp) 5190Sstevel@tonic-gate { 5200Sstevel@tonic-gate return (cpunodes[cp->cpu_id].fru_fmri); 5210Sstevel@tonic-gate } 5220Sstevel@tonic-gate 5230Sstevel@tonic-gate /* 5240Sstevel@tonic-gate * An interrupt thread is ending a time slice, so compute the interval it 5250Sstevel@tonic-gate * ran for and update the statistic for its PIL. 5260Sstevel@tonic-gate */ 5270Sstevel@tonic-gate void 5280Sstevel@tonic-gate cpu_intr_swtch_enter(kthread_id_t t) 5290Sstevel@tonic-gate { 5300Sstevel@tonic-gate uint64_t interval; 5310Sstevel@tonic-gate uint64_t start; 532590Sesolom cpu_t *cpu; 5330Sstevel@tonic-gate 5340Sstevel@tonic-gate ASSERT((t->t_flag & T_INTR_THREAD) != 0); 5350Sstevel@tonic-gate ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL); 5360Sstevel@tonic-gate 5370Sstevel@tonic-gate /* 5380Sstevel@tonic-gate * We could be here with a zero timestamp. This could happen if: 5390Sstevel@tonic-gate * an interrupt thread which no longer has a pinned thread underneath 5400Sstevel@tonic-gate * it (i.e. it blocked at some point in its past) has finished running 5410Sstevel@tonic-gate * its handler. 
intr_thread() updated the interrupt statistic for its 5420Sstevel@tonic-gate * PIL and zeroed its timestamp. Since there was no pinned thread to 5430Sstevel@tonic-gate * return to, swtch() gets called and we end up here. 5440Sstevel@tonic-gate * 5450Sstevel@tonic-gate * It can also happen if an interrupt thread in intr_thread() calls 5460Sstevel@tonic-gate * preempt. It will have already taken care of updating stats. In 5470Sstevel@tonic-gate * this event, the interrupt thread will be runnable. 5480Sstevel@tonic-gate */ 5490Sstevel@tonic-gate if (t->t_intr_start) { 5500Sstevel@tonic-gate do { 5510Sstevel@tonic-gate start = t->t_intr_start; 55212235Sprashanth.sreenivasa@oracle.com interval = CLOCK_TICK_COUNTER() - start; 5530Sstevel@tonic-gate } while (cas64(&t->t_intr_start, start, 0) != start); 554590Sesolom cpu = CPU; 555590Sesolom if (cpu->cpu_m.divisor > 1) 556590Sesolom interval *= cpu->cpu_m.divisor; 557590Sesolom cpu->cpu_m.intrstat[t->t_pil][0] += interval; 558590Sesolom 559590Sesolom atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate], 560590Sesolom interval); 5610Sstevel@tonic-gate } else 5620Sstevel@tonic-gate ASSERT(t->t_intr == NULL || t->t_state == TS_RUN); 5630Sstevel@tonic-gate } 5640Sstevel@tonic-gate 5650Sstevel@tonic-gate 5660Sstevel@tonic-gate /* 5670Sstevel@tonic-gate * An interrupt thread is returning from swtch(). Place a starting timestamp 5680Sstevel@tonic-gate * in its thread structure. 
5690Sstevel@tonic-gate */ 5700Sstevel@tonic-gate void 5710Sstevel@tonic-gate cpu_intr_swtch_exit(kthread_id_t t) 5720Sstevel@tonic-gate { 5730Sstevel@tonic-gate uint64_t ts; 5740Sstevel@tonic-gate 5750Sstevel@tonic-gate ASSERT((t->t_flag & T_INTR_THREAD) != 0); 5760Sstevel@tonic-gate ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL); 5770Sstevel@tonic-gate 5780Sstevel@tonic-gate do { 5790Sstevel@tonic-gate ts = t->t_intr_start; 58012235Sprashanth.sreenivasa@oracle.com } while (cas64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) != ts); 5810Sstevel@tonic-gate } 5820Sstevel@tonic-gate 5830Sstevel@tonic-gate 5840Sstevel@tonic-gate int 5850Sstevel@tonic-gate blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class) 5860Sstevel@tonic-gate { 5870Sstevel@tonic-gate if (&plat_blacklist) 5880Sstevel@tonic-gate return (plat_blacklist(cmd, scheme, fmri, class)); 5890Sstevel@tonic-gate 5900Sstevel@tonic-gate return (ENOTSUP); 5910Sstevel@tonic-gate } 5920Sstevel@tonic-gate 5930Sstevel@tonic-gate int 5940Sstevel@tonic-gate kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp) 5950Sstevel@tonic-gate { 5960Sstevel@tonic-gate extern void kdi_flush_caches(void); 5970Sstevel@tonic-gate size_t nread = 0; 5980Sstevel@tonic-gate uint32_t word; 5990Sstevel@tonic-gate int slop, i; 6000Sstevel@tonic-gate 6010Sstevel@tonic-gate kdi_flush_caches(); 6020Sstevel@tonic-gate membar_enter(); 6030Sstevel@tonic-gate 6040Sstevel@tonic-gate /* We might not begin on a word boundary. 
*/ 6050Sstevel@tonic-gate if ((slop = addr & 3) != 0) { 6060Sstevel@tonic-gate word = ldphys(addr & ~3); 6070Sstevel@tonic-gate for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nread++) 6080Sstevel@tonic-gate *buf++ = ((uchar_t *)&word)[i]; 6090Sstevel@tonic-gate addr = roundup(addr, 4); 6100Sstevel@tonic-gate } 6110Sstevel@tonic-gate 6120Sstevel@tonic-gate while (nbytes > 0) { 6130Sstevel@tonic-gate word = ldphys(addr); 6140Sstevel@tonic-gate for (i = 0; i < 4 && nbytes > 0; i++, nbytes--, nread++, addr++) 6150Sstevel@tonic-gate *buf++ = ((uchar_t *)&word)[i]; 6160Sstevel@tonic-gate } 6170Sstevel@tonic-gate 6180Sstevel@tonic-gate kdi_flush_caches(); 6190Sstevel@tonic-gate 6200Sstevel@tonic-gate *ncopiedp = nread; 6210Sstevel@tonic-gate return (0); 6220Sstevel@tonic-gate } 6230Sstevel@tonic-gate 6240Sstevel@tonic-gate int 6250Sstevel@tonic-gate kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp) 6260Sstevel@tonic-gate { 6270Sstevel@tonic-gate extern void kdi_flush_caches(void); 6280Sstevel@tonic-gate size_t nwritten = 0; 6290Sstevel@tonic-gate uint32_t word; 6300Sstevel@tonic-gate int slop, i; 6310Sstevel@tonic-gate 6320Sstevel@tonic-gate kdi_flush_caches(); 6330Sstevel@tonic-gate 6340Sstevel@tonic-gate /* We might not begin on a word boundary. 
*/ 6350Sstevel@tonic-gate if ((slop = addr & 3) != 0) { 6360Sstevel@tonic-gate word = ldphys(addr & ~3); 6370Sstevel@tonic-gate for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nwritten++) 6380Sstevel@tonic-gate ((uchar_t *)&word)[i] = *buf++; 6390Sstevel@tonic-gate stphys(addr & ~3, word); 6400Sstevel@tonic-gate addr = roundup(addr, 4); 6410Sstevel@tonic-gate } 6420Sstevel@tonic-gate 6430Sstevel@tonic-gate while (nbytes > 3) { 6440Sstevel@tonic-gate for (word = 0, i = 0; i < 4; i++, nbytes--, nwritten++) 6450Sstevel@tonic-gate ((uchar_t *)&word)[i] = *buf++; 6460Sstevel@tonic-gate stphys(addr, word); 6470Sstevel@tonic-gate addr += 4; 6480Sstevel@tonic-gate } 6490Sstevel@tonic-gate 6500Sstevel@tonic-gate /* We might not end with a whole word. */ 6510Sstevel@tonic-gate if (nbytes > 0) { 6520Sstevel@tonic-gate word = ldphys(addr); 6530Sstevel@tonic-gate for (i = 0; nbytes > 0; i++, nbytes--, nwritten++) 6540Sstevel@tonic-gate ((uchar_t *)&word)[i] = *buf++; 6550Sstevel@tonic-gate stphys(addr, word); 6560Sstevel@tonic-gate } 6570Sstevel@tonic-gate 6580Sstevel@tonic-gate membar_enter(); 6590Sstevel@tonic-gate kdi_flush_caches(); 6600Sstevel@tonic-gate 6610Sstevel@tonic-gate *ncopiedp = nwritten; 6620Sstevel@tonic-gate return (0); 6630Sstevel@tonic-gate } 6640Sstevel@tonic-gate 6650Sstevel@tonic-gate static void 6660Sstevel@tonic-gate kdi_kernpanic(struct regs *regs, uint_t tt) 6670Sstevel@tonic-gate { 6680Sstevel@tonic-gate sync_reg_buf = *regs; 6690Sstevel@tonic-gate sync_tt = tt; 6700Sstevel@tonic-gate 6710Sstevel@tonic-gate sync_handler(); 6720Sstevel@tonic-gate } 6730Sstevel@tonic-gate 6740Sstevel@tonic-gate static void 6750Sstevel@tonic-gate kdi_plat_call(void (*platfn)(void)) 6760Sstevel@tonic-gate { 6770Sstevel@tonic-gate if (platfn != NULL) { 6780Sstevel@tonic-gate prom_suspend_prepost(); 6790Sstevel@tonic-gate platfn(); 6800Sstevel@tonic-gate prom_resume_prepost(); 6810Sstevel@tonic-gate } 6820Sstevel@tonic-gate } 6830Sstevel@tonic-gate 
68411066Srafael.vanoni@sun.com /* 68511066Srafael.vanoni@sun.com * kdi_system_claim and release are defined here for all sun4 platforms and 68611066Srafael.vanoni@sun.com * pointed to by mach_kdi_init() to provide default callbacks for such systems. 68711066Srafael.vanoni@sun.com * Specific sun4u or sun4v platforms may implement their own claim and release 68811066Srafael.vanoni@sun.com * routines, at which point their respective callbacks will be updated. 68911066Srafael.vanoni@sun.com */ 69011066Srafael.vanoni@sun.com static void 69111066Srafael.vanoni@sun.com kdi_system_claim(void) 69211066Srafael.vanoni@sun.com { 69311066Srafael.vanoni@sun.com lbolt_debug_entry(); 69411066Srafael.vanoni@sun.com } 69511066Srafael.vanoni@sun.com 69611066Srafael.vanoni@sun.com static void 69711066Srafael.vanoni@sun.com kdi_system_release(void) 69811066Srafael.vanoni@sun.com { 69911066Srafael.vanoni@sun.com lbolt_debug_return(); 70011066Srafael.vanoni@sun.com } 70111066Srafael.vanoni@sun.com 7020Sstevel@tonic-gate void 7030Sstevel@tonic-gate mach_kdi_init(kdi_t *kdi) 7040Sstevel@tonic-gate { 7050Sstevel@tonic-gate kdi->kdi_plat_call = kdi_plat_call; 7063446Smrj kdi->kdi_kmdb_enter = kmdb_enter; 70711066Srafael.vanoni@sun.com kdi->pkdi_system_claim = kdi_system_claim; 70811066Srafael.vanoni@sun.com kdi->pkdi_system_release = kdi_system_release; 7090Sstevel@tonic-gate kdi->mkdi_cpu_index = kdi_cpu_index; 7100Sstevel@tonic-gate kdi->mkdi_trap_vatotte = kdi_trap_vatotte; 7110Sstevel@tonic-gate kdi->mkdi_kernpanic = kdi_kernpanic; 7120Sstevel@tonic-gate } 713590Sesolom 714590Sesolom 715590Sesolom /* 716590Sesolom * get_cpu_mstate() is passed an array of timestamps, NCMSTATES 717590Sesolom * long, and it fills in the array with the time spent on cpu in 718590Sesolom * each of the mstates, where time is returned in nsec. 
719590Sesolom * 720590Sesolom * No guarantee is made that the returned values in times[] will 721590Sesolom * monotonically increase on sequential calls, although this will 722590Sesolom * be true in the long run. Any such guarantee must be handled by 723590Sesolom * the caller, if needed. This can happen if we fail to account 724590Sesolom * for elapsed time due to a generation counter conflict, yet we 725590Sesolom * did account for it on a prior call (see below). 726590Sesolom * 727590Sesolom * The complication is that the cpu in question may be updating 728590Sesolom * its microstate at the same time that we are reading it. 729590Sesolom * Because the microstate is only updated when the CPU's state 730590Sesolom * changes, the values in cpu_intracct[] can be indefinitely out 731590Sesolom * of date. To determine true current values, it is necessary to 732590Sesolom * compare the current time with cpu_mstate_start, and add the 733590Sesolom * difference to times[cpu_mstate]. 734590Sesolom * 735590Sesolom * This can be a problem if those values are changing out from 736590Sesolom * under us. Because the code path in new_cpu_mstate() is 737590Sesolom * performance critical, we have not added a lock to it. Instead, 738590Sesolom * we have added a generation counter. Before beginning 739590Sesolom * modifications, the counter is set to 0. After modifications, 740590Sesolom * it is set to the old value plus one. 741590Sesolom * 742590Sesolom * get_cpu_mstate() will not consider the values of cpu_mstate 743590Sesolom * and cpu_mstate_start to be usable unless the value of 744590Sesolom * cpu_mstate_gen is both non-zero and unchanged, both before and 745590Sesolom * after reading the mstate information. Note that we must 746590Sesolom * protect against out-of-order loads around accesses to the 747590Sesolom * generation counter. Also, this is a best effort approach in 748590Sesolom * that we do not retry should the counter be found to have 749590Sesolom * changed. 
750590Sesolom * 751590Sesolom * cpu_intracct[] is used to identify time spent in each CPU 752590Sesolom * mstate while handling interrupts. Such time should be reported 753590Sesolom * against system time, and so is subtracted out from its 754590Sesolom * corresponding cpu_acct[] time and added to 755590Sesolom * cpu_acct[CMS_SYSTEM]. Additionally, intracct time is stored in 756590Sesolom * %ticks, but acct time may be stored as %sticks, thus requiring 757590Sesolom * different conversions before they can be compared. 758590Sesolom */ 759590Sesolom 760590Sesolom void 761590Sesolom get_cpu_mstate(cpu_t *cpu, hrtime_t *times) 762590Sesolom { 763590Sesolom int i; 764590Sesolom hrtime_t now, start; 765590Sesolom uint16_t gen; 766590Sesolom uint16_t state; 767590Sesolom hrtime_t intracct[NCMSTATES]; 768590Sesolom 769590Sesolom /* 770590Sesolom * Load all volatile state under the protection of membar. 771590Sesolom * cpu_acct[cpu_mstate] must be loaded to avoid double counting 772590Sesolom * of (now - cpu_mstate_start) by a change in CPU mstate that 773590Sesolom * arrives after we make our last check of cpu_mstate_gen. 
774590Sesolom */ 775590Sesolom 776590Sesolom now = gethrtime_unscaled(); 777590Sesolom gen = cpu->cpu_mstate_gen; 778590Sesolom 779590Sesolom membar_consumer(); /* guarantee load ordering */ 780590Sesolom start = cpu->cpu_mstate_start; 781590Sesolom state = cpu->cpu_mstate; 782590Sesolom for (i = 0; i < NCMSTATES; i++) { 783590Sesolom intracct[i] = cpu->cpu_intracct[i]; 784590Sesolom times[i] = cpu->cpu_acct[i]; 785590Sesolom } 786590Sesolom membar_consumer(); /* guarantee load ordering */ 787590Sesolom 788590Sesolom if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start) 789590Sesolom times[state] += now - start; 790590Sesolom 791590Sesolom for (i = 0; i < NCMSTATES; i++) { 792590Sesolom scalehrtime(×[i]); 793590Sesolom intracct[i] = tick2ns((hrtime_t)intracct[i], cpu->cpu_id); 794590Sesolom } 795590Sesolom 796590Sesolom for (i = 0; i < NCMSTATES; i++) { 797590Sesolom if (i == CMS_SYSTEM) 798590Sesolom continue; 799590Sesolom times[i] -= intracct[i]; 800590Sesolom if (times[i] < 0) { 801590Sesolom intracct[i] += times[i]; 802590Sesolom times[i] = 0; 803590Sesolom } 804590Sesolom times[CMS_SYSTEM] += intracct[i]; 805590Sesolom } 806590Sesolom } 8073446Smrj 8083446Smrj void 8093446Smrj mach_cpu_pause(volatile char *safe) 8103446Smrj { 8113446Smrj /* 8123446Smrj * This cpu is now safe. 8133446Smrj */ 8143446Smrj *safe = PAUSE_WAIT; 8153446Smrj membar_enter(); /* make sure stores are flushed */ 8163446Smrj 8173446Smrj /* 8183446Smrj * Now we wait. When we are allowed to continue, safe 8193446Smrj * will be set to PAUSE_IDLE. 
8203446Smrj */ 8213446Smrj while (*safe != PAUSE_IDLE) 8223446Smrj SMT_PAUSE(); 8233446Smrj } 8243446Smrj 8253446Smrj /*ARGSUSED*/ 8263446Smrj int 8275084Sjohnlev plat_mem_do_mmio(struct uio *uio, enum uio_rw rw) 8283446Smrj { 8295084Sjohnlev return (ENOTSUP); 8303446Smrj } 8313446Smrj 83210843SDave.Plauger@Sun.COM /* cpu threshold for compressed dumps */ 83310843SDave.Plauger@Sun.COM #ifdef sun4v 834*12931SDave.Plauger@Sun.COM uint_t dump_plat_mincpu_default = DUMP_PLAT_SUN4V_MINCPU; 83510843SDave.Plauger@Sun.COM #else 836*12931SDave.Plauger@Sun.COM uint_t dump_plat_mincpu_default = DUMP_PLAT_SUN4U_MINCPU; 83710843SDave.Plauger@Sun.COM #endif 83810843SDave.Plauger@Sun.COM 8393446Smrj int 8403446Smrj dump_plat_addr() 8413446Smrj { 8423446Smrj return (0); 8433446Smrj } 8443446Smrj 8453446Smrj void 8463446Smrj dump_plat_pfn() 8473446Smrj { 8483446Smrj } 8493446Smrj 8503446Smrj /* ARGSUSED */ 8513446Smrj int 8523446Smrj dump_plat_data(void *dump_cdata) 8533446Smrj { 8543446Smrj return (0); 8553446Smrj } 8563446Smrj 8573446Smrj /* ARGSUSED */ 8583446Smrj int 8593446Smrj plat_hold_page(pfn_t pfn, int lock, page_t **pp_ret) 8603446Smrj { 8613446Smrj return (PLAT_HOLD_OK); 8623446Smrj } 8633446Smrj 8643446Smrj /* ARGSUSED */ 8653446Smrj void 8663446Smrj plat_release_page(page_t *pp) 8673446Smrj { 8683446Smrj } 8698960SJan.Setje-Eilers@Sun.COM 8708960SJan.Setje-Eilers@Sun.COM /* ARGSUSED */ 8718960SJan.Setje-Eilers@Sun.COM void 8728960SJan.Setje-Eilers@Sun.COM progressbar_key_abort(ldi_ident_t li) 8738960SJan.Setje-Eilers@Sun.COM { 8748960SJan.Setje-Eilers@Sun.COM } 87511066Srafael.vanoni@sun.com 87611066Srafael.vanoni@sun.com /* 87711066Srafael.vanoni@sun.com * We need to post a soft interrupt to reprogram the lbolt cyclic when 87811066Srafael.vanoni@sun.com * switching from event to cyclic driven lbolt. The following code adds 87911066Srafael.vanoni@sun.com * and posts the softint for sun4 platforms. 
 */
/* Softint number returned by add_softintr(); consumed by setsoftint(). */
static uint64_t lbolt_softint_inum;

/*
 * Register the LOCK_LEVEL soft interrupt whose handler switches lbolt
 * from event-driven to cyclic-driven mode.
 */
void
lbolt_softint_add(void)
{
	lbolt_softint_inum = add_softintr(LOCK_LEVEL,
	    (softintrfunc)lbolt_ev_to_cyclic, NULL, SOFTINT_MT);
}

/*
 * Post the previously registered lbolt-reprogram soft interrupt.
 */
void
lbolt_softint_post(void)
{
	setsoftint(lbolt_softint_inum);
}