10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 52005Selowe * Common Development and Distribution License (the "License"). 62005Selowe * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 228960SJan.Setje-Eilers@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #include <sys/types.h> 270Sstevel@tonic-gate #include <sys/kstat.h> 280Sstevel@tonic-gate #include <sys/param.h> 290Sstevel@tonic-gate #include <sys/stack.h> 300Sstevel@tonic-gate #include <sys/regset.h> 310Sstevel@tonic-gate #include <sys/thread.h> 320Sstevel@tonic-gate #include <sys/proc.h> 330Sstevel@tonic-gate #include <sys/procfs_isa.h> 340Sstevel@tonic-gate #include <sys/kmem.h> 350Sstevel@tonic-gate #include <sys/cpuvar.h> 360Sstevel@tonic-gate #include <sys/systm.h> 370Sstevel@tonic-gate #include <sys/machpcb.h> 380Sstevel@tonic-gate #include <sys/machasi.h> 390Sstevel@tonic-gate #include <sys/vis.h> 400Sstevel@tonic-gate #include <sys/fpu/fpusystm.h> 410Sstevel@tonic-gate #include <sys/cpu_module.h> 420Sstevel@tonic-gate #include <sys/privregs.h> 430Sstevel@tonic-gate #include <sys/archsystm.h> 440Sstevel@tonic-gate #include <sys/atomic.h> 450Sstevel@tonic-gate #include <sys/cmn_err.h> 460Sstevel@tonic-gate #include <sys/time.h> 470Sstevel@tonic-gate #include <sys/clock.h> 480Sstevel@tonic-gate #include <sys/cmp.h> 490Sstevel@tonic-gate #include <sys/platform_module.h> 500Sstevel@tonic-gate #include <sys/bl.h> 510Sstevel@tonic-gate #include <sys/nvpair.h> 520Sstevel@tonic-gate #include <sys/kdi_impl.h> 530Sstevel@tonic-gate #include <sys/machsystm.h> 540Sstevel@tonic-gate #include <sys/sysmacros.h> 550Sstevel@tonic-gate #include <sys/promif.h> 560Sstevel@tonic-gate #include <sys/pool_pset.h> 573446Smrj #include <sys/mem.h> 583446Smrj #include <sys/dumphdr.h> 592005Selowe #include <vm/seg_kmem.h> 603446Smrj #include <sys/hold_page.h> 613446Smrj #include <sys/cpu.h> 620Sstevel@tonic-gate 630Sstevel@tonic-gate int maxphys = MMU_PAGESIZE * 16; /* 128k */ 640Sstevel@tonic-gate int klustsize = MMU_PAGESIZE * 16; /* 128k */ 650Sstevel@tonic-gate 660Sstevel@tonic-gate /* 670Sstevel@tonic-gate * Initialize kernel thread's stack. 
 */
caddr_t
thread_stk_init(caddr_t stk)
{
	kfpu_t *fp;
	ulong_t align;

	/* allocate extra space for floating point state */
	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
	align = (uintptr_t)stk & 0x3f;
	stk -= align;	/* force v9_fpu to be 16 byte aligned */
	fp = (kfpu_t *)stk;
	fp->fpu_fprs = 0;	/* kernel threads start with the FPU disabled */

	/* reserve a minimum stack frame below the fp save area */
	stk -= SA(MINFRAME);
	return (stk);
}

/* sizes of the register-window save buffers for 32-bit and 64-bit lwps */
#define	WIN32_SIZE	(MAXWIN * sizeof (struct rwindow32))
#define	WIN64_SIZE	(MAXWIN * sizeof (struct rwindow64))

/* kmem caches backing per-lwp register-window save buffers */
kmem_cache_t	*wbuf32_cache;
kmem_cache_t	*wbuf64_cache;

void
lwp_stk_cache_init(void)
{
	/*
	 * Window buffers are allocated from the static arena
	 * because they are accessed at TL>0. We also must use
	 * KMC_NOHASH to prevent them from straddling page
	 * boundaries as they are accessed by physical address.
	 */
	wbuf32_cache = kmem_cache_create("wbuf32_cache", WIN32_SIZE,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
	wbuf64_cache = kmem_cache_create("wbuf64_cache", WIN64_SIZE,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
}

/*
 * Initialize lwp's kernel stack.
 * Note that now that the floating point register save area (kfpu_t)
 * has been broken out from machpcb and aligned on a 64 byte boundary so that
 * we can do block load/stores to/from it, there are a couple of potential
 * optimizations to save stack space. 1. The floating point register save
 * area could be aligned on a 16 byte boundary, and the floating point code
 * changed to (a) check the alignment and (b) use different save/restore
 * macros depending upon the alignment. 2. The lwp_stk_init code below
 * could be changed to calculate if less space would be wasted if machpcb
 * was first instead of second. However there is a REGOFF macro used in
 * locore, syscall_trap, machdep and mlsetup that assumes that the saved
 * register area is a fixed distance from the %sp, and would have to be
 * changed to a pointer or something...JJ said later.
 */
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	struct machpcb *mpcb;
	kfpu_t *fp;
	uintptr_t aln;

	/* carve the fp save area, forced to a 64-byte boundary */
	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
	aln = (uintptr_t)stk & 0x3F;
	stk -= aln;
	fp = (kfpu_t *)stk;
	/* machpcb sits immediately below the fp save area */
	stk -= SA(sizeof (struct machpcb));
	mpcb = (struct machpcb *)stk;
	bzero(mpcb, sizeof (struct machpcb));
	bzero(fp, sizeof (kfpu_t) + GSR_SIZE);
	lwp->lwp_regs = (void *)&mpcb->mpcb_regs;
	lwp->lwp_fpu = (void *)fp;
	mpcb->mpcb_fpu = fp;
	mpcb->mpcb_fpu->fpu_q = mpcb->mpcb_fpu_q;
	mpcb->mpcb_thread = lwp->lwp_thread;
	mpcb->mpcb_wbcnt = 0;
	/* pick the window-buffer flavor matching the process data model */
	if (lwp->lwp_procp->p_model == DATAMODEL_ILP32) {
		mpcb->mpcb_wstate = WSTATE_USER32;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
	} else {
		mpcb->mpcb_wstate = WSTATE_USER64;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
	}
	ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
	/* cache physical addresses: these are consumed at TL>0 */
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
	mpcb->mpcb_pa = va_to_pa(mpcb);
	return (stk);
}

/*
 * Release the lwp's register-window save buffer.
 */
void
lwp_stk_fini(klwp_t *lwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);

	/*
	 * there might be windows still in the wbuf due to unmapped
	 * stack, misaligned stack pointer, etc.  We just free it.
	 */
	mpcb->mpcb_wbcnt = 0;
	if (mpcb->mpcb_wstate == WSTATE_USER32)
		kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
	else
		kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
	mpcb->mpcb_wbuf = NULL;
	mpcb->mpcb_wbuf_pa = -1;
}


/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
	kthread_t *t, *pt = lwptot(lwp);
	struct machpcb *mpcb = lwptompcb(clwp);
	struct machpcb *pmpcb = lwptompcb(lwp);
	kfpu_t *fp, *pfp = lwptofpu(lwp);
	caddr_t wbuf;
	uint_t wstate;

	t = mpcb->mpcb_thread;
	/*
	 * remember child's fp and wbuf since they will get erased during
	 * the bcopy.
	 */
	fp = mpcb->mpcb_fpu;
	wbuf = mpcb->mpcb_wbuf;
	wstate = mpcb->mpcb_wstate;
	/*
	 * Don't copy mpcb_frame since we hand-crafted it
	 * in thread_load().
	 */
	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF);
	/* restore the child-private fields clobbered by the bcopy */
	mpcb->mpcb_thread = t;
	mpcb->mpcb_fpu = fp;
	fp->fpu_q = mpcb->mpcb_fpu_q;

	/*
	 * It is theoretically possibly for the lwp's wstate to
	 * be different from its value assigned in lwp_stk_init,
	 * since lwp_stk_init assumed the data model of the process.
	 * Here, we took on the data model of the cloned lwp.
	 */
	if (mpcb->mpcb_wstate != wstate) {
		if (wstate == WSTATE_USER32) {
			kmem_cache_free(wbuf32_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
			wstate = WSTATE_USER64;
		} else {
			kmem_cache_free(wbuf64_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
			wstate = WSTATE_USER32;
		}
	}

	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf = wbuf;
	mpcb->mpcb_wbuf_pa = va_to_pa(wbuf);

	ASSERT(mpcb->mpcb_wstate == wstate);

	/* copy any pending saved windows from the parent */
	if (mpcb->mpcb_wbcnt != 0) {
		bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf,
		    mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ?
		    sizeof (struct rwindow32) : sizeof (struct rwindow64)));
	}

	/* if the parent is running here, refresh its live FPRS first */
	if (pt == curthread)
		pfp->fpu_fprs = _fp_read_fprs();
	if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) {
		if (pt == curthread && fpu_exists) {
			/* GSR is live in hardware; flush it to the child */
			save_gsr(clwp->lwp_fpu);
		} else {
			uint64_t gsr;
			gsr = get_gsr(lwp->lwp_fpu);
			set_gsr(gsr, clwp->lwp_fpu);
		}
		fp_fork(lwp, clwp);
	}
}

/*
 * Free lwp fpu regs.
 */
void
lwp_freeregs(klwp_t *lwp, int isexec)
{
	kfpu_t *fp = lwptofpu(lwp);

	if (lwptot(lwp) == curthread)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF))
		fp_free(fp, isexec);
}

/*
 * These functions are currently unused on sparc.
 */
/*ARGSUSED*/
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{}

/*ARGSUSED*/
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{}

/*
 * fill in the extra register state area specified with the
 * specified lwp's platform-dependent non-floating-point extra
 * register state information
 */
/* ARGSUSED */
void
xregs_getgfiller(klwp_id_t lwp, caddr_t xrp)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * fill in the extra register state area specified with the specified lwp's
 * platform-dependent floating-point extra register state information.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
xregs_getfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr;

	/*
	 * fp_fksave() does not flush the GSR register into
	 * the lwp area, so do it now
	 */
	kpreempt_disable();
	if (ttolwp(curthread) == lwp && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		/* enable the FPU if needed so the GSR can be read */
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		save_gsr(fp);
	}
	gsr = get_gsr(fp);
	kpreempt_enable();
	PRXREG_GSR(xregs) = gsr;
}

/*
 * set the specified lwp's platform-dependent non-floating-point
 * extra register state based on the specified input
 */
/* ARGSUSED */
void
xregs_setgfiller(klwp_id_t lwp, caddr_t xrp)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * set the specified lwp's platform-dependent floating-point
 * extra register state based on the specified input
 */
void
xregs_setfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr = PRXREG_GSR(xregs);

	kpreempt_disable();
	set_gsr(gsr, lwptofpu(lwp));

	/* if the target lwp is running here, also load the hardware GSR */
	if ((lwp == ttolwp(curthread)) && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		restore_gsr(lwptofpu(lwp));
	}
	kpreempt_enable();
}

/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * non-floating-point extra register state information
 */
/* ARGSUSED */
void
getasrs(klwp_t *lwp, asrset_t asr)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * floating-point extra register state information
 */
void
getfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		if (fpu_exists && ttolwp(curthread) == lwp) {
			/* enable the FPU if needed so the GSR can be saved */
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			save_gsr(fp);
		}
		asr[ASR_GSR] = (int64_t)get_gsr(fp);
	}
	kpreempt_enable();
}

/*
 * set the sun4u asrs, ie, the lwp's platform-dependent
 * non-floating-point extra register state information
 */
/* ARGSUSED */
void
setasrs(klwp_t *lwp, asrset_t asr)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * set the sun4u asrs, ie, the lwp's platform-dependent
 * floating-point extra register state information
 */
void
setfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		set_gsr(asr[ASR_GSR], fp);
		/* if the target lwp is running here, load the hardware GSR */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			restore_gsr(fp);
		}
	}
	kpreempt_enable();
}

/*
 * Create interrupt kstats for this CPU.
 */
void
cpu_create_intrstat(cpu_t *cp)
{
	int i;
	kstat_t *intr_ksp;
	kstat_named_t *knp;
	char name[KSTAT_STRLEN];
	zoneid_t zoneid;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (pool_pset_enabled())
		zoneid = GLOBAL_ZONEID;
	else
		zoneid = ALL_ZONES;

	/* two named kstats (time + count) per PIL */
	intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
	    KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

	/*
	 * Initialize each PIL's named kstat
	 */
	if (intr_ksp != NULL) {
		intr_ksp->ks_update = cpu_kstat_intrstat_update;
		knp = (kstat_named_t *)intr_ksp->ks_data;
		intr_ksp->ks_private = cp;
		for (i = 0; i < PIL_MAX; i++) {
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
			    i + 1);
			kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
			    i + 1);
			kstat_named_init(&knp[(i * 2) + 1], name,
			    KSTAT_DATA_UINT64);
		}
		kstat_install(intr_ksp);
	}
}

/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
	kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}

/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
	kstat_named_t *knp = ksp->ks_data;
	cpu_t *cpup = (cpu_t *)ksp->ks_private;
	int i;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	/*
	 * We use separate passes to copy and convert the statistics to
	 * nanoseconds. This assures that the snapshot of the data is as
	 * self-consistent as possible.
	 */

	for (i = 0; i < PIL_MAX; i++) {
		knp[i * 2].value.ui64 = cpup->cpu_m.intrstat[i + 1][0];
		knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
	}

	for (i = 0; i < PIL_MAX; i++) {
		knp[i * 2].value.ui64 =
		    (uint64_t)tick2ns((hrtime_t)knp[i * 2].value.ui64,
		    cpup->cpu_id);
	}

	return (0);
}

/*
 * Called by common/os/cpu.c for psrinfo(1m) kstats
 */
char *
cpu_fru_fmri(cpu_t *cp)
{
	return (cpunodes[cp->cpu_id].fru_fmri);
}

/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t interval;
	uint64_t start;
	cpu_t *cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler.  intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * It can also happen if an interrupt thread in intr_thread() calls
	 * preempt. It will have already taken care of updating stats. In
	 * this event, the interrupt thread will be runnable.
	 */
	if (t->t_intr_start) {
		/* atomically claim and zero the start timestamp */
		do {
			start = t->t_intr_start;
			interval = gettick_counter() - start;
		} while (cas64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		/* scale raw ticks when the CPU clock is divided down */
		if (cpu->cpu_m.divisor > 1)
			interval *= cpu->cpu_m.divisor;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}


/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/* atomically install a fresh start timestamp */
	do {
		ts = t->t_intr_start;
	} while (cas64(&t->t_intr_start, ts, gettick_counter()) != ts);
}


/*
 * Route a blacklist request to the platform module, if it provides
 * plat_blacklist (tested via weak-symbol address check).
 */
int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
	if (&plat_blacklist)
		return (plat_blacklist(cmd, scheme, fmri, class));

	return (ENOTSUP);
}

/*
 * Read nbytes of physical memory at addr into buf for the kernel
 * debugger, one 32-bit word at a time.  Returns 0 and sets *ncopiedp
 * to the number of bytes read.
 */
int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	extern void kdi_flush_caches(void);
	size_t nread = 0;
	uint32_t word;
	int slop, i;

	kdi_flush_caches();
	membar_enter();

	/* We might not begin on a word boundary. */
	if ((slop = addr & 3) != 0) {
		word = ldphys(addr & ~3);
		for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nread++)
			*buf++ = ((uchar_t *)&word)[i];
		addr = roundup(addr, 4);
	}

	while (nbytes > 0) {
		word = ldphys(addr);
		for (i = 0; i < 4 && nbytes > 0; i++, nbytes--, nread++, addr++)
			*buf++ = ((uchar_t *)&word)[i];
	}

	kdi_flush_caches();

	*ncopiedp = nread;
	return (0);
}

/*
 * Write nbytes from buf to physical memory at addr for the kernel
 * debugger, using read-modify-write for partial words at either end.
 * Returns 0 and sets *ncopiedp to the number of bytes written.
 */
int
kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	extern void kdi_flush_caches(void);
	size_t nwritten = 0;
	uint32_t word;
	int slop, i;

	kdi_flush_caches();

	/* We might not begin on a word boundary. */
	if ((slop = addr & 3) != 0) {
		word = ldphys(addr & ~3);
		for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nwritten++)
			((uchar_t *)&word)[i] = *buf++;
		stphys(addr & ~3, word);
		addr = roundup(addr, 4);
	}

	while (nbytes > 3) {
		for (word = 0, i = 0; i < 4; i++, nbytes--, nwritten++)
			((uchar_t *)&word)[i] = *buf++;
		stphys(addr, word);
		addr += 4;
	}

	/* We might not end with a whole word. */
	if (nbytes > 0) {
		word = ldphys(addr);
		for (i = 0; nbytes > 0; i++, nbytes--, nwritten++)
			((uchar_t *)&word)[i] = *buf++;
		stphys(addr, word);
	}

	membar_enter();
	kdi_flush_caches();

	*ncopiedp = nwritten;
	return (0);
}

/*
 * Panic path for the kernel debugger: stash the register state and
 * trap type where sync_handler() expects them, then invoke it.
 */
static void
kdi_kernpanic(struct regs *regs, uint_t tt)
{
	sync_reg_buf = *regs;
	sync_tt = tt;

	sync_handler();
}

/*
 * Invoke an optional platform callback, bracketed by PROM
 * suspend/resume notifications.
 */
static void
kdi_plat_call(void (*platfn)(void))
{
	if (platfn != NULL) {
		prom_suspend_prepost();
		platfn();
		prom_resume_prepost();
	}
}

6820Sstevel@tonic-gate void 6830Sstevel@tonic-gate mach_kdi_init(kdi_t *kdi) 6840Sstevel@tonic-gate { 6850Sstevel@tonic-gate kdi->kdi_plat_call = kdi_plat_call; 6863446Smrj kdi->kdi_kmdb_enter = kmdb_enter; 6870Sstevel@tonic-gate kdi->mkdi_cpu_index = kdi_cpu_index; 6880Sstevel@tonic-gate kdi->mkdi_trap_vatotte = kdi_trap_vatotte; 6890Sstevel@tonic-gate kdi->mkdi_kernpanic = kdi_kernpanic; 6900Sstevel@tonic-gate } 691590Sesolom 692590Sesolom 693590Sesolom /* 694590Sesolom * get_cpu_mstate() is passed an array of timestamps, NCMSTATES 695590Sesolom * long, and it fills in the array with the time spent on cpu in 696590Sesolom * each of the mstates, where time is returned in nsec. 697590Sesolom * 698590Sesolom * No guarantee is made that the returned values in times[] will 699590Sesolom * monotonically increase on sequential calls, although this will 700590Sesolom * be true in the long run. Any such guarantee must be handled by 701590Sesolom * the caller, if needed. This can happen if we fail to account 702590Sesolom * for elapsed time due to a generation counter conflict, yet we 703590Sesolom * did account for it on a prior call (see below). 704590Sesolom * 705590Sesolom * The complication is that the cpu in question may be updating 706590Sesolom * its microstate at the same time that we are reading it. 707590Sesolom * Because the microstate is only updated when the CPU's state 708590Sesolom * changes, the values in cpu_intracct[] can be indefinitely out 709590Sesolom * of date. To determine true current values, it is necessary to 710590Sesolom * compare the current time with cpu_mstate_start, and add the 711590Sesolom * difference to times[cpu_mstate]. 712590Sesolom * 713590Sesolom * This can be a problem if those values are changing out from 714590Sesolom * under us. Because the code path in new_cpu_mstate() is 715590Sesolom * performance critical, we have not added a lock to it. Instead, 716590Sesolom * we have added a generation counter. 
Before beginning 717590Sesolom * modifications, the counter is set to 0. After modifications, 718590Sesolom * it is set to the old value plus one. 719590Sesolom * 720590Sesolom * get_cpu_mstate() will not consider the values of cpu_mstate 721590Sesolom * and cpu_mstate_start to be usable unless the value of 722590Sesolom * cpu_mstate_gen is both non-zero and unchanged, both before and 723590Sesolom * after reading the mstate information. Note that we must 724590Sesolom * protect against out-of-order loads around accesses to the 725590Sesolom * generation counter. Also, this is a best effort approach in 726590Sesolom * that we do not retry should the counter be found to have 727590Sesolom * changed. 728590Sesolom * 729590Sesolom * cpu_intracct[] is used to identify time spent in each CPU 730590Sesolom * mstate while handling interrupts. Such time should be reported 731590Sesolom * against system time, and so is subtracted out from its 732590Sesolom * corresponding cpu_acct[] time and added to 733590Sesolom * cpu_acct[CMS_SYSTEM]. Additionally, intracct time is stored in 734590Sesolom * %ticks, but acct time may be stored as %sticks, thus requiring 735590Sesolom * different conversions before they can be compared. 736590Sesolom */ 737590Sesolom 738590Sesolom void 739590Sesolom get_cpu_mstate(cpu_t *cpu, hrtime_t *times) 740590Sesolom { 741590Sesolom int i; 742590Sesolom hrtime_t now, start; 743590Sesolom uint16_t gen; 744590Sesolom uint16_t state; 745590Sesolom hrtime_t intracct[NCMSTATES]; 746590Sesolom 747590Sesolom /* 748590Sesolom * Load all volatile state under the protection of membar. 749590Sesolom * cpu_acct[cpu_mstate] must be loaded to avoid double counting 750590Sesolom * of (now - cpu_mstate_start) by a change in CPU mstate that 751590Sesolom * arrives after we make our last check of cpu_mstate_gen. 
752590Sesolom */ 753590Sesolom 754590Sesolom now = gethrtime_unscaled(); 755590Sesolom gen = cpu->cpu_mstate_gen; 756590Sesolom 757590Sesolom membar_consumer(); /* guarantee load ordering */ 758590Sesolom start = cpu->cpu_mstate_start; 759590Sesolom state = cpu->cpu_mstate; 760590Sesolom for (i = 0; i < NCMSTATES; i++) { 761590Sesolom intracct[i] = cpu->cpu_intracct[i]; 762590Sesolom times[i] = cpu->cpu_acct[i]; 763590Sesolom } 764590Sesolom membar_consumer(); /* guarantee load ordering */ 765590Sesolom 766590Sesolom if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start) 767590Sesolom times[state] += now - start; 768590Sesolom 769590Sesolom for (i = 0; i < NCMSTATES; i++) { 770590Sesolom scalehrtime(×[i]); 771590Sesolom intracct[i] = tick2ns((hrtime_t)intracct[i], cpu->cpu_id); 772590Sesolom } 773590Sesolom 774590Sesolom for (i = 0; i < NCMSTATES; i++) { 775590Sesolom if (i == CMS_SYSTEM) 776590Sesolom continue; 777590Sesolom times[i] -= intracct[i]; 778590Sesolom if (times[i] < 0) { 779590Sesolom intracct[i] += times[i]; 780590Sesolom times[i] = 0; 781590Sesolom } 782590Sesolom times[CMS_SYSTEM] += intracct[i]; 783590Sesolom } 784590Sesolom } 7853446Smrj 7863446Smrj void 7873446Smrj mach_cpu_pause(volatile char *safe) 7883446Smrj { 7893446Smrj /* 7903446Smrj * This cpu is now safe. 7913446Smrj */ 7923446Smrj *safe = PAUSE_WAIT; 7933446Smrj membar_enter(); /* make sure stores are flushed */ 7943446Smrj 7953446Smrj /* 7963446Smrj * Now we wait. When we are allowed to continue, safe 7973446Smrj * will be set to PAUSE_IDLE. 
7983446Smrj */ 7993446Smrj while (*safe != PAUSE_IDLE) 8003446Smrj SMT_PAUSE(); 8013446Smrj } 8023446Smrj 8033446Smrj /*ARGSUSED*/ 8043446Smrj int 8055084Sjohnlev plat_mem_do_mmio(struct uio *uio, enum uio_rw rw) 8063446Smrj { 8075084Sjohnlev return (ENOTSUP); 8083446Smrj } 8093446Smrj 810*10843SDave.Plauger@Sun.COM /* cpu threshold for compressed dumps */ 811*10843SDave.Plauger@Sun.COM #ifdef sun4v 812*10843SDave.Plauger@Sun.COM uint_t dump_plat_mincpu = DUMP_PLAT_SUN4V_MINCPU; 813*10843SDave.Plauger@Sun.COM #else 814*10843SDave.Plauger@Sun.COM uint_t dump_plat_mincpu = DUMP_PLAT_SUN4U_MINCPU; 815*10843SDave.Plauger@Sun.COM #endif 816*10843SDave.Plauger@Sun.COM 8173446Smrj int 8183446Smrj dump_plat_addr() 8193446Smrj { 8203446Smrj return (0); 8213446Smrj } 8223446Smrj 8233446Smrj void 8243446Smrj dump_plat_pfn() 8253446Smrj { 8263446Smrj } 8273446Smrj 8283446Smrj /* ARGSUSED */ 8293446Smrj int 8303446Smrj dump_plat_data(void *dump_cdata) 8313446Smrj { 8323446Smrj return (0); 8333446Smrj } 8343446Smrj 8353446Smrj /* ARGSUSED */ 8363446Smrj int 8373446Smrj plat_hold_page(pfn_t pfn, int lock, page_t **pp_ret) 8383446Smrj { 8393446Smrj return (PLAT_HOLD_OK); 8403446Smrj } 8413446Smrj 8423446Smrj /* ARGSUSED */ 8433446Smrj void 8443446Smrj plat_release_page(page_t *pp) 8453446Smrj { 8463446Smrj } 8478960SJan.Setje-Eilers@Sun.COM 8488960SJan.Setje-Eilers@Sun.COM /* ARGSUSED */ 8498960SJan.Setje-Eilers@Sun.COM void 8508960SJan.Setje-Eilers@Sun.COM progressbar_key_abort(ldi_ident_t li) 8518960SJan.Setje-Eilers@Sun.COM { 8528960SJan.Setje-Eilers@Sun.COM } 853