/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/t_lock.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/reboot.h>
#include <sys/autoconf.h>
#include <sys/machsystm.h>
#include <sys/promif.h>
#include <sys/membar.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/intreg.h>
#include <sys/consdev.h>
#include <sys/kdi_impl.h>
#include <sys/traptrace.h>
#include <sys/hypervisor_api.h>
#include <sys/vmsystm.h>
#include <sys/dtrace.h>
#include <sys/xc_impl.h>
#include <sys/callb.h>
#include <sys/mdesc.h>
#include <sys/mach_descrip.h>
#include <sys/wdt.h>

/*
 * hvdump_buf_va is a pointer to the currently-configured hvdump_buf.
 * A value of NULL indicates that this area is not configured.
 * hvdump_buf_sz is tunable but will be clamped to HVDUMP_SIZE_MAX.
 */

caddr_t hvdump_buf_va;		/* VA of the hypervisor dump buffer, or NULL */
uint64_t hvdump_buf_sz = HVDUMP_SIZE_DEFAULT;	/* clamped to HVDUMP_SIZE_MAX */
static uint64_t hvdump_buf_pa;	/* PA registered with the hypervisor */

/* %tick at first entry to the panic path; 0 until the first panic */
u_longlong_t panic_tick;

extern u_longlong_t gettick();
static void reboot_machine(char *);
static void update_hvdump_buffer(void);

/*
 * For xt_sync synchronization.
 */
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

/*
 * We keep our own copies, used for cache flushing, because we can be called
 * before cpu_fiximpl().
 */
static int kdi_dcache_size;
static int kdi_dcache_linesize;
static int kdi_icache_size;
static int kdi_icache_linesize;

/*
 * Assembly support for generic modules in sun4v/ml/mach_xc.s
 */
extern void init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
extern void kdi_flush_idcache(int, int, int, int);
extern uint64_t get_cpuaddr(uint64_t, uint64_t);

/*
 * Machine dependent code to reboot.
 * "mdep" is interpreted as a character pointer; if non-null, it is a pointer
 * to a string to be used as the argument string when rebooting.
 *
 * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
 * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e.
when
 * we are in a normal shutdown sequence (interrupts are not blocked, the
 * system is not panic'ing or being suspended).
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *bootstr, boolean_t invoke_cb)
{
	extern void pm_cfb_check_and_powerup(void);

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * At a high interrupt level we can't:
	 *	1) bring up the console
	 * or
	 *	2) wait for pending interrupts prior to redistribution
	 *	   to the current CPU
	 *
	 * so we do them now.
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	/* only safe in a normal shutdown (see "invoke_cb" above) */
	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	if (memsegs != NULL)
		page_retire_hunt(page_retire_mdboot_cb);

	/*
	 * stop other cpus which also raise our priority. since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	stop_other_cpus();

	/*
	 * try and reset leaf devices.  reset_leaves() should only
	 * be called when there are no other threads that could be
	 * accessing devices
	 */
	reset_leaves();

	/* stop the hardware watchdog so it cannot fire mid-shutdown */
	watchdog_clear();

	if (fcn == AD_HALT) {
		halt((char *)NULL);
	} else if (fcn == AD_POWEROFF) {
		power_down(NULL);
	} else {
		/*
		 * Reboot: if the caller gave no boot string, derive the
		 * default OBP boot arguments from the uadmin function code.
		 */
		if (bootstr == NULL) {
			switch (fcn) {

			case AD_BOOT:
				bootstr = "";
				break;

			case AD_IBOOT:
				bootstr = "-a";
				break;

			case AD_SBOOT:
				bootstr = "-s";
				break;

			case AD_SIBOOT:
				bootstr = "-sa";
				break;
			default:
				cmn_err(CE_WARN,
				    "mdboot: invalid function %d", fcn);
				bootstr = "";
				break;
			}
		}
		reboot_machine(bootstr);
	}
	/* MAYBE REACHED */
}

/* mdpreboot - may be called prior to mdboot while
root fs still mounted */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *bootstr)
{
	/* no machine-dependent pre-reboot work is required on this platform */
}

/*
 * Halt the machine and then reboot with the device
 * and arguments specified in bootstr.
 */
static void
reboot_machine(char *bootstr)
{
	flush_windows();
	stop_other_cpus();		/* send stop signal to other CPUs */
	prom_printf("rebooting...\n");
	/*
	 * For platforms that use CPU signatures, we
	 * need to set the signature block to OS and
	 * the state to exiting for all the processors.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_REBOOT, -1);
	prom_reboot(bootstr);
	/*NOTREACHED*/
}

/*
 * We use the x-trap mechanism and idle_stop_xcall() to stop the other CPUs.
 * Once in panic_idle() they raise spl, record their location, and spin.
 */
static void
panic_idle(void)
{
	(void) spl7();

	debug_flush_windows();
	(void) setjmp(&curthread->t_pcb);

	/*
	 * Advertise that this CPU has parked; panic_stopcpus() polls
	 * in_prom to decide whether the CPU stopped successfully.
	 */
	CPU->cpu_m.in_prom = 1;
	membar_stld();

	for (;;);
}

/*
 * Force the other CPUs to trap into panic_idle(), and then remove them
 * from the cpu_ready_set so they will no longer receive cross-calls.
 */
/*ARGSUSED*/
void
panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
{
	cpuset_t cps;
	int i;

	(void) splzs();
	CPUSET_ALL_BUT(cps, cp->cpu_id);
	xt_some(cps, (xcfunc_t *)idle_stop_xcall, (uint64_t)&panic_idle, NULL);

	for (i = 0; i < NCPU; i++) {
		if (i != cp->cpu_id && CPU_XCALL_READY(i)) {
			/* bounded poll for the target CPU to park */
			int ntries = 0x10000;

			while (!cpu[i]->cpu_m.in_prom && ntries) {
				DELAY(50);
				ntries--;
			}

			if (!cpu[i]->cpu_m.in_prom)
				printf("panic: failed to stop cpu%d\n", i);

			/* quiesce the CPU even if it never acknowledged */
			cpu[i]->cpu_flags &= ~CPU_READY;
			cpu[i]->cpu_flags |= CPU_QUIESCED;
			CPUSET_DEL(cpu_ready_set, cpu[i]->cpu_id);
		}
	}
}

/*
 * Platform callback following each entry to panicsys().  If we've panicked at
 * level 14, we examine t_panic_trap to see if a fatal trap occurred.  If so,
 * we disable further %tick_cmpr interrupts.  If not, an explicit call to panic
 * was made and so we re-enqueue an interrupt request structure to allow
 * further level 14 interrupts to be processed once we lower PIL.  This allows
 * us to handle panics from the deadman() CY_HIGH_LEVEL cyclic.
 */
void
panic_enter_hw(int spl)
{
	/* first entry into the panic path: record %tick and freeze tracing */
	if (!panic_tick) {
		panic_tick = gettick();
		if (mach_htraptrace_enable) {
			uint64_t prev_freeze;

			/* there are no possible error codes for this hcall */
			(void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL,
			    &prev_freeze);
		}
#ifdef TRAPTRACE
		TRAPTRACE_FREEZE;
#endif
	}
	if (spl == ipltospl(PIL_14)) {
		uint_t opstate = disable_vec_intr();

		if (curthread->t_panic_trap != NULL) {
			/* fatal trap at level 14: silence %tick_cmpr */
			tickcmpr_disable();
			intr_dequeue_req(PIL_14, cbe_level14_inum);
		} else {
			/* explicit panic: keep level 14 serviceable */
			if (!tickcmpr_disabled())
				intr_enqueue_req(PIL_14, cbe_level14_inum);
			/*
			 * Clear SOFTINT<14>, SOFTINT<0> (TICK_INT)
			 * and SOFTINT<16> (STICK_INT) to indicate
			 * that the current level 14 has been serviced.
			 */
			wr_clr_softint((1 << PIL_14) |
			    TICK_INT_MASK | STICK_INT_MASK);
		}

		enable_vec_intr(opstate);
	}
}

/*
 * Miscellaneous hardware-specific code to execute after panicstr is set
 * by the panic code: we also print and record PTL1 panic information here.
 */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	extern uint_t getpstate(void);
	extern void setpstate(uint_t);

	/*
	 * Turn off TRAPTRACE and save the current %tick value in panic_tick.
	 * (No-op if panic_enter_hw() already did this on first entry.)
	 */
	if (!panic_tick) {
		panic_tick = gettick();
		if (mach_htraptrace_enable) {
			uint64_t prev_freeze;

			/* there are no possible error codes for this hcall */
			(void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL,
			    &prev_freeze);
		}
#ifdef TRAPTRACE
		TRAPTRACE_FREEZE;
#endif
	}
	/*
	 * For Platforms that use CPU signatures, we
	 * need to set the signature block to OS, the state to
	 * exiting, and the substate to panic for all the processors.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_PANIC, -1);

	/* re-register the hypervisor dump buffer for the crash dump */
	update_hvdump_buffer();

	/*
	 * Disable further ECC errors from the bus nexus.
	 */
	(void) bus_func_invoke(BF_TYPE_ERRDIS);

	/*
	 * Redirect all interrupts to the current CPU.
	 */
	intr_redist_all_cpus_shutdown();

	/*
	 * This call exists solely to support dumps to network
	 * devices after sync from OBP.
	 *
	 * If we came here via the sync callback, then on some
	 * platforms, interrupts may have arrived while we were
	 * stopped in OBP.  OBP will arrange for those interrupts to
	 * be redelivered if you say "go", but not if you invoke a
	 * client callback like 'sync'.  For some dump devices
	 * (network swap devices), we need interrupts to be
	 * delivered in order to dump, so we have to call the bus
	 * nexus driver to reset the interrupt state machines.
	 */
	(void) bus_func_invoke(BF_TYPE_RESINTR);

	setpstate(getpstate() | PSTATE_IE);
}

/*
 * Platforms that use CPU signatures need to set the signature block to OS and
 * the state to exiting for all CPUs.
PANIC_CONT indicates that we're about to 3840Sstevel@tonic-gate * write the crash dump, which tells the SSP/SMS to begin a timeout routine to 3850Sstevel@tonic-gate * reboot the machine if the dump never completes. 3860Sstevel@tonic-gate */ 3870Sstevel@tonic-gate /*ARGSUSED*/ 3880Sstevel@tonic-gate void 3890Sstevel@tonic-gate panic_dump_hw(int spl) 3900Sstevel@tonic-gate { 3910Sstevel@tonic-gate CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_DUMP, -1); 3920Sstevel@tonic-gate } 3930Sstevel@tonic-gate 3940Sstevel@tonic-gate /* 3950Sstevel@tonic-gate * for ptl1_panic 3960Sstevel@tonic-gate */ 3970Sstevel@tonic-gate void 3980Sstevel@tonic-gate ptl1_init_cpu(struct cpu *cpu) 3990Sstevel@tonic-gate { 4000Sstevel@tonic-gate ptl1_state_t *pstate = &cpu->cpu_m.ptl1_state; 4010Sstevel@tonic-gate 4020Sstevel@tonic-gate /*CONSTCOND*/ 4030Sstevel@tonic-gate if (sizeof (struct cpu) + PTL1_SSIZE > CPU_ALLOC_SIZE) { 4040Sstevel@tonic-gate panic("ptl1_init_cpu: not enough space left for ptl1_panic " 405911Siskreen "stack, sizeof (struct cpu) = %lu", 406911Siskreen (unsigned long)sizeof (struct cpu)); 4070Sstevel@tonic-gate } 4080Sstevel@tonic-gate 4090Sstevel@tonic-gate pstate->ptl1_stktop = (uintptr_t)cpu + CPU_ALLOC_SIZE; 4100Sstevel@tonic-gate cpu_pa[cpu->cpu_id] = va_to_pa(cpu); 4110Sstevel@tonic-gate } 4120Sstevel@tonic-gate 4130Sstevel@tonic-gate void 4140Sstevel@tonic-gate ptl1_panic_handler(ptl1_state_t *pstate) 4150Sstevel@tonic-gate { 4160Sstevel@tonic-gate static const char *ptl1_reasons[] = { 4170Sstevel@tonic-gate #ifdef PTL1_PANIC_DEBUG 4180Sstevel@tonic-gate "trap for debug purpose", /* PTL1_BAD_DEBUG */ 4190Sstevel@tonic-gate #else 4200Sstevel@tonic-gate "unknown trap", /* PTL1_BAD_DEBUG */ 4210Sstevel@tonic-gate #endif 4220Sstevel@tonic-gate "register window trap", /* PTL1_BAD_WTRAP */ 4230Sstevel@tonic-gate "kernel MMU miss", /* PTL1_BAD_KMISS */ 4240Sstevel@tonic-gate "kernel protection fault", /* PTL1_BAD_KPROT_FAULT */ 4250Sstevel@tonic-gate "ISM MMU miss", /* 
PTL1_BAD_ISM */ 4260Sstevel@tonic-gate "kernel MMU trap", /* PTL1_BAD_MMUTRAP */ 4270Sstevel@tonic-gate "kernel trap handler state", /* PTL1_BAD_TRAP */ 4280Sstevel@tonic-gate "floating point trap", /* PTL1_BAD_FPTRAP */ 4290Sstevel@tonic-gate #ifdef DEBUG 4300Sstevel@tonic-gate "pointer to intr_req", /* PTL1_BAD_INTR_REQ */ 4310Sstevel@tonic-gate #else 4320Sstevel@tonic-gate "unknown trap", /* PTL1_BAD_INTR_REQ */ 4330Sstevel@tonic-gate #endif 4340Sstevel@tonic-gate #ifdef TRAPTRACE 4350Sstevel@tonic-gate "TRACE_PTR state", /* PTL1_BAD_TRACE_PTR */ 4360Sstevel@tonic-gate #else 4370Sstevel@tonic-gate "unknown trap", /* PTL1_BAD_TRACE_PTR */ 4380Sstevel@tonic-gate #endif 4390Sstevel@tonic-gate "stack overflow", /* PTL1_BAD_STACK */ 4400Sstevel@tonic-gate "DTrace flags", /* PTL1_BAD_DTRACE_FLAGS */ 4410Sstevel@tonic-gate "attempt to steal locked ctx", /* PTL1_BAD_CTX_STEAL */ 4420Sstevel@tonic-gate "CPU ECC error loop", /* PTL1_BAD_ECC */ 4430Sstevel@tonic-gate "unexpected error from hypervisor call", /* PTL1_BAD_HCALL */ 444526Sarao "unexpected global level(%gl)", /* PTL1_BAD_GL */ 4451991Sheppo "Watchdog Reset", /* PTL1_BAD_WATCHDOG */ 4461991Sheppo "unexpected RED mode trap", /* PTL1_BAD_RED */ 4471991Sheppo "return value EINVAL from hcall: "\ 4481991Sheppo "UNMAP_PERM_ADDR", /* PTL1_BAD_HCALL_UNMAP_PERM_EINVAL */ 4491991Sheppo "return value ENOMAP from hcall: "\ 4501991Sheppo "UNMAP_PERM_ADDR", /* PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP */ 4510Sstevel@tonic-gate }; 4520Sstevel@tonic-gate 453357Ssvemuri uint_t reason = pstate->ptl1_regs.ptl1_gregs[0].ptl1_g1; 4540Sstevel@tonic-gate uint_t tl = pstate->ptl1_regs.ptl1_trap_regs[0].ptl1_tl; 4550Sstevel@tonic-gate struct trap_info ti = { 0 }; 4560Sstevel@tonic-gate 4570Sstevel@tonic-gate /* 4580Sstevel@tonic-gate * Use trap_info for a place holder to call panic_savetrap() and 4590Sstevel@tonic-gate * panic_showtrap() to save and print out ptl1_panic information. 
4600Sstevel@tonic-gate */ 4610Sstevel@tonic-gate if (curthread->t_panic_trap == NULL) 4620Sstevel@tonic-gate curthread->t_panic_trap = &ti; 4630Sstevel@tonic-gate 4640Sstevel@tonic-gate if (reason < sizeof (ptl1_reasons) / sizeof (ptl1_reasons[0])) 4650Sstevel@tonic-gate panic("bad %s at TL %u", ptl1_reasons[reason], tl); 4660Sstevel@tonic-gate else 4670Sstevel@tonic-gate panic("ptl1_panic reason 0x%x at TL %u", reason, tl); 4680Sstevel@tonic-gate } 4690Sstevel@tonic-gate 4700Sstevel@tonic-gate void 4710Sstevel@tonic-gate clear_watchdog_on_exit(void) 4720Sstevel@tonic-gate { 473*2036Swentaoy prom_printf("Debugging requested; hardware watchdog suspended.\n"); 474*2036Swentaoy (void) watchdog_suspend(); 4750Sstevel@tonic-gate } 4760Sstevel@tonic-gate 477*2036Swentaoy /* 478*2036Swentaoy * Restore the watchdog timer when returning from a debugger 479*2036Swentaoy * after a panic or L1-A and resume watchdog pat. 480*2036Swentaoy */ 4810Sstevel@tonic-gate void 482*2036Swentaoy restore_watchdog_on_entry() 4830Sstevel@tonic-gate { 484*2036Swentaoy watchdog_resume(); 4850Sstevel@tonic-gate } 4860Sstevel@tonic-gate 4870Sstevel@tonic-gate int 4880Sstevel@tonic-gate kdi_watchdog_disable(void) 4890Sstevel@tonic-gate { 490*2036Swentaoy watchdog_suspend(); 491*2036Swentaoy 492*2036Swentaoy return (0); 4930Sstevel@tonic-gate } 4940Sstevel@tonic-gate 4950Sstevel@tonic-gate void 4960Sstevel@tonic-gate kdi_watchdog_restore(void) 4970Sstevel@tonic-gate { 498*2036Swentaoy watchdog_resume(); 4990Sstevel@tonic-gate } 5000Sstevel@tonic-gate 5010Sstevel@tonic-gate void 5020Sstevel@tonic-gate mach_dump_buffer_init(void) 5030Sstevel@tonic-gate { 5040Sstevel@tonic-gate uint64_t ret, minsize = 0; 5050Sstevel@tonic-gate 5060Sstevel@tonic-gate if (hvdump_buf_sz > HVDUMP_SIZE_MAX) 5070Sstevel@tonic-gate hvdump_buf_sz = HVDUMP_SIZE_MAX; 5080Sstevel@tonic-gate 509288Sarao hvdump_buf_va = contig_mem_alloc_align(hvdump_buf_sz, PAGESIZE); 5100Sstevel@tonic-gate if (hvdump_buf_va == NULL) 
5110Sstevel@tonic-gate return; 5120Sstevel@tonic-gate 5130Sstevel@tonic-gate hvdump_buf_pa = va_to_pa(hvdump_buf_va); 5140Sstevel@tonic-gate 5150Sstevel@tonic-gate ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz, 5160Sstevel@tonic-gate &minsize); 5170Sstevel@tonic-gate 5180Sstevel@tonic-gate if (ret != H_EOK) { 5190Sstevel@tonic-gate contig_mem_free(hvdump_buf_va, hvdump_buf_sz); 5200Sstevel@tonic-gate hvdump_buf_va = NULL; 5210Sstevel@tonic-gate cmn_err(CE_NOTE, "!Error in setting up hvstate" 5220Sstevel@tonic-gate "dump buffer. Error = 0x%lx, size = 0x%lx," 5230Sstevel@tonic-gate "buf_pa = 0x%lx", ret, hvdump_buf_sz, 5240Sstevel@tonic-gate hvdump_buf_pa); 5250Sstevel@tonic-gate 5260Sstevel@tonic-gate if (ret == H_EINVAL) { 5270Sstevel@tonic-gate cmn_err(CE_NOTE, "!Buffer size too small." 5280Sstevel@tonic-gate "Available buffer size = 0x%lx," 5290Sstevel@tonic-gate "Minimum buffer size required = 0x%lx", 5300Sstevel@tonic-gate hvdump_buf_sz, minsize); 5310Sstevel@tonic-gate } 5320Sstevel@tonic-gate } 5330Sstevel@tonic-gate } 5340Sstevel@tonic-gate 5350Sstevel@tonic-gate 5360Sstevel@tonic-gate static void 5370Sstevel@tonic-gate update_hvdump_buffer(void) 5380Sstevel@tonic-gate { 5390Sstevel@tonic-gate uint64_t ret, dummy_val; 5400Sstevel@tonic-gate 5410Sstevel@tonic-gate if (hvdump_buf_va == NULL) 5420Sstevel@tonic-gate return; 5430Sstevel@tonic-gate 5440Sstevel@tonic-gate ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz, 5450Sstevel@tonic-gate &dummy_val); 5460Sstevel@tonic-gate if (ret != H_EOK) { 5470Sstevel@tonic-gate cmn_err(CE_NOTE, "!Cannot update hvstate dump" 5480Sstevel@tonic-gate "buffer. 
Error = 0x%lx", ret); 5490Sstevel@tonic-gate } 5500Sstevel@tonic-gate } 5510Sstevel@tonic-gate 5520Sstevel@tonic-gate 5530Sstevel@tonic-gate static int 554789Sahrens getintprop(pnode_t node, char *name, int deflt) 5550Sstevel@tonic-gate { 5560Sstevel@tonic-gate int value; 5570Sstevel@tonic-gate 5580Sstevel@tonic-gate switch (prom_getproplen(node, name)) { 5590Sstevel@tonic-gate case 0: 5600Sstevel@tonic-gate value = 1; /* boolean properties */ 5610Sstevel@tonic-gate break; 5620Sstevel@tonic-gate 5630Sstevel@tonic-gate case sizeof (int): 5640Sstevel@tonic-gate (void) prom_getprop(node, name, (caddr_t)&value); 5650Sstevel@tonic-gate break; 5660Sstevel@tonic-gate 5670Sstevel@tonic-gate default: 5680Sstevel@tonic-gate value = deflt; 5690Sstevel@tonic-gate break; 5700Sstevel@tonic-gate } 5710Sstevel@tonic-gate 5720Sstevel@tonic-gate return (value); 5730Sstevel@tonic-gate } 5740Sstevel@tonic-gate 5750Sstevel@tonic-gate /* 5760Sstevel@tonic-gate * Called by setcpudelay 5770Sstevel@tonic-gate */ 5780Sstevel@tonic-gate void 5790Sstevel@tonic-gate cpu_init_tick_freq(void) 5800Sstevel@tonic-gate { 5811991Sheppo md_t *mdp; 5821991Sheppo mde_cookie_t rootnode; 5831991Sheppo int listsz; 5841991Sheppo mde_cookie_t *listp = NULL; 5851991Sheppo int num_nodes; 5861991Sheppo uint64_t stick_prop; 5871991Sheppo 5881991Sheppo if (broken_md_flag) { 5891991Sheppo sys_tick_freq = cpunodes[CPU->cpu_id].clock_freq; 5901991Sheppo return; 5911991Sheppo } 5921991Sheppo 5931991Sheppo if ((mdp = md_get_handle()) == NULL) 5941991Sheppo panic("stick_frequency property not found in MD"); 5951991Sheppo 5961991Sheppo rootnode = md_root_node(mdp); 5971991Sheppo ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE); 5981991Sheppo 5991991Sheppo num_nodes = md_node_count(mdp); 6001991Sheppo 6011991Sheppo ASSERT(num_nodes > 0); 6021991Sheppo listsz = num_nodes * sizeof (mde_cookie_t); 6031991Sheppo listp = (mde_cookie_t *)prom_alloc((caddr_t)0, listsz, 0); 6041991Sheppo 6051991Sheppo if (listp == NULL) 6061991Sheppo 
panic("cannot allocate list for MD properties"); 6071991Sheppo 6081991Sheppo num_nodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "platform"), 6091991Sheppo md_find_name(mdp, "fwd"), listp); 6101991Sheppo 6111991Sheppo ASSERT(num_nodes == 1); 6121991Sheppo 6131991Sheppo if (md_get_prop_val(mdp, *listp, "stick-frequency", &stick_prop) != 0) 6141991Sheppo panic("stick_frequency property not found in MD"); 6151991Sheppo 6161991Sheppo sys_tick_freq = stick_prop; 6171991Sheppo 6181991Sheppo prom_free((caddr_t)listp, listsz); 6191991Sheppo (void) md_fini_handle(mdp); 6200Sstevel@tonic-gate } 6210Sstevel@tonic-gate 6220Sstevel@tonic-gate int shipit(int n, uint64_t cpu_list_ra); 6230Sstevel@tonic-gate extern uint64_t xc_tick_limit; 6240Sstevel@tonic-gate extern uint64_t xc_tick_jump_limit; 6250Sstevel@tonic-gate 6260Sstevel@tonic-gate #ifdef DEBUG 6270Sstevel@tonic-gate #define SEND_MONDO_STATS 1 6280Sstevel@tonic-gate #endif 6290Sstevel@tonic-gate 6300Sstevel@tonic-gate #ifdef SEND_MONDO_STATS 6310Sstevel@tonic-gate uint32_t x_one_stimes[64]; 6320Sstevel@tonic-gate uint32_t x_one_ltimes[16]; 6330Sstevel@tonic-gate uint32_t x_set_stimes[64]; 6340Sstevel@tonic-gate uint32_t x_set_ltimes[16]; 6350Sstevel@tonic-gate uint32_t x_set_cpus[NCPU]; 6360Sstevel@tonic-gate #endif 6370Sstevel@tonic-gate 6380Sstevel@tonic-gate void 6390Sstevel@tonic-gate send_one_mondo(int cpuid) 6400Sstevel@tonic-gate { 6410Sstevel@tonic-gate int retries, stat; 6420Sstevel@tonic-gate uint64_t starttick, endtick, tick, lasttick; 6430Sstevel@tonic-gate struct machcpu *mcpup = &(CPU->cpu_m); 6440Sstevel@tonic-gate 6450Sstevel@tonic-gate CPU_STATS_ADDQ(CPU, sys, xcalls, 1); 6460Sstevel@tonic-gate starttick = lasttick = gettick(); 6470Sstevel@tonic-gate mcpup->cpu_list[0] = (uint16_t)cpuid; 6480Sstevel@tonic-gate stat = shipit(1, mcpup->cpu_list_ra); 6490Sstevel@tonic-gate endtick = starttick + xc_tick_limit; 6500Sstevel@tonic-gate retries = 0; 6511310Sha137994 while (stat != H_EOK) { 6521310Sha137994 
if (stat != H_EWOULDBLOCK) { 6531310Sha137994 if (panic_quiesce) 6541310Sha137994 return; 6551310Sha137994 if (stat == H_ECPUERROR) 6561310Sha137994 cmn_err(CE_PANIC, "send_one_mondo: " 6571310Sha137994 "cpuid: 0x%x has been marked in " 6581310Sha137994 "error", cpuid); 6591310Sha137994 else 6601310Sha137994 cmn_err(CE_PANIC, "send_one_mondo: " 6611310Sha137994 "unexpected hypervisor error 0x%x " 6621310Sha137994 "while sending a mondo to cpuid: " 6631310Sha137994 "0x%x", stat, cpuid); 6641310Sha137994 } 6650Sstevel@tonic-gate tick = gettick(); 6660Sstevel@tonic-gate /* 6670Sstevel@tonic-gate * If there is a big jump between the current tick 6680Sstevel@tonic-gate * count and lasttick, we have probably hit a break 6690Sstevel@tonic-gate * point. Adjust endtick accordingly to avoid panic. 6700Sstevel@tonic-gate */ 6710Sstevel@tonic-gate if (tick > (lasttick + xc_tick_jump_limit)) 6720Sstevel@tonic-gate endtick += (tick - lasttick); 6730Sstevel@tonic-gate lasttick = tick; 6740Sstevel@tonic-gate if (tick > endtick) { 6750Sstevel@tonic-gate if (panic_quiesce) 6760Sstevel@tonic-gate return; 6770Sstevel@tonic-gate cmn_err(CE_PANIC, "send mondo timeout " 6780Sstevel@tonic-gate "(target 0x%x) [retries: 0x%x hvstat: 0x%x]", 6790Sstevel@tonic-gate cpuid, retries, stat); 6800Sstevel@tonic-gate } 6810Sstevel@tonic-gate drv_usecwait(1); 6820Sstevel@tonic-gate stat = shipit(1, mcpup->cpu_list_ra); 6830Sstevel@tonic-gate retries++; 6840Sstevel@tonic-gate } 6850Sstevel@tonic-gate #ifdef SEND_MONDO_STATS 6860Sstevel@tonic-gate { 6871310Sha137994 uint64_t n = gettick() - starttick; 6880Sstevel@tonic-gate if (n < 8192) 6890Sstevel@tonic-gate x_one_stimes[n >> 7]++; 6901310Sha137994 else if (n < 15*8192) 6911310Sha137994 x_one_ltimes[n >> 13]++; 6920Sstevel@tonic-gate else 6930Sstevel@tonic-gate x_one_ltimes[0xf]++; 6940Sstevel@tonic-gate } 6950Sstevel@tonic-gate #endif 6960Sstevel@tonic-gate } 6970Sstevel@tonic-gate 6980Sstevel@tonic-gate void 6990Sstevel@tonic-gate 
send_mondo_set(cpuset_t set)
{
	/*
	 * Deliver a mondo interrupt to every CPU in 'set' via the
	 * hypervisor (shipit()).  The HV may deliver to only a subset
	 * of the list per call, so retry until the set drains, and
	 * panic on timeout or on any unexpected HV error.  Unless
	 * a panic is already in progress (panic_quiesce), this
	 * function does not return until every mondo has been sent.
	 */
	uint64_t starttick, endtick, tick, lasttick;
	int shipped = 0;		/* CPUs successfully sent a mondo */
	int retries = 0;
	struct machcpu *mcpup = &(CPU->cpu_m);

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	do {
		int ncpuids = 0;
		int i, stat;

		/* assemble CPU list for HV argument */
		for (i = 0; i < NCPU; i++) {
			if (CPU_IN_SET(set, i)) {
				mcpup->cpu_list[ncpuids] = (uint16_t)i;
				ncpuids++;
			}
		}

		stat = shipit(ncpuids, mcpup->cpu_list_ra);
		if (stat == H_EOK) {
			/* everything went out in one call; done */
			shipped += ncpuids;
			break;
		}

		/*
		 * Either not all CPU mondos were sent, or an
		 * error occurred. CPUs that were sent mondos
		 * have their CPU IDs overwritten in cpu_list.
		 * Reset the cpuset so that its only members
		 * are those CPU IDs that still need to be sent.
		 */
		CPUSET_ZERO(set);
		for (i = 0; i < ncpuids; i++) {
			if (mcpup->cpu_list[i] == HV_SEND_MONDO_ENTRYDONE) {
				shipped++;
			} else {
				CPUSET_ADD(set, mcpup->cpu_list[i]);
			}
		}

		/*
		 * Now handle possible errors returned
		 * from hypervisor.
		 */
		if (stat == H_ECPUERROR) {
			cpuset_t error_set;

			/*
			 * One or more of the CPUs passed to HV is
			 * in the error state. Remove those CPUs from
			 * set and record them in error_set.
			 */
			CPUSET_ZERO(error_set);
			for (i = 0; i < NCPU; i++) {
				if (CPU_IN_SET(set, i)) {
					uint64_t state = CPU_STATE_INVALID;
					(void) hv_cpu_state(i, &state);
					if (state == CPU_STATE_ERROR) {
						CPUSET_ADD(error_set, i);
						CPUSET_DEL(set, i);
					}
				}
			}

			if (!panic_quiesce) {
				/*
				 * HV claimed a CPU error but none of the
				 * listed CPUs report the error state --
				 * inconsistent, so panic immediately.
				 */
				if (CPUSET_ISNULL(error_set)) {
					cmn_err(CE_PANIC, "send_mondo_set: "
					    "hypervisor returned "
					    "H_ECPUERROR but no CPU in "
					    "cpu_list in error state");
				}

				cmn_err(CE_CONT, "send_mondo_set: cpuid(s) ");
				for (i = 0; i < NCPU; i++) {
					if (CPU_IN_SET(error_set, i)) {
						cmn_err(CE_CONT, "0x%x ", i);
					}
				}
				cmn_err(CE_CONT, "have been marked in "
				    "error\n");
				cmn_err(CE_PANIC, "send_mondo_set: CPU(s) "
				    "in error state");
			}
		} else if (stat != H_EWOULDBLOCK) {
			/* H_EWOULDBLOCK is the only retryable status */
			if (panic_quiesce)
				return;
			/*
			 * For all other errors, panic.
			 */
			cmn_err(CE_CONT, "send_mondo_set: unexpected "
			    "hypervisor error 0x%x while sending a "
			    "mondo to cpuid(s):", stat);
			for (i = 0; i < NCPU; i++) {
				if (CPU_IN_SET(set, i)) {
					cmn_err(CE_CONT, " 0x%x", i);
				}
			}
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: unexpected "
			    "hypervisor error");
		}

		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_CONT, "send mondo timeout "
			    "[retries: 0x%x] cpuids: ", retries);
			for (i = 0; i < NCPU; i++)
				if (CPU_IN_SET(set, i))
					cmn_err(CE_CONT, " 0x%x", i);
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: timeout");
		}

		/*
		 * Busy-wait roughly one microsecond (sys_clock_mhz
		 * ticks) before retrying the hypervisor call.
		 */
		while (gettick() < (tick + sys_clock_mhz))
			;
		retries++;
	} while (!CPUSET_ISNULL(set));

	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);

#ifdef SEND_MONDO_STATS
	{
		/* bucket total elapsed ticks into the stats histograms */
		uint64_t n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else if (n < 15*8192)
			x_set_ltimes[n >> 13]++;
		else
			x_set_ltimes[0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

/*
 * Empty on this platform; presumably no explicit FPU synchronization
 * is needed here -- NOTE(review): confirm against other sun4 variants.
 */
void
syncfpu(void)
{
}

/* Empty on this platform. */
void
cpu_flush_ecache(void)
{
}

/* %stick synchronization slave side: empty on this platform. */
void
sticksync_slave(void)
{}

/* %stick synchronization master side: empty on this platform. */
void
sticksync_master(void)
{}

/* Cache scrubber initialization: empty on this platform. */
void
cpu_init_cache_scrub(void)
{}

/*
 * Retry a failed dtrace_blksuword32() with any watchpoint on the
 * target word temporarily disabled.  Returns the result of the
 * (non-retrying) dtrace_blksuword32() call.
 */
int
dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
{
	int ret, watched;

	watched = watch_disable_addr((void *)addr, 4, S_WRITE);
	ret = dtrace_blksuword32(addr, data, 0);
	if (watched)
		watch_enable_addr((void *)addr, 4, S_WRITE);

	return (ret);
}

/*
 * Store *data to user address 'addr' and flush the affected line.
 * On a failed store, retry once via dtrace_blksuword32_err() when
 * 'tryagain' is set; otherwise return -1.  Returns 0 on success.
 */
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{
	if (suword32((void *)addr, *data) == -1)
		return (tryagain ? dtrace_blksuword32_err(addr, data) : -1);
	dtrace_flush_sec(addr);

	return (0);
}

/* Entering the faulted state requires no platform work here. */
/*ARGSUSED*/
void
cpu_faulted_enter(struct cpu *cp)
{
}

/* Leaving the faulted state requires no platform work here. */
/*ARGSUSED*/
void
cpu_faulted_exit(struct cpu *cp)
{
}

/*
 * Invoke 'cb' for every CPU currently in cpu_ready_set and return
 * the sum of the callback return values.
 */
static int
kdi_cpu_ready_iter(int (*cb)(int, void *), void *arg)
{
	int rc, i;

	for (rc = 0, i = 0; i < NCPU; i++) {
		if (CPU_IN_SET(cpu_ready_set, i))
			rc += cb(i, arg);
	}

	return (rc);
}

/*
 * Sends a cross-call to a specified processor. The caller assumes
 * responsibility for repetition of cross-calls, as appropriate (MARSA for
 * debugging).
9220Sstevel@tonic-gate */ 9230Sstevel@tonic-gate static int 9240Sstevel@tonic-gate kdi_xc_one(int cpuid, void (*func)(uintptr_t, uintptr_t), uintptr_t arg1, 9250Sstevel@tonic-gate uintptr_t arg2) 9260Sstevel@tonic-gate { 9270Sstevel@tonic-gate int stat; 9280Sstevel@tonic-gate struct machcpu *mcpup; 9290Sstevel@tonic-gate uint64_t cpuaddr_reg = 0, cpuaddr_scr = 0; 9300Sstevel@tonic-gate 9310Sstevel@tonic-gate mcpup = &(((cpu_t *)get_cpuaddr(cpuaddr_reg, cpuaddr_scr))->cpu_m); 9320Sstevel@tonic-gate 9330Sstevel@tonic-gate /* 9340Sstevel@tonic-gate * if (idsr_busy()) 9350Sstevel@tonic-gate * return (KDI_XC_RES_ERR); 9360Sstevel@tonic-gate */ 9370Sstevel@tonic-gate 9380Sstevel@tonic-gate init_mondo_nocheck((xcfunc_t *)func, arg1, arg2); 9390Sstevel@tonic-gate 9400Sstevel@tonic-gate mcpup->cpu_list[0] = (uint16_t)cpuid; 9410Sstevel@tonic-gate stat = shipit(1, mcpup->cpu_list_ra); 9420Sstevel@tonic-gate 9430Sstevel@tonic-gate if (stat == 0) 9440Sstevel@tonic-gate return (KDI_XC_RES_OK); 9450Sstevel@tonic-gate else 9460Sstevel@tonic-gate return (KDI_XC_RES_NACK); 9470Sstevel@tonic-gate } 9480Sstevel@tonic-gate 9490Sstevel@tonic-gate static void 9500Sstevel@tonic-gate kdi_tickwait(clock_t nticks) 9510Sstevel@tonic-gate { 9520Sstevel@tonic-gate clock_t endtick = gettick() + nticks; 9530Sstevel@tonic-gate 9540Sstevel@tonic-gate while (gettick() < endtick); 9550Sstevel@tonic-gate } 9560Sstevel@tonic-gate 9570Sstevel@tonic-gate static void 9580Sstevel@tonic-gate kdi_cpu_init(int dcache_size, int dcache_linesize, int icache_size, 9590Sstevel@tonic-gate int icache_linesize) 9600Sstevel@tonic-gate { 9610Sstevel@tonic-gate kdi_dcache_size = dcache_size; 9620Sstevel@tonic-gate kdi_dcache_linesize = dcache_linesize; 9630Sstevel@tonic-gate kdi_icache_size = icache_size; 9640Sstevel@tonic-gate kdi_icache_linesize = icache_linesize; 9650Sstevel@tonic-gate } 9660Sstevel@tonic-gate 9670Sstevel@tonic-gate /* used directly by kdi_read/write_phys */ 9680Sstevel@tonic-gate void 
9690Sstevel@tonic-gate kdi_flush_caches(void) 9700Sstevel@tonic-gate { 9710Sstevel@tonic-gate /* Not required on sun4v architecture. */ 9720Sstevel@tonic-gate } 9730Sstevel@tonic-gate 9740Sstevel@tonic-gate /*ARGSUSED*/ 9750Sstevel@tonic-gate int 9760Sstevel@tonic-gate kdi_get_stick(uint64_t *stickp) 9770Sstevel@tonic-gate { 9780Sstevel@tonic-gate return (-1); 9790Sstevel@tonic-gate } 9800Sstevel@tonic-gate 9810Sstevel@tonic-gate void 9820Sstevel@tonic-gate cpu_kdi_init(kdi_t *kdi) 9830Sstevel@tonic-gate { 9840Sstevel@tonic-gate kdi->kdi_flush_caches = kdi_flush_caches; 9850Sstevel@tonic-gate kdi->mkdi_cpu_init = kdi_cpu_init; 9860Sstevel@tonic-gate kdi->mkdi_cpu_ready_iter = kdi_cpu_ready_iter; 9870Sstevel@tonic-gate kdi->mkdi_xc_one = kdi_xc_one; 9880Sstevel@tonic-gate kdi->mkdi_tickwait = kdi_tickwait; 9890Sstevel@tonic-gate kdi->mkdi_get_stick = kdi_get_stick; 9900Sstevel@tonic-gate } 9910Sstevel@tonic-gate 992*2036Swentaoy static void 993*2036Swentaoy sun4v_system_claim(void) 994*2036Swentaoy { 995*2036Swentaoy watchdog_suspend(); 996*2036Swentaoy } 997*2036Swentaoy 998*2036Swentaoy static void 999*2036Swentaoy sun4v_system_release(void) 1000*2036Swentaoy { 1001*2036Swentaoy watchdog_resume(); 1002*2036Swentaoy } 1003*2036Swentaoy 1004*2036Swentaoy void 1005*2036Swentaoy plat_kdi_init(kdi_t *kdi) 1006*2036Swentaoy { 1007*2036Swentaoy kdi->pkdi_system_claim = sun4v_system_claim; 1008*2036Swentaoy kdi->pkdi_system_release = sun4v_system_release; 1009*2036Swentaoy } 1010*2036Swentaoy 10110Sstevel@tonic-gate /* 10120Sstevel@tonic-gate * Routine to return memory information associated 10130Sstevel@tonic-gate * with a physical address and syndrome. 
 */
/* ARGSUSED */
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
	/* Unconditionally unsupported on this platform. */
	return (ENOTSUP);
}

/*
 * This routine returns the size of the kernel's FRU name buffer.
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * This routine is a more generic interface to cpu_get_mem_unum(),
 * that may be used by other modules (e.g. mm).
 */
/* ARGSUSED */
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
	/* Unconditionally unsupported on this platform. */
	return (ENOTSUP);
}

/* Memory serial-id lookup: unconditionally unsupported here. */
/* ARGSUSED */
int
cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* unum/offset to address translation: unconditionally unsupported here. */
/* ARGSUSED */
int
cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	return (ENOTSUP);
}

/*
 * xt_sync - wait for previous x-traps to finish
 */
void
xt_sync(cpuset_t cpuset)
{
	union {
		uint8_t volatile byte[NCPU];
		uint64_t volatile xword[NCPU / 8];
	} cpu_sync;
	uint64_t starttick, endtick, tick, lasttick;
	int i;

	/* Never wait on ourselves; only target CPUs that are ready. */
	kpreempt_disable();
	CPUSET_DEL(cpuset, CPU->cpu_id);
	CPUSET_AND(cpuset, cpu_ready_set);

	/*
	 * Sun4v uses a queue for receiving mondos. Successful
	 * transmission of a mondo only indicates that the mondo
	 * has been written into the queue.
	 *
	 * We use an array of bytes to let each cpu to signal back
	 * to the cross trap sender that the cross trap has been
	 * executed. Set the byte to 1 before sending the cross trap
	 * and wait until other cpus reset it to 0.
	 */
	/* NOTE(review): bzero length NCPU matches byte[NCPU] exactly */
	bzero((void *)&cpu_sync, NCPU);
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(cpuset, i))
			cpu_sync.byte[i] = 1;

	/* Each target runs xt_sync_tl1 and clears its byte in the array. */
	xt_some(cpuset, (xcfunc_t *)xt_sync_tl1,
	    (uint64_t)cpu_sync.byte, 0);

	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	/*
	 * Scan the flag array eight bytes at a time; a zero xword
	 * means all eight CPUs it covers have acknowledged.
	 * NOTE(review): assumes NCPU is a multiple of 8.
	 */
	for (i = 0; i < (NCPU / 8); i ++) {
		while (cpu_sync.xword[i] != 0) {
			tick = gettick();
			/*
			 * If there is a big jump between the current tick
			 * count and lasttick, we have probably hit a break
			 * point. Adjust endtick accordingly to avoid panic.
			 */
			if (tick > (lasttick + xc_tick_jump_limit)) {
				endtick += (tick - lasttick);
			}
			lasttick = tick;
			if (tick > endtick) {
				/* Give up quietly if a panic is underway. */
				if (panic_quiesce)
					goto out;
				cmn_err(CE_CONT, "Cross trap sync timeout "
				    "at cpu_sync.xword[%d]: 0x%lx\n",
				    i, cpu_sync.xword[i]);
				cmn_err(CE_PANIC, "xt_sync: timeout");
			}
		}
	}

out:
	kpreempt_enable();
}