/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/t_lock.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/reboot.h>
#include <sys/autoconf.h>
#include <sys/machsystm.h>
#include <sys/promif.h>
#include <sys/membar.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/intreg.h>
#include <sys/consdev.h>
#include <sys/kdi_impl.h>
#include <sys/traptrace.h>
#include <sys/hypervisor_api.h>
#include <sys/vmsystm.h>
#include <sys/dtrace.h>
#include <sys/xc_impl.h>
#include <sys/callb.h>
#include <sys/mdesc.h>
#include <sys/mach_descrip.h>
#include <sys/wdt.h>

/*
 * hvdump_buf_va is a pointer to the currently-configured hvdump_buf.
 * A value of NULL indicates that this area is not configured.
 * hvdump_buf_sz is tunable but will be clamped to HVDUMP_SIZE_MAX.
 */

caddr_t hvdump_buf_va;
uint64_t hvdump_buf_sz = HVDUMP_SIZE_DEFAULT;
static uint64_t hvdump_buf_pa;

u_longlong_t panic_tick;

extern u_longlong_t gettick();
static void reboot_machine(char *);
static void update_hvdump_buffer(void);

/*
 * For xt_sync synchronization.
 */
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

/*
 * We keep our own copies, used for cache flushing, because we can be called
 * before cpu_fiximpl().
 */
static int kdi_dcache_size;
static int kdi_dcache_linesize;
static int kdi_icache_size;
static int kdi_icache_linesize;

/*
 * Assembly support for generic modules in sun4v/ml/mach_xc.s
 */
extern void init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
extern void kdi_flush_idcache(int, int, int, int);
extern uint64_t get_cpuaddr(uint64_t, uint64_t);

/*
 * Machine dependent code to reboot.
 * "bootstr", when non-null, points to a string to be used as the
 * argument string when rebooting.
 *
 * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
 * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when
 * we are in a normal shutdown sequence (interrupts are not blocked, the
 * system is not panicking or being suspended).
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *bootstr, boolean_t invoke_cb)
{
	extern void pm_cfb_check_and_powerup(void);

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * At a high interrupt level we can't:
	 *	1) bring up the console
	 * or
	 *	2) wait for pending interrupts prior to redistribution
	 *	   to the current CPU
	 *
	 * so we do them now.
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	if (memsegs != NULL)
		page_retire_hunt(page_retire_mdboot_cb);

	/*
	 * Stop other CPUs, which also raises our priority. Since there is
	 * only one active CPU after this, and our priority will be too high
	 * for us to be preempted, we're essentially single-threaded
	 * from here on out.
	 */
	stop_other_cpus();

	/*
	 * Try to reset leaf devices. reset_leaves() should only
	 * be called when there are no other threads that could be
	 * accessing devices.
	 */
	reset_leaves();

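	/*
	 * Disarm the hardware watchdog so it cannot fire while we are
	 * halting or rebooting the machine.
	 */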
	watchdog_clear();

	if (fcn == AD_HALT) {
		halt((char *)NULL);
	} else if (fcn == AD_POWEROFF) {
		power_down(NULL);
	} else {
		if (bootstr == NULL) {
			switch (fcn) {

			case AD_BOOT:
				bootstr = "";
				break;

			case AD_IBOOT:
				bootstr = "-a";
				break;

			case AD_SBOOT:
				bootstr = "-s";
				break;

			case AD_SIBOOT:
				bootstr = "-sa";
				break;
			default:
				cmn_err(CE_WARN,
				    "mdboot: invalid function %d", fcn);
				bootstr = "";
				break;
			}
		}
		reboot_machine(bootstr);
	}
	/* MAYBE REACHED */
}

/* mdpreboot - may be called prior to mdboot while root fs still mounted */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *bootstr)
{
}

/*
 * Halt the machine and then reboot with the device
 * and arguments specified in bootstr.
 */
static void
reboot_machine(char *bootstr)
{
	flush_windows();
	stop_other_cpus();		/* send stop signal to other CPUs */
	prom_printf("rebooting...\n");
	/*
	 * For platforms that use CPU signatures, we
	 * need to set the signature block to OS and
	 * the state to exiting for all the processors.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_REBOOT, -1);
	prom_reboot(bootstr);
	/*NOTREACHED*/
}

/*
 * We use the x-trap mechanism and idle_stop_xcall() to stop the other CPUs.
 * Once in panic_idle() they raise spl, record their location, and spin.
 */
static void
panic_idle(void)
{
	(void) spl7();

	debug_flush_windows();
	(void) setjmp(&curthread->t_pcb);

	CPU->cpu_m.in_prom = 1;
	membar_stld();

	for (;;);
}

/*
 * Force the other CPUs to trap into panic_idle(), and then remove them
 * from the cpu_ready_set so they will no longer receive cross-calls.
 */
/*ARGSUSED*/
void
panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
{
	cpuset_t cps;
	int i;

	(void) splzs();
	CPUSET_ALL_BUT(cps, cp->cpu_id);
	xt_some(cps, (xcfunc_t *)idle_stop_xcall, (uint64_t)&panic_idle, NULL);

	for (i = 0; i < NCPU; i++) {
		if (i != cp->cpu_id && CPU_XCALL_READY(i)) {
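			/*
			 * Allow roughly 3.3 seconds (0x10000 iterations of
			 * 50 usec each) for the CPU to enter panic_idle().
			 */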
			int ntries = 0x10000;

			while (!cpu[i]->cpu_m.in_prom && ntries) {
				DELAY(50);
				ntries--;
			}

			if (!cpu[i]->cpu_m.in_prom)
				printf("panic: failed to stop cpu%d\n", i);

			cpu[i]->cpu_flags &= ~CPU_READY;
			cpu[i]->cpu_flags |= CPU_QUIESCED;
			CPUSET_DEL(cpu_ready_set, cpu[i]->cpu_id);
		}
	}
}

/*
 * Platform callback following each entry to panicsys(). If we've panicked at
 * level 14, we examine t_panic_trap to see if a fatal trap occurred. If so,
 * we disable further %tick_cmpr interrupts. If not, an explicit call to panic
 * was made and so we re-enqueue an interrupt request structure to allow
 * further level 14 interrupts to be processed once we lower PIL. This allows
 * us to handle panics from the deadman() CY_HIGH_LEVEL cyclic.
 */
void
panic_enter_hw(int spl)
{
	if (!panic_tick) {
		panic_tick = gettick();
		if (mach_htraptrace_enable) {
			uint64_t prev_freeze;

			/* there are no possible error codes for this hcall */
			(void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL,
			    &prev_freeze);
		}
#ifdef TRAPTRACE
		TRAPTRACE_FREEZE;
#endif
	}
	if (spl == ipltospl(PIL_14)) {
		uint_t opstate = disable_vec_intr();

		if (curthread->t_panic_trap != NULL) {
			tickcmpr_disable();
			intr_dequeue_req(PIL_14, cbe_level14_inum);
		} else {
			if (!tickcmpr_disabled())
				intr_enqueue_req(PIL_14, cbe_level14_inum);
			/*
			 * Clear SOFTINT<14>, SOFTINT<0> (TICK_INT)
			 * and SOFTINT<16> (STICK_INT) to indicate
			 * that the current level 14 has been serviced.
			 */
			wr_clr_softint((1 << PIL_14) |
			    TICK_INT_MASK | STICK_INT_MASK);
		}

		enable_vec_intr(opstate);
	}
}

/*
 * Miscellaneous hardware-specific code to execute after panicstr is set
 * by the panic code: we also print and record PTL1 panic information here.
 */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	extern uint_t getpstate(void);
	extern void setpstate(uint_t);

	/*
	 * Turn off TRAPTRACE and save the current %tick value in panic_tick.
	 */
	if (!panic_tick) {
		panic_tick = gettick();
		if (mach_htraptrace_enable) {
			uint64_t prev_freeze;

			/* there are no possible error codes for this hcall */
			(void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL,
			    &prev_freeze);
		}
#ifdef TRAPTRACE
		TRAPTRACE_FREEZE;
#endif
	}
	/*
	 * For platforms that use CPU signatures, we
	 * need to set the signature block to OS, the state to
	 * exiting, and the substate to panic for all the processors.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_PANIC, -1);

	update_hvdump_buffer();

	/*
	 * Disable further ECC errors from the bus nexus.
	 */
	(void) bus_func_invoke(BF_TYPE_ERRDIS);

	/*
	 * Redirect all interrupts to the current CPU.
	 */
	intr_redist_all_cpus_shutdown();

	/*
	 * This call exists solely to support dumps to network
	 * devices after sync from OBP.
	 *
	 * If we came here via the sync callback, then on some
	 * platforms, interrupts may have arrived while we were
	 * stopped in OBP. OBP will arrange for those interrupts to
	 * be redelivered if you say "go", but not if you invoke a
	 * client callback like 'sync'. For some dump devices
	 * (network swap devices), we need interrupts to be
	 * delivered in order to dump, so we have to call the bus
	 * nexus driver to reset the interrupt state machines.
	 */
	(void) bus_func_invoke(BF_TYPE_RESINTR);

	setpstate(getpstate() | PSTATE_IE);
}

/*
 * Platforms that use CPU signatures need to set the signature block to OS and
 * the state to exiting for all CPUs. PANIC_CONT indicates that we're about to
 * write the crash dump, which tells the SSP/SMS to begin a timeout routine to
 * reboot the machine if the dump never completes.
 */
/*ARGSUSED*/
void
panic_dump_hw(int spl)
{
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_DUMP, -1);
}

/*
 * for ptl1_panic
 */
void
ptl1_init_cpu(struct cpu *cpu)
{
	ptl1_state_t *pstate = &cpu->cpu_m.ptl1_state;

	/*CONSTCOND*/
	if (sizeof (struct cpu) + PTL1_SSIZE > CPU_ALLOC_SIZE) {
		panic("ptl1_init_cpu: not enough space left for ptl1_panic "
		    "stack, sizeof (struct cpu) = %lu",
		    (unsigned long)sizeof (struct cpu));
	}

	pstate->ptl1_stktop = (uintptr_t)cpu + CPU_ALLOC_SIZE;
	cpu_pa[cpu->cpu_id] = va_to_pa(cpu);
}

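/*
 * Decode the ptl1_panic reason code recorded in the saved %g1 of global
 * register set 0 and panic with a human-readable message.
 */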
void
ptl1_panic_handler(ptl1_state_t *pstate)
{
	static const char *ptl1_reasons[] = {
#ifdef	PTL1_PANIC_DEBUG
		"trap for debug purpose",	/* PTL1_BAD_DEBUG */
#else
		"unknown trap",			/* PTL1_BAD_DEBUG */
#endif
		"register window trap",		/* PTL1_BAD_WTRAP */
		"kernel MMU miss",		/* PTL1_BAD_KMISS */
		"kernel protection fault",	/* PTL1_BAD_KPROT_FAULT */
		"ISM MMU miss",			/* PTL1_BAD_ISM */
		"kernel MMU trap",		/* PTL1_BAD_MMUTRAP */
		"kernel trap handler state",	/* PTL1_BAD_TRAP */
		"floating point trap",		/* PTL1_BAD_FPTRAP */
#ifdef	DEBUG
		"pointer to intr_req",		/* PTL1_BAD_INTR_REQ */
#else
		"unknown trap",			/* PTL1_BAD_INTR_REQ */
#endif
#ifdef	TRAPTRACE
		"TRACE_PTR state",		/* PTL1_BAD_TRACE_PTR */
#else
		"unknown trap",			/* PTL1_BAD_TRACE_PTR */
#endif
		"stack overflow",		/* PTL1_BAD_STACK */
		"DTrace flags",			/* PTL1_BAD_DTRACE_FLAGS */
		"attempt to steal locked ctx",	/* PTL1_BAD_CTX_STEAL */
		"CPU ECC error loop",		/* PTL1_BAD_ECC */
		"unexpected error from hypervisor call", /* PTL1_BAD_HCALL */
		"unexpected global level (%gl)", /* PTL1_BAD_GL */
		"Watchdog Reset",		/* PTL1_BAD_WATCHDOG */
		"unexpected RED mode trap",	/* PTL1_BAD_RED */
		"return value EINVAL from hcall: "
		    "UNMAP_PERM_ADDR",	/* PTL1_BAD_HCALL_UNMAP_PERM_EINVAL */
		"return value ENOMAP from hcall: "
		    "UNMAP_PERM_ADDR",	/* PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP */
	};

	uint_t reason = pstate->ptl1_regs.ptl1_gregs[0].ptl1_g1;
	uint_t tl = pstate->ptl1_regs.ptl1_trap_regs[0].ptl1_tl;
	struct trap_info ti = { 0 };

	/*
	 * Use trap_info as a place holder to call panic_savetrap() and
	 * panic_showtrap() to save and print out ptl1_panic information.
	 */
	if (curthread->t_panic_trap == NULL)
		curthread->t_panic_trap = &ti;

	if (reason < sizeof (ptl1_reasons) / sizeof (ptl1_reasons[0]))
		panic("bad %s at TL %u", ptl1_reasons[reason], tl);
	else
		panic("ptl1_panic reason 0x%x at TL %u", reason, tl);
}

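/*
 * Called when the kernel is about to hand control to the debugger or OBP:
 * suspend the hardware watchdog so it does not expire while we are stopped.
 */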
void
clear_watchdog_on_exit(void)
{
	prom_printf("Debugging requested; hardware watchdog suspended.\n");
	(void) watchdog_suspend();
}

/*
 * Restore the watchdog timer when returning from a debugger
 * after a panic or L1-A and resume watchdog pat.
 */
void
restore_watchdog_on_entry()
{
	watchdog_resume();
}

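/*
 * KDI hooks: suspend the watchdog while the kernel debugger owns the
 * machine and resume it on the way back out.
 */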
int
kdi_watchdog_disable(void)
{
	watchdog_suspend();

	return (0);
}

void
kdi_watchdog_restore(void)
{
	watchdog_resume();
}

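/*
 * Allocate the hypervisor dump buffer and register it with the hypervisor
 * via hv_dump_buf_update(). On failure the buffer is freed and
 * hvdump_buf_va is left NULL.
 */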
void
mach_dump_buffer_init(void)
{
	uint64_t ret, minsize = 0;

	if (hvdump_buf_sz > HVDUMP_SIZE_MAX)
		hvdump_buf_sz = HVDUMP_SIZE_MAX;

	hvdump_buf_va = contig_mem_alloc_align(hvdump_buf_sz, PAGESIZE);
	if (hvdump_buf_va == NULL)
		return;

	hvdump_buf_pa = va_to_pa(hvdump_buf_va);

	ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz,
	    &minsize);

	if (ret != H_EOK) {
		contig_mem_free(hvdump_buf_va, hvdump_buf_sz);
		hvdump_buf_va = NULL;
		cmn_err(CE_NOTE, "!Error in setting up hvstate "
		    "dump buffer. Error = 0x%lx, size = 0x%lx, "
		    "buf_pa = 0x%lx", ret, hvdump_buf_sz,
		    hvdump_buf_pa);

		if (ret == H_EINVAL) {
			cmn_err(CE_NOTE, "!Buffer size too small. "
			    "Available buffer size = 0x%lx, "
			    "Minimum buffer size required = 0x%lx",
			    hvdump_buf_sz, minsize);
		}
	}
}


static void
update_hvdump_buffer(void)
{
	uint64_t ret, dummy_val;

	if (hvdump_buf_va == NULL)
		return;

	ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz,
	    &dummy_val);
	if (ret != H_EOK) {
		cmn_err(CE_NOTE, "!Cannot update hvstate dump "
		    "buffer. Error = 0x%lx", ret);
	}
}

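/*
 * Fetch an integer property from a PROM node. A zero-length property is
 * treated as a boolean and returns 1; if the property is missing or has
 * an unexpected size, deflt is returned.
 */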
static int
getintprop(pnode_t node, char *name, int deflt)
{
	int value;

	switch (prom_getproplen(node, name)) {
	case 0:
		value = 1;	/* boolean properties */
		break;

	case sizeof (int):
		(void) prom_getprop(node, name, (caddr_t)&value);
		break;

	default:
		value = deflt;
		break;
	}

	return (value);
}

/*
 * Called by setcpudelay() to initialize sys_tick_freq from the
 * "stick-frequency" property of the platform node in the MD.
 */
void
cpu_init_tick_freq(void)
{
	md_t *mdp;
	mde_cookie_t rootnode;
	int listsz;
	mde_cookie_t *listp = NULL;
	int num_nodes;
	uint64_t stick_prop;

	if (broken_md_flag) {
		sys_tick_freq = cpunodes[CPU->cpu_id].clock_freq;
		return;
	}

	if ((mdp = md_get_handle()) == NULL)
		panic("stick_frequency property not found in MD");

	rootnode = md_root_node(mdp);
	ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);

	num_nodes = md_node_count(mdp);

	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)prom_alloc((caddr_t)0, listsz, 0);

	if (listp == NULL)
		panic("cannot allocate list for MD properties");

	num_nodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "platform"),
	    md_find_name(mdp, "fwd"), listp);

	ASSERT(num_nodes == 1);

	if (md_get_prop_val(mdp, *listp, "stick-frequency", &stick_prop) != 0)
		panic("stick_frequency property not found in MD");

	sys_tick_freq = stick_prop;

	prom_free((caddr_t)listp, listsz);
	(void) md_fini_handle(mdp);
}

int shipit(int n, uint64_t cpu_list_ra);
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

#ifdef DEBUG
#define	SEND_MONDO_STATS	1
#endif

#ifdef SEND_MONDO_STATS
uint32_t x_one_stimes[64];
uint32_t x_one_ltimes[16];
uint32_t x_set_stimes[64];
uint32_t x_set_ltimes[16];
uint32_t x_set_cpus[NCPU];
#endif

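/*
 * Send a mondo interrupt to a single CPU using the hypervisor's CPU list
 * interface. Retries while the hypervisor reports H_EWOULDBLOCK, and
 * panics if the target CPU is marked in error or the send times out.
 */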
void
send_one_mondo(int cpuid)
{
	int retries, stat;
	uint64_t starttick, endtick, tick, lasttick;
	struct machcpu *mcpup = &(CPU->cpu_m);

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
	starttick = lasttick = gettick();
	mcpup->cpu_list[0] = (uint16_t)cpuid;
	stat = shipit(1, mcpup->cpu_list_ra);
	endtick = starttick + xc_tick_limit;
	retries = 0;
	while (stat != H_EOK) {
		if (stat != H_EWOULDBLOCK) {
			if (panic_quiesce)
				return;
			if (stat == H_ECPUERROR)
				cmn_err(CE_PANIC, "send_one_mondo: "
				    "cpuid: 0x%x has been marked in "
				    "error", cpuid);
			else
				cmn_err(CE_PANIC, "send_one_mondo: "
				    "unexpected hypervisor error 0x%x "
				    "while sending a mondo to cpuid: "
				    "0x%x", stat, cpuid);
		}
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_PANIC, "send mondo timeout "
			    "(target 0x%x) [retries: 0x%x hvstat: 0x%x]",
			    cpuid, retries, stat);
		}
		drv_usecwait(1);
		stat = shipit(1, mcpup->cpu_list_ra);
		retries++;
	}
#ifdef SEND_MONDO_STATS
	{
		uint64_t n = gettick() - starttick;
		if (n < 8192)
			x_one_stimes[n >> 7]++;
		else if (n < 15 * 8192)
			x_one_ltimes[n >> 13]++;
		else
			x_one_ltimes[0xf]++;
	}
#endif
}

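/*
 * Send a mondo interrupt to every CPU in 'set'. The hypervisor overwrites
 * the IDs of successfully targeted CPUs in cpu_list with
 * HV_SEND_MONDO_ENTRYDONE, so the list is compacted and resent until every
 * mondo has been delivered or an unrecoverable error occurs.
 */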
void
send_mondo_set(cpuset_t set)
{
	uint64_t starttick, endtick, tick, lasttick;
	uint_t largestid, smallestid;
	int i, j;
	int ncpuids = 0;
	int shipped = 0;
	int retries = 0;
	struct machcpu *mcpup = &(CPU->cpu_m);

	ASSERT(!CPUSET_ISNULL(set));
	CPUSET_BOUNDS(set, smallestid, largestid);
	if (smallestid == CPUSET_NOTINSET) {
		return;
	}

	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	/*
	 * Assemble CPU list for HV argument. We already know
	 * smallestid and largestid are members of set.
	 */
	mcpup->cpu_list[ncpuids++] = (uint16_t)smallestid;
	if (largestid != smallestid) {
		for (i = smallestid + 1; i <= largestid - 1; i++) {
			if (CPU_IN_SET(set, i)) {
				mcpup->cpu_list[ncpuids++] = (uint16_t)i;
			}
		}
		mcpup->cpu_list[ncpuids++] = (uint16_t)largestid;
	}

	do {
		int stat;

		stat = shipit(ncpuids, mcpup->cpu_list_ra);
		if (stat == H_EOK) {
			shipped += ncpuids;
			break;
		}

		/*
		 * Either not all CPU mondos were sent, or an
		 * error occurred. CPUs that were sent mondos
		 * have their CPU IDs overwritten in cpu_list.
		 * Reset cpu_list so that it only holds those
		 * CPU IDs that still need to be sent.
		 */
		for (i = 0, j = 0; i < ncpuids; i++) {
			if (mcpup->cpu_list[i] == HV_SEND_MONDO_ENTRYDONE) {
				shipped++;
			} else {
				mcpup->cpu_list[j++] = mcpup->cpu_list[i];
			}
		}
		ncpuids = j;

		/*
		 * Now handle possible errors returned
		 * from hypervisor.
		 */
		if (stat == H_ECPUERROR) {
			int errorcpus;

			if (!panic_quiesce)
				cmn_err(CE_CONT, "send_mondo_set: cpuid(s) ");

			/*
			 * Remove any CPUs in the error state from
			 * cpu_list. At this point cpu_list only
			 * contains the CPU IDs for mondos not
			 * successfully sent.
			 */
			for (i = 0, errorcpus = 0; i < ncpuids; i++) {
				uint64_t state = CPU_STATE_INVALID;
				uint16_t id = mcpup->cpu_list[i];

				(void) hv_cpu_state(id, &state);
				if (state == CPU_STATE_ERROR) {
					if (!panic_quiesce)
						cmn_err(CE_CONT, "0x%x ", id);
					errorcpus++;
				} else if (errorcpus > 0) {
					mcpup->cpu_list[i - errorcpus] =
					    mcpup->cpu_list[i];
				}
			}
			ncpuids -= errorcpus;

			if (!panic_quiesce) {
				if (errorcpus == 0) {
					cmn_err(CE_CONT, "<none> have been "
					    "marked in error\n");
					cmn_err(CE_PANIC, "send_mondo_set: "
					    "hypervisor returned "
					    "H_ECPUERROR but no CPU in "
					    "cpu_list in error state");
				} else {
					cmn_err(CE_CONT, "have been marked in "
					    "error\n");
					cmn_err(CE_PANIC, "send_mondo_set: "
					    "CPU(s) in error state");
				}
			}
		} else if (stat != H_EWOULDBLOCK) {
			if (panic_quiesce)
				return;
			/*
			 * For all other errors, panic.
			 */
			cmn_err(CE_CONT, "send_mondo_set: unexpected "
			    "hypervisor error 0x%x while sending a "
			    "mondo to cpuid(s):", stat);
			for (i = 0; i < ncpuids; i++) {
				cmn_err(CE_CONT, " 0x%x", mcpup->cpu_list[i]);
			}
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: unexpected "
			    "hypervisor error");
		}

		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_CONT, "send mondo timeout "
			    "[retries: 0x%x] cpuids: ", retries);
			for (i = 0; i < ncpuids; i++)
				cmn_err(CE_CONT, " 0x%x", mcpup->cpu_list[i]);
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: timeout");
		}

		while (gettick() < (tick + sys_clock_mhz))
			;
		retries++;
	} while (ncpuids > 0);

	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);

#ifdef SEND_MONDO_STATS
	{
		uint64_t n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else if (n < 15 * 8192)
			x_set_ltimes[n >> 13]++;
		else
			x_set_ltimes[0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

void
syncfpu(void)
{
}

void
cpu_flush_ecache(void)
{
}

void
sticksync_slave(void)
{}

void
sticksync_master(void)
{}

void
cpu_init_cache_scrub(void)
{}

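/*
 * Retry a failed dtrace_blksuword32() store with any watchpoint on the
 * target word temporarily disabled, so a watched page cannot make the
 * store fail indefinitely.
 */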
int
dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
{
	int ret, watched;

	watched = watch_disable_addr((void *)addr, 4, S_WRITE);
	ret = dtrace_blksuword32(addr, data, 0);
	if (watched)
		watch_enable_addr((void *)addr, 4, S_WRITE);

	return (ret);
}

int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{
	if (suword32((void *)addr, *data) == -1)
		return (tryagain ? dtrace_blksuword32_err(addr, data) : -1);
	dtrace_flush_sec(addr);

	return (0);
}

/*ARGSUSED*/
void
cpu_faulted_enter(struct cpu *cp)
{
}

/*ARGSUSED*/
void
cpu_faulted_exit(struct cpu *cp)
{
}

static int
kdi_cpu_ready_iter(int (*cb)(int, void *), void *arg)
{
	int rc, i;

	for (rc = 0, i = 0; i < NCPU; i++) {
		if (CPU_IN_SET(cpu_ready_set, i))
			rc += cb(i, arg);
	}

	return (rc);
}

/*
 * Sends a cross-call to a specified processor. The caller assumes
 * responsibility for repetition of cross-calls, as appropriate (MARSA for
 * debugging).
 */
static int
kdi_xc_one(int cpuid, void (*func)(uintptr_t, uintptr_t), uintptr_t arg1,
    uintptr_t arg2)
{
	int stat;
	struct machcpu *mcpup;
	uint64_t cpuaddr_reg = 0, cpuaddr_scr = 0;

	mcpup = &(((cpu_t *)get_cpuaddr(cpuaddr_reg, cpuaddr_scr))->cpu_m);

	/*
	 * if (idsr_busy())
	 *	return (KDI_XC_RES_ERR);
	 */

	init_mondo_nocheck((xcfunc_t *)func, arg1, arg2);

	mcpup->cpu_list[0] = (uint16_t)cpuid;
	stat = shipit(1, mcpup->cpu_list_ra);

	if (stat == 0)
		return (KDI_XC_RES_OK);
	else
		return (KDI_XC_RES_NACK);
}

static void
kdi_tickwait(clock_t nticks)
{
	clock_t endtick = gettick() + nticks;

	while (gettick() < endtick);
}

static void
kdi_cpu_init(int dcache_size, int dcache_linesize, int icache_size,
    int icache_linesize)
{
	kdi_dcache_size = dcache_size;
	kdi_dcache_linesize = dcache_linesize;
	kdi_icache_size = icache_size;
	kdi_icache_linesize = icache_linesize;
}

/* used directly by kdi_read/write_phys */
void
kdi_flush_caches(void)
{
	/* Not required on sun4v architecture. */
}

/*ARGSUSED*/
int
kdi_get_stick(uint64_t *stickp)
{
	return (-1);
}

void
cpu_kdi_init(kdi_t *kdi)
{
	kdi->kdi_flush_caches = kdi_flush_caches;
	kdi->mkdi_cpu_init = kdi_cpu_init;
	kdi->mkdi_cpu_ready_iter = kdi_cpu_ready_iter;
	kdi->mkdi_xc_one = kdi_xc_one;
	kdi->mkdi_tickwait = kdi_tickwait;
	kdi->mkdi_get_stick = kdi_get_stick;
}

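/*
 * Claim/release hooks invoked when the kernel debugger takes and returns
 * control of the machine; the watchdog is suspended for the duration.
 */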
static void
sun4v_system_claim(void)
{
	watchdog_suspend();
}

static void
sun4v_system_release(void)
{
	watchdog_resume();
}

void
plat_kdi_init(kdi_t *kdi)
{
	kdi->pkdi_system_claim = sun4v_system_claim;
	kdi->pkdi_system_release = sun4v_system_release;
}

/*
 * Routine to return memory information associated
 * with a physical address and syndrome.
 */
/* ARGSUSED */
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
	return (ENOTSUP);
}

/*
 * This routine returns the size of the kernel's FRU name buffer.
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * This routine is a more generic interface to cpu_get_mem_unum(),
 * that may be used by other modules (e.g. mm).
 */
/* ARGSUSED */
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* ARGSUSED */
int
cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* ARGSUSED */
int
cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	return (ENOTSUP);
}

/*
 * xt_sync - wait for previous x-traps to finish
 */
void
xt_sync(cpuset_t cpuset)
{
	union {
		uint8_t volatile byte[NCPU];
		uint64_t volatile xword[NCPU / 8];
	} cpu_sync;
	uint64_t starttick, endtick, tick, lasttick;
	uint_t largestid, smallestid;
	int i;

	kpreempt_disable();
	CPUSET_DEL(cpuset, CPU->cpu_id);
	CPUSET_AND(cpuset, cpu_ready_set);

	CPUSET_BOUNDS(cpuset, smallestid, largestid);
	if (smallestid == CPUSET_NOTINSET)
		goto out;

	/*
	 * Sun4v uses a queue for receiving mondos. Successful
	 * transmission of a mondo only indicates that the mondo
	 * has been written into the queue.
	 *
	 * We use an array of bytes to let each cpu signal back
	 * to the cross-trap sender that the cross trap has been
	 * executed. Set the byte to 1 before sending the cross trap
	 * and wait until other cpus reset it to 0.
	 */
	bzero((void *)&cpu_sync, NCPU);
	cpu_sync.byte[smallestid] = 1;
	if (largestid != smallestid) {
		for (i = (smallestid + 1); i <= (largestid - 1); i++)
			if (CPU_IN_SET(cpuset, i))
				cpu_sync.byte[i] = 1;
		cpu_sync.byte[largestid] = 1;
	}

	xt_some(cpuset, (xcfunc_t *)xt_sync_tl1,
	    (uint64_t)cpu_sync.byte, 0);

	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	for (i = (smallestid / 8); i <= (largestid / 8); i++) {
		while (cpu_sync.xword[i] != 0) {
			tick = gettick();
			/*
			 * If there is a big jump between the current tick
			 * count and lasttick, we have probably hit a break
			 * point. Adjust endtick accordingly to avoid panic.
			 */
			if (tick > (lasttick + xc_tick_jump_limit)) {
				endtick += (tick - lasttick);
			}
			lasttick = tick;
			if (tick > endtick) {
				if (panic_quiesce)
					goto out;
				cmn_err(CE_CONT, "Cross trap sync timeout "
				    "at cpu_sync.xword[%d]: 0x%lx\n",
				    i, cpu_sync.xword[i]);
				cmn_err(CE_PANIC, "xt_sync: timeout");
			}
		}
	}

out:
	kpreempt_enable();
}

/*
 * Recalculate the values of the cross-call timeout variables based
 * on the value of the 'inter-cpu-latency' property of the platform node.
 * The property sets the number of nanoseconds to wait for a cross-call
 * to be acknowledged. Other timeout variables are derived from it.
 *
 * N.B. This implementation is aware of the internals of xc_init()
 * and updates many of the same variables.
 */
void
recalc_xc_timeouts(void)
{
	typedef union {
		uint64_t whole;
		struct {
			uint_t high;
			uint_t low;
		} half;
	} u_number;

	/* See x_call.c for descriptions of these extern variables. */
	extern uint64_t xc_tick_limit_scale;
	extern uint64_t xc_mondo_time_limit;
	extern uint64_t xc_func_time_limit;
	extern uint64_t xc_scale;
	extern uint64_t xc_mondo_multiplier;
	extern uint_t nsec_shift;

	/* Temp versions of the target variables */
	uint64_t tick_limit;
	uint64_t tick_jump_limit;
	uint64_t mondo_time_limit;
	uint64_t func_time_limit;
	uint64_t scale;

	uint64_t latency;	/* nanoseconds */
	uint64_t maxfreq;
	uint64_t tick_limit_save = xc_tick_limit;
	uint_t tick_scale;
	uint64_t top;
	uint64_t bottom;
	u_number tk;

	md_t *mdp;
	int nrnode;
	mde_cookie_t *platlist;

	/*
	 * Look up the 'inter-cpu-latency' (optional) property in the
	 * platform node of the MD. The units are nanoseconds.
	 */
	if ((mdp = md_get_handle()) == NULL) {
		cmn_err(CE_WARN, "recalc_xc_timeouts: "
		    "Unable to initialize machine description");
		return;
	}

	nrnode = md_alloc_scan_dag(mdp,
	    md_root_node(mdp), "platform", "fwd", &platlist);

	ASSERT(nrnode == 1);
	if (nrnode < 1) {
		cmn_err(CE_WARN, "recalc_xc_timeouts: platform node missing");
		(void) md_fini_handle(mdp);
		return;
	}

	if (md_get_prop_val(mdp, platlist[0],
	    "inter-cpu-latency", &latency) == -1) {
		md_free_scan_dag(mdp, &platlist);
		(void) md_fini_handle(mdp);
		return;
	}

	/*
	 * We're finished with the MD and the scan list; release them
	 * now so that no return path leaks the handle.
	 */
	md_free_scan_dag(mdp, &platlist);
	(void) md_fini_handle(mdp);

	/*
	 * clock.h defines an assembly-language macro
	 * (NATIVE_TIME_TO_NSEC_SCALE) to convert from %stick
	 * units to nanoseconds. Since the inter-cpu-latency
	 * units are nanoseconds and the xc_* variables require
	 * %stick units, we need the inverse of that function.
	 * The trick is to perform the calculation without
	 * floating point, but also without integer truncation
	 * or overflow. To understand the calculation below,
	 * please read the discussion of the macro in clock.h.
	 * Since this new code will be invoked infrequently,
	 * we can afford to implement it in C.
	 *
	 * tick_scale is the reciprocal of nsec_scale which is
	 * calculated at startup in setcpudelay(). The calc
	 * of tick_limit parallels that of NATIVE_TIME_TO_NSEC_SCALE
	 * except we use tick_scale instead of nsec_scale and
	 * C instead of assembler.
	 */
	tick_scale = (uint_t)(((u_longlong_t)sys_tick_freq
	    << (32 - nsec_shift)) / NANOSEC);

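	/*
	 * Multiply the 64-bit latency by the 32-bit tick_scale in two
	 * halves so the product cannot overflow; the << 4 below folds in
	 * the same NSEC_SHIFT (4) used by NATIVE_TIME_TO_NSEC_SCALE.
	 */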
	tk.whole = latency;
	top = ((uint64_t)tk.half.high << 4) * tick_scale;
	bottom = (((uint64_t)tk.half.low << 4) * (uint64_t)tick_scale) >> 32;
	tick_limit = top + bottom;

	/*
	 * xc_init() calculated 'maxfreq' by looking at all the cpus,
	 * and used it to derive some of the timeout variables that we
	 * recalculate below. We can back into the original value by
	 * using the inverse of one of those calculations.
	 */
	maxfreq = xc_mondo_time_limit / xc_scale;

	/*
	 * Don't allow the new timeout (xc_tick_limit) to fall below
	 * the system tick frequency (stick). Allowing the timeout
	 * to be set more tightly than this empirically determined
	 * value may cause panics.
	 */
	tick_limit = tick_limit < sys_tick_freq ? sys_tick_freq : tick_limit;

	tick_jump_limit = tick_limit / 32;
	tick_limit *= xc_tick_limit_scale;

	/*
	 * Recalculate xc_scale since it is used in a callback function
	 * (xc_func_timeout_adj) to adjust two of the timeouts dynamically.
	 * Make the change in xc_scale proportional to the change in
	 * xc_tick_limit.
	 */
	scale = (xc_scale * tick_limit + sys_tick_freq / 2) / tick_limit_save;
	if (scale == 0)
		scale = 1;

	mondo_time_limit = maxfreq * scale;
	func_time_limit = mondo_time_limit * xc_mondo_multiplier;

	/*
	 * Don't modify the timeouts if nothing has changed. Else,
	 * stuff the variables with the freshly calculated (temp)
	 * variables. This minimizes the window where the set of
	 * values could be inconsistent.
	 */
	if (tick_limit != xc_tick_limit) {
		xc_tick_limit = tick_limit;
		xc_tick_jump_limit = tick_jump_limit;
		xc_scale = scale;
		xc_mondo_time_limit = mondo_time_limit;
		xc_func_time_limit = func_time_limit;
		/*
		 * Force the new values to be used for future cross
		 * calls. This is necessary only when we increase
		 * the timeouts.
		 */
		if (tick_limit > tick_limit_save) {
			cpuset_t cpuset = cpu_ready_set;

			xt_sync(cpuset);
		}
	}
}