xref: /onnv-gate/usr/src/uts/sun4v/os/error.c (revision 11304)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
52181Sayznaga  * Common Development and Distribution License (the "License").
62181Sayznaga  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
228574SJason.Beloro@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #include <sys/types.h>
270Sstevel@tonic-gate #include <sys/machsystm.h>
283199Sep32863 #include <sys/sysmacros.h>
290Sstevel@tonic-gate #include <sys/cpuvar.h>
300Sstevel@tonic-gate #include <sys/async.h>
310Sstevel@tonic-gate #include <sys/ontrap.h>
320Sstevel@tonic-gate #include <sys/ddifm.h>
330Sstevel@tonic-gate #include <sys/hypervisor_api.h>
340Sstevel@tonic-gate #include <sys/errorq.h>
350Sstevel@tonic-gate #include <sys/promif.h>
360Sstevel@tonic-gate #include <sys/prom_plat.h>
370Sstevel@tonic-gate #include <sys/x_call.h>
380Sstevel@tonic-gate #include <sys/error.h>
390Sstevel@tonic-gate #include <sys/fm/util.h>
40541Srf157361 #include <sys/ivintr.h>
413156Sgirish #include <sys/archsystm.h>
420Sstevel@tonic-gate 
#define	MAX_CE_FLTS		10
#define	MAX_ASYNC_FLTS		6

errorq_t *ue_queue;			/* queue of uncorrectable errors */
errorq_t *ce_queue;			/* queue of correctable errors */

/*
 * Being used by memory test driver.
 * ce_verbose_memory - covers CEs in DIMMs
 * ce_verbose_other - covers "others" (ecache, IO, etc.)
 *
 * If the value is 0, nothing is logged.
 * If the value is 1, the error is logged to the log file, but not console.
 * If the value is 2, the error is logged to the log file and console.
 */
int	ce_verbose_memory = 1;
int	ce_verbose_other = 1;

/* Additional error reporting/debug flags */
int	ce_show_data = 0;
int	ce_debug = 0;
int	ue_debug = 0;
int	reset_debug = 0;

/*
 * Tunables for controlling the handling of asynchronous faults (AFTs). Setting
 * these to non-default values on a non-DEBUG kernel is NOT supported.
 */
int	aft_verbose = 0;	/* log AFT messages > 1 to log only */
int	aft_panic = 0;		/* panic (not reboot) on fatal usermode AFLT */
int	aft_testfatal = 0;	/* force all AFTs to panic immediately */

/*
 * Used for vbsc hostshutdown (power-off button)
 */
int	err_shutdown_triggered = 0;	/* only once */
uint64_t err_shutdown_inum = 0;	/* used to pull the trigger */

/*
 * Used to print NRE/RE via system variable or kmdb
 */
int		printerrh = 0;		/* see /etc/system */
static void	errh_er_print(errh_er_t *, const char *);
/* presumably serializes errh_er_print() output -- confirm at definition */
kmutex_t	errh_print_lock;

/*
 * Defined in bus_func.c but initialised in error_init
 */
extern kmutex_t bfd_lock;

static uint32_t rq_overflow_count = 0;		/* counter for rq overflow */

/* Forward declarations for the local helpers defined in this file */
static void cpu_queue_one_event(errh_async_flt_t *);
static uint32_t count_entries_on_queue(uint64_t, uint64_t, uint32_t);
static void errh_page_retire(errh_async_flt_t *, uchar_t);
static int errh_error_protected(struct regs *, struct async_flt *, int *);
static void errh_rq_full(struct async_flt *);
static void ue_drain(void *, struct async_flt *, errorq_elem_t *);
static void ce_drain(void *, struct async_flt *, errorq_elem_t *);
static void errh_handle_attr(errh_async_flt_t *);
static void errh_handle_asr(errh_async_flt_t *);
static void errh_handle_sp(errh_async_flt_t *);
static void sp_ereport_post(uint8_t);
1050Sstevel@tonic-gate 
1060Sstevel@tonic-gate /*ARGSUSED*/
1070Sstevel@tonic-gate void
1080Sstevel@tonic-gate process_resumable_error(struct regs *rp, uint32_t head_offset,
1090Sstevel@tonic-gate     uint32_t tail_offset)
1100Sstevel@tonic-gate {
1110Sstevel@tonic-gate 	struct machcpu *mcpup;
1120Sstevel@tonic-gate 	struct async_flt *aflt;
1130Sstevel@tonic-gate 	errh_async_flt_t errh_flt;
1140Sstevel@tonic-gate 	errh_er_t *head_va;
1150Sstevel@tonic-gate 
1160Sstevel@tonic-gate 	mcpup = &(CPU->cpu_m);
1170Sstevel@tonic-gate 
1180Sstevel@tonic-gate 	while (head_offset != tail_offset) {
1190Sstevel@tonic-gate 		/* kernel buffer starts right after the resumable queue */
1200Sstevel@tonic-gate 		head_va = (errh_er_t *)(mcpup->cpu_rq_va + head_offset +
1210Sstevel@tonic-gate 		    CPU_RQ_SIZE);
1220Sstevel@tonic-gate 		/* Copy the error report to local buffer */
1230Sstevel@tonic-gate 		bzero(&errh_flt, sizeof (errh_async_flt_t));
1240Sstevel@tonic-gate 		bcopy((char *)head_va, &(errh_flt.errh_er),
1250Sstevel@tonic-gate 		    sizeof (errh_er_t));
1260Sstevel@tonic-gate 
1274612Srf157361 		mcpup->cpu_rq_lastre = head_va;
1284612Srf157361 		if (printerrh)
1294612Srf157361 			errh_er_print(&errh_flt.errh_er, "RQ");
1304612Srf157361 
1310Sstevel@tonic-gate 		/* Increment the queue head */
1320Sstevel@tonic-gate 		head_offset += Q_ENTRY_SIZE;
1330Sstevel@tonic-gate 		/* Wrap around */
1340Sstevel@tonic-gate 		head_offset &= (CPU_RQ_SIZE - 1);
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate 		/* set error handle to zero so it can hold new error report */
1370Sstevel@tonic-gate 		head_va->ehdl = 0;
1380Sstevel@tonic-gate 
1390Sstevel@tonic-gate 		switch (errh_flt.errh_er.desc) {
1400Sstevel@tonic-gate 		case ERRH_DESC_UCOR_RE:
1413156Sgirish 			/*
1423156Sgirish 			 * Check error attribute, handle individual error
1433156Sgirish 			 * if it is needed.
1443156Sgirish 			 */
1453156Sgirish 			errh_handle_attr(&errh_flt);
1460Sstevel@tonic-gate 			break;
1470Sstevel@tonic-gate 
148541Srf157361 		case ERRH_DESC_WARN_RE:
149541Srf157361 			/*
150541Srf157361 			 * Power-off requested, but handle it one time only.
151541Srf157361 			 */
152541Srf157361 			if (!err_shutdown_triggered) {
153541Srf157361 				setsoftint(err_shutdown_inum);
154541Srf157361 				++err_shutdown_triggered;
155541Srf157361 			}
156541Srf157361 			continue;
157541Srf157361 
158*11304SJanie.Lu@Sun.COM 		case ERRH_DESC_SP:
159*11304SJanie.Lu@Sun.COM 			/*
160*11304SJanie.Lu@Sun.COM 			 * The state of the SP has changed.
161*11304SJanie.Lu@Sun.COM 			 */
162*11304SJanie.Lu@Sun.COM 			errh_handle_sp(&errh_flt);
163*11304SJanie.Lu@Sun.COM 			continue;
164*11304SJanie.Lu@Sun.COM 
1650Sstevel@tonic-gate 		default:
1660Sstevel@tonic-gate 			cmn_err(CE_WARN, "Error Descriptor 0x%llx "
1670Sstevel@tonic-gate 			    " invalid in resumable error handler",
1680Sstevel@tonic-gate 			    (long long) errh_flt.errh_er.desc);
1690Sstevel@tonic-gate 			continue;
1700Sstevel@tonic-gate 		}
1710Sstevel@tonic-gate 
1720Sstevel@tonic-gate 		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);
1730Sstevel@tonic-gate 		aflt->flt_id = gethrtime();
1740Sstevel@tonic-gate 		aflt->flt_bus_id = getprocessorid();
1750Sstevel@tonic-gate 		aflt->flt_class = CPU_FAULT;
1760Sstevel@tonic-gate 		aflt->flt_prot = AFLT_PROT_NONE;
1770Sstevel@tonic-gate 		aflt->flt_priv = (((errh_flt.errh_er.attr & ERRH_MODE_MASK)
1780Sstevel@tonic-gate 		    >> ERRH_MODE_SHIFT) == ERRH_MODE_PRIV);
1790Sstevel@tonic-gate 
1800Sstevel@tonic-gate 		if (errh_flt.errh_er.attr & ERRH_ATTR_CPU)
1810Sstevel@tonic-gate 			/* If it is an error on other cpu */
1820Sstevel@tonic-gate 			aflt->flt_panic = 1;
1830Sstevel@tonic-gate 		else
1840Sstevel@tonic-gate 			aflt->flt_panic = 0;
1850Sstevel@tonic-gate 
1860Sstevel@tonic-gate 		/*
1870Sstevel@tonic-gate 		 * Handle resumable queue full case.
1880Sstevel@tonic-gate 		 */
1890Sstevel@tonic-gate 		if (errh_flt.errh_er.attr & ERRH_ATTR_RQF) {
1900Sstevel@tonic-gate 			(void) errh_rq_full(aflt);
1910Sstevel@tonic-gate 		}
1920Sstevel@tonic-gate 
1930Sstevel@tonic-gate 		/*
1940Sstevel@tonic-gate 		 * Queue the error on ce or ue queue depend on flt_panic.
1950Sstevel@tonic-gate 		 * Even if flt_panic is set, the code still keep processing
1960Sstevel@tonic-gate 		 * the rest element on rq until the panic starts.
1970Sstevel@tonic-gate 		 */
1980Sstevel@tonic-gate 		(void) cpu_queue_one_event(&errh_flt);
1990Sstevel@tonic-gate 
2000Sstevel@tonic-gate 		/*
2010Sstevel@tonic-gate 		 * Panic here if aflt->flt_panic has been set.
2020Sstevel@tonic-gate 		 * Enqueued errors will be logged as part of the panic flow.
2030Sstevel@tonic-gate 		 */
2040Sstevel@tonic-gate 		if (aflt->flt_panic) {
2050Sstevel@tonic-gate 			fm_panic("Unrecoverable error on another CPU");
2060Sstevel@tonic-gate 		}
2070Sstevel@tonic-gate 	}
2080Sstevel@tonic-gate }
2090Sstevel@tonic-gate 
/*
 * Drain the per-cpu non-resumable error queue.
 *
 * rp          - saved registers at the time of the trap
 * flags       - trap level in ERRH_TL_MASK, plus the ERRH_U_SPILL_FILL
 *               bit when the error hit a user window spill/fill trap
 * head_offset - byte offset of the queue head
 * tail_offset - byte offset of the queue tail
 *
 * Each report is copied out of the kernel shadow buffer (located right
 * after the hypervisor non-resumable queue), the queue slot is released,
 * and the error is classified, queued for logging, and when required
 * escalated to a panic.
 */
void
process_nonresumable_error(struct regs *rp, uint64_t flags,
    uint32_t head_offset, uint32_t tail_offset)
{
	struct machcpu *mcpup;
	struct async_flt *aflt;
	errh_async_flt_t errh_flt;
	errh_er_t *head_va;
	int trampolined = 0;
	/*
	 * NOTE(review): 'expected' is initialized once and may be changed
	 * by errh_error_protected(); the changed value then carries over
	 * into later loop iterations -- confirm this is intended.
	 */
	int expected = DDI_FM_ERR_UNEXPECTED;
	uint64_t exec_mode;
	uint8_t u_spill_fill;

	mcpup = &(CPU->cpu_m);

	while (head_offset != tail_offset) {
		/* kernel buffer starts right after the nonresumable queue */
		head_va = (errh_er_t *)(mcpup->cpu_nrq_va + head_offset +
		    CPU_NRQ_SIZE);

		/* Copy the error report to local buffer */
		bzero(&errh_flt, sizeof (errh_async_flt_t));

		bcopy((char *)head_va, &(errh_flt.errh_er),
		    sizeof (errh_er_t));

		/* Remember the last report seen, for debug printing */
		mcpup->cpu_nrq_lastnre = head_va;
		if (printerrh)
			errh_er_print(&errh_flt.errh_er, "NRQ");

		/* Increment the queue head */
		head_offset += Q_ENTRY_SIZE;
		/* Wrap around */
		head_offset &= (CPU_NRQ_SIZE - 1);

		/* set error handle to zero so it can hold new error report */
		head_va->ehdl = 0;

		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);

		trampolined = 0;

		/* PIO errors are bus faults; everything else is a CPU fault */
		if (errh_flt.errh_er.attr & ERRH_ATTR_PIO)
			aflt->flt_class = BUS_FAULT;
		else
			aflt->flt_class = CPU_FAULT;

		aflt->flt_id = gethrtime();
		aflt->flt_bus_id = getprocessorid();
		aflt->flt_pc = (caddr_t)rp->r_pc;
		exec_mode = (errh_flt.errh_er.attr & ERRH_MODE_MASK)
		    >> ERRH_MODE_SHIFT;
		/* unknown execution mode is treated as privileged */
		aflt->flt_priv = (exec_mode == ERRH_MODE_PRIV ||
		    exec_mode == ERRH_MODE_UNKNOWN);
		aflt->flt_prot = AFLT_PROT_NONE;
		aflt->flt_tl = (uchar_t)(flags & ERRH_TL_MASK);
		/* an error at TL>0 (or forced test mode) is fatal */
		aflt->flt_panic = ((aflt->flt_tl != 0) ||
		    (aft_testfatal != 0));

		/*
		 * For the first error packet on the queue, check if it
		 * happened in user fill/spill trap.  'flags' is rewritten
		 * to just the trap level below, so the spill/fill bit is
		 * consumed only once per call.
		 */
		if (flags & ERRH_U_SPILL_FILL) {
			u_spill_fill = 1;
			/* clear the user fill/spill flag in flags */
			flags = (uint64_t)aflt->flt_tl;
		} else
			u_spill_fill = 0;

		switch (errh_flt.errh_er.desc) {
		case ERRH_DESC_PR_NRE:
			/* precise error in a user spill/fill is benign */
			if (u_spill_fill) {
				aflt->flt_panic = 0;
				break;
			}
			/*
			 * Fall through, precise fault also need to check
			 * to see if it was protected.
			 */
			/*FALLTHRU*/

		case ERRH_DESC_DEF_NRE:
			/*
			 * If the trap occurred in privileged mode at TL=0,
			 * we need to check to see if we were executing
			 * in kernel under on_trap() or t_lofault
			 * protection. If so, and if it was a PIO or MEM
			 * error, then modify the saved registers so that
			 * we return from the trap to the appropriate
			 * trampoline routine.
			 */
			if (aflt->flt_priv == 1 && aflt->flt_tl == 0 &&
			    ((errh_flt.errh_er.attr & ERRH_ATTR_PIO) ||
			    (errh_flt.errh_er.attr & ERRH_ATTR_MEM))) {
				trampolined =
				    errh_error_protected(rp, aflt, &expected);
			}

			/*
			 * Usermode or copy-protected faults panic only when
			 * aft_panic is set; an unprotected kernel CPU fault
			 * always panics.
			 */
			if (!aflt->flt_priv || aflt->flt_prot ==
			    AFLT_PROT_COPY) {
				aflt->flt_panic |= aft_panic;
			} else if (!trampolined &&
			    (aflt->flt_class != BUS_FAULT)) {
				aflt->flt_panic = 1;
			}

			/*
			 * Check error attribute, handle individual error
			 * if it is needed.
			 */
			errh_handle_attr(&errh_flt);

			/*
			 * If PIO error, we need to query the bus nexus
			 * for fatal errors.
			 */
			if (aflt->flt_class == BUS_FAULT) {
				aflt->flt_addr = errh_flt.errh_er.ra;
				errh_cpu_run_bus_error_handlers(aflt,
				    expected);
			}

			break;

		case ERRH_DESC_USER_DCORE:
			/*
			 * User generated panic. Call panic directly
			 * since there are no FMA e-reports to
			 * display.
			 */

			panic("Panic - Generated at user request");

			break;

		default:
			cmn_err(CE_WARN, "Panic - Error Descriptor 0x%llx "
			    " invalid in non-resumable error handler",
			    (long long) errh_flt.errh_er.desc);
			aflt->flt_panic = 1;
			break;
		}

		/*
		 * Queue the error report for further processing. If
		 * flt_panic is set, code still process other errors
		 * in the queue until the panic routine stops the
		 * kernel.
		 */
		(void) cpu_queue_one_event(&errh_flt);

		/*
		 * Panic here if aflt->flt_panic has been set.
		 * Enqueued errors will be logged as part of the panic flow.
		 */
		if (aflt->flt_panic) {
			fm_panic("Unrecoverable hardware error");
		}

		/*
		 * Call page_retire() to handle memory errors.
		 */
		if (errh_flt.errh_er.attr & ERRH_ATTR_MEM)
			errh_page_retire(&errh_flt, PR_UE);

		/*
		 * If we queued an error and it was in user mode, or
		 * protected by t_lofault, or user_spill_fill is set, we
		 * set AST flag so the queue will be drained before
		 * returning to user mode.
		 */
		if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY ||
		    u_spill_fill) {
			int pcb_flag = 0;

			if (aflt->flt_class == CPU_FAULT)
				pcb_flag |= ASYNC_HWERR;
			else if (aflt->flt_class == BUS_FAULT)
				pcb_flag |= ASYNC_BERR;

			ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
			aston(curthread);
		}
	}
}
3960Sstevel@tonic-gate 
3970Sstevel@tonic-gate /*
3980Sstevel@tonic-gate  * For PIO errors, this routine calls nexus driver's error
3990Sstevel@tonic-gate  * callback routines. If the callback routine returns fatal, and
4000Sstevel@tonic-gate  * we are in kernel or unknow mode without any error protection,
4010Sstevel@tonic-gate  * we need to turn on the panic flag.
4020Sstevel@tonic-gate  */
4030Sstevel@tonic-gate void
4040Sstevel@tonic-gate errh_cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
4050Sstevel@tonic-gate {
4060Sstevel@tonic-gate 	int status;
4070Sstevel@tonic-gate 	ddi_fm_error_t de;
4080Sstevel@tonic-gate 
4090Sstevel@tonic-gate 	bzero(&de, sizeof (ddi_fm_error_t));
4100Sstevel@tonic-gate 
4110Sstevel@tonic-gate 	de.fme_version = DDI_FME_VERSION;
4120Sstevel@tonic-gate 	de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
4130Sstevel@tonic-gate 	de.fme_flag = expected;
4140Sstevel@tonic-gate 	de.fme_bus_specific = (void *)aflt->flt_addr;
4150Sstevel@tonic-gate 	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate 	/*
4180Sstevel@tonic-gate 	 * If error is protected, it will jump to proper routine
4190Sstevel@tonic-gate 	 * to handle the handle; if it is in user level, we just
4200Sstevel@tonic-gate 	 * kill the user process; if the driver thinks the error is
4210Sstevel@tonic-gate 	 * not fatal, we can drive on. If none of above are true,
4220Sstevel@tonic-gate 	 * we panic
4230Sstevel@tonic-gate 	 */
4240Sstevel@tonic-gate 	if ((aflt->flt_prot == AFLT_PROT_NONE) && (aflt->flt_priv == 1) &&
4250Sstevel@tonic-gate 	    (status == DDI_FM_FATAL))
4260Sstevel@tonic-gate 		aflt->flt_panic = 1;
4270Sstevel@tonic-gate }
4280Sstevel@tonic-gate 
4290Sstevel@tonic-gate /*
4300Sstevel@tonic-gate  * This routine checks to see if we are under any error protection when
4310Sstevel@tonic-gate  * the error happens. If we are under error protection, we unwind to
4320Sstevel@tonic-gate  * the protection and indicate fault.
4330Sstevel@tonic-gate  */
4340Sstevel@tonic-gate static int
4350Sstevel@tonic-gate errh_error_protected(struct regs *rp, struct async_flt *aflt, int *expected)
4360Sstevel@tonic-gate {
4370Sstevel@tonic-gate 	int trampolined = 0;
4380Sstevel@tonic-gate 	ddi_acc_hdl_t *hp;
4390Sstevel@tonic-gate 
4400Sstevel@tonic-gate 	if (curthread->t_ontrap != NULL) {
4410Sstevel@tonic-gate 		on_trap_data_t *otp = curthread->t_ontrap;
4420Sstevel@tonic-gate 
4430Sstevel@tonic-gate 		if (otp->ot_prot & OT_DATA_EC) {
4440Sstevel@tonic-gate 			aflt->flt_prot = AFLT_PROT_EC;
4450Sstevel@tonic-gate 			otp->ot_trap |= OT_DATA_EC;
4460Sstevel@tonic-gate 			rp->r_pc = otp->ot_trampoline;
4470Sstevel@tonic-gate 			rp->r_npc = rp->r_pc +4;
4480Sstevel@tonic-gate 			trampolined = 1;
4490Sstevel@tonic-gate 		}
4500Sstevel@tonic-gate 
4510Sstevel@tonic-gate 		if (otp->ot_prot & OT_DATA_ACCESS) {
4520Sstevel@tonic-gate 			aflt->flt_prot = AFLT_PROT_ACCESS;
4530Sstevel@tonic-gate 			otp->ot_trap |= OT_DATA_ACCESS;
4540Sstevel@tonic-gate 			rp->r_pc = otp->ot_trampoline;
4550Sstevel@tonic-gate 			rp->r_npc = rp->r_pc + 4;
4560Sstevel@tonic-gate 			trampolined = 1;
4570Sstevel@tonic-gate 			/*
4580Sstevel@tonic-gate 			 * for peek and caut_gets
4590Sstevel@tonic-gate 			 * errors are expected
4600Sstevel@tonic-gate 			 */
4610Sstevel@tonic-gate 			hp = (ddi_acc_hdl_t *)otp->ot_handle;
4620Sstevel@tonic-gate 			if (!hp)
4630Sstevel@tonic-gate 				*expected = DDI_FM_ERR_PEEK;
4640Sstevel@tonic-gate 			else if (hp->ah_acc.devacc_attr_access ==
4650Sstevel@tonic-gate 			    DDI_CAUTIOUS_ACC)
4660Sstevel@tonic-gate 				*expected = DDI_FM_ERR_EXPECTED;
4670Sstevel@tonic-gate 		}
4680Sstevel@tonic-gate 	} else if (curthread->t_lofault) {
4690Sstevel@tonic-gate 		aflt->flt_prot = AFLT_PROT_COPY;
4700Sstevel@tonic-gate 		rp->r_g1 = EFAULT;
4710Sstevel@tonic-gate 		rp->r_pc = curthread->t_lofault;
4720Sstevel@tonic-gate 		rp->r_npc = rp->r_pc + 4;
4730Sstevel@tonic-gate 		trampolined = 1;
4740Sstevel@tonic-gate 	}
4750Sstevel@tonic-gate 
4760Sstevel@tonic-gate 	return (trampolined);
4770Sstevel@tonic-gate }
4780Sstevel@tonic-gate 
4790Sstevel@tonic-gate /*
4800Sstevel@tonic-gate  * Queue one event.
4810Sstevel@tonic-gate  */
4820Sstevel@tonic-gate static void
4830Sstevel@tonic-gate cpu_queue_one_event(errh_async_flt_t *errh_fltp)
4840Sstevel@tonic-gate {
4850Sstevel@tonic-gate 	struct async_flt *aflt = (struct async_flt *)errh_fltp;
4860Sstevel@tonic-gate 	errorq_t *eqp;
4870Sstevel@tonic-gate 
4880Sstevel@tonic-gate 	if (aflt->flt_panic)
4890Sstevel@tonic-gate 		eqp = ue_queue;
4900Sstevel@tonic-gate 	else
4910Sstevel@tonic-gate 		eqp = ce_queue;
4920Sstevel@tonic-gate 
4930Sstevel@tonic-gate 	errorq_dispatch(eqp, errh_fltp, sizeof (errh_async_flt_t),
4940Sstevel@tonic-gate 	    aflt->flt_panic);
4950Sstevel@tonic-gate }
4960Sstevel@tonic-gate 
4970Sstevel@tonic-gate /*
4980Sstevel@tonic-gate  * The cpu_async_log_err() function is called by the ce/ue_drain() function to
4990Sstevel@tonic-gate  * handle logging for CPU events that are dequeued.  As such, it can be invoked
5000Sstevel@tonic-gate  * from softint context, from AST processing in the trap() flow, or from the
5010Sstevel@tonic-gate  * panic flow.  We decode the CPU-specific data, and log appropriate messages.
5020Sstevel@tonic-gate  */
5030Sstevel@tonic-gate void
5040Sstevel@tonic-gate cpu_async_log_err(void *flt)
5050Sstevel@tonic-gate {
5060Sstevel@tonic-gate 	errh_async_flt_t *errh_fltp = (errh_async_flt_t *)flt;
5070Sstevel@tonic-gate 	errh_er_t *errh_erp = (errh_er_t *)&errh_fltp->errh_er;
5080Sstevel@tonic-gate 
5090Sstevel@tonic-gate 	switch (errh_erp->desc) {
5100Sstevel@tonic-gate 	case ERRH_DESC_UCOR_RE:
5110Sstevel@tonic-gate 		if (errh_erp->attr & ERRH_ATTR_MEM) {
5120Sstevel@tonic-gate 			/*
513917Selowe 			 * Turn on the PR_UE flag. The page will be
5140Sstevel@tonic-gate 			 * scrubbed when it is freed.
5150Sstevel@tonic-gate 			 */
516917Selowe 			errh_page_retire(errh_fltp, PR_UE);
5170Sstevel@tonic-gate 		}
5180Sstevel@tonic-gate 
5190Sstevel@tonic-gate 		break;
5200Sstevel@tonic-gate 
5210Sstevel@tonic-gate 	case ERRH_DESC_PR_NRE:
5220Sstevel@tonic-gate 	case ERRH_DESC_DEF_NRE:
5230Sstevel@tonic-gate 		if (errh_erp->attr & ERRH_ATTR_MEM) {
5240Sstevel@tonic-gate 			/*
5250Sstevel@tonic-gate 			 * For non-resumable memory error, retire
5260Sstevel@tonic-gate 			 * the page here.
5270Sstevel@tonic-gate 			 */
528917Selowe 			errh_page_retire(errh_fltp, PR_UE);
529639Swh94709 
530639Swh94709 			/*
531639Swh94709 			 * If we are going to panic, scrub the page first
532639Swh94709 			 */
533639Swh94709 			if (errh_fltp->cmn_asyncflt.flt_panic)
53410271SJason.Beloro@Sun.COM 				mem_scrub(errh_fltp->errh_er.ra,
535639Swh94709 				    errh_fltp->errh_er.sz);
5360Sstevel@tonic-gate 		}
5370Sstevel@tonic-gate 		break;
5380Sstevel@tonic-gate 
5390Sstevel@tonic-gate 	default:
5400Sstevel@tonic-gate 		break;
5410Sstevel@tonic-gate 	}
5420Sstevel@tonic-gate }
5430Sstevel@tonic-gate 
5440Sstevel@tonic-gate /*
5450Sstevel@tonic-gate  * Called from ce_drain().
5460Sstevel@tonic-gate  */
5470Sstevel@tonic-gate void
5480Sstevel@tonic-gate cpu_ce_log_err(struct async_flt *aflt)
5490Sstevel@tonic-gate {
5500Sstevel@tonic-gate 	switch (aflt->flt_class) {
5510Sstevel@tonic-gate 	case CPU_FAULT:
5520Sstevel@tonic-gate 		cpu_async_log_err(aflt);
5530Sstevel@tonic-gate 		break;
5540Sstevel@tonic-gate 
5550Sstevel@tonic-gate 	case BUS_FAULT:
5560Sstevel@tonic-gate 		cpu_async_log_err(aflt);
5570Sstevel@tonic-gate 		break;
5580Sstevel@tonic-gate 
5590Sstevel@tonic-gate 	default:
5600Sstevel@tonic-gate 		break;
5610Sstevel@tonic-gate 	}
5620Sstevel@tonic-gate }
5630Sstevel@tonic-gate 
5640Sstevel@tonic-gate /*
5650Sstevel@tonic-gate  * Called from ue_drain().
5660Sstevel@tonic-gate  */
5670Sstevel@tonic-gate void
5680Sstevel@tonic-gate cpu_ue_log_err(struct async_flt *aflt)
5690Sstevel@tonic-gate {
5700Sstevel@tonic-gate 	switch (aflt->flt_class) {
5710Sstevel@tonic-gate 	case CPU_FAULT:
5720Sstevel@tonic-gate 		cpu_async_log_err(aflt);
5730Sstevel@tonic-gate 		break;
5740Sstevel@tonic-gate 
5750Sstevel@tonic-gate 	case BUS_FAULT:
5760Sstevel@tonic-gate 		cpu_async_log_err(aflt);
5770Sstevel@tonic-gate 		break;
5780Sstevel@tonic-gate 
5790Sstevel@tonic-gate 	default:
5800Sstevel@tonic-gate 		break;
5810Sstevel@tonic-gate 	}
5820Sstevel@tonic-gate }
5830Sstevel@tonic-gate 
5840Sstevel@tonic-gate /*
5850Sstevel@tonic-gate  * Turn on flag on the error memory region.
5860Sstevel@tonic-gate  */
5870Sstevel@tonic-gate static void
588917Selowe errh_page_retire(errh_async_flt_t *errh_fltp, uchar_t flag)
5890Sstevel@tonic-gate {
59010271SJason.Beloro@Sun.COM 	uint64_t flt_real_addr_start = errh_fltp->errh_er.ra;
5910Sstevel@tonic-gate 	uint64_t flt_real_addr_end = flt_real_addr_start +
5920Sstevel@tonic-gate 	    errh_fltp->errh_er.sz - 1;
5930Sstevel@tonic-gate 	int64_t current_addr;
5940Sstevel@tonic-gate 
5950Sstevel@tonic-gate 	if (errh_fltp->errh_er.sz == 0)
5960Sstevel@tonic-gate 		return;
5970Sstevel@tonic-gate 
5980Sstevel@tonic-gate 	for (current_addr = flt_real_addr_start;
5990Sstevel@tonic-gate 	    current_addr < flt_real_addr_end; current_addr += MMU_PAGESIZE) {
600917Selowe 		(void) page_retire(current_addr, flag);
6010Sstevel@tonic-gate 	}
6020Sstevel@tonic-gate }
6030Sstevel@tonic-gate 
6040Sstevel@tonic-gate void
6050Sstevel@tonic-gate mem_scrub(uint64_t paddr, uint64_t len)
6060Sstevel@tonic-gate {
6070Sstevel@tonic-gate 	uint64_t pa, length, scrubbed_len;
6080Sstevel@tonic-gate 
6090Sstevel@tonic-gate 	pa = paddr;
6100Sstevel@tonic-gate 	length = len;
6110Sstevel@tonic-gate 	scrubbed_len = 0;
6120Sstevel@tonic-gate 
613639Swh94709 	while (length > 0) {
614639Swh94709 		if (hv_mem_scrub(pa, length, &scrubbed_len) != H_EOK)
6150Sstevel@tonic-gate 			break;
6160Sstevel@tonic-gate 
6170Sstevel@tonic-gate 		pa += scrubbed_len;
6180Sstevel@tonic-gate 		length -= scrubbed_len;
6190Sstevel@tonic-gate 	}
6200Sstevel@tonic-gate }
6210Sstevel@tonic-gate 
6221457Swh94709 /*
6233199Sep32863  * Call hypervisor to flush the memory region.
6243199Sep32863  * Both va and len must be MMU_PAGESIZE aligned.
6253199Sep32863  * Returns the total number of bytes flushed.
6261457Swh94709  */
6273199Sep32863 uint64_t
6283729Swh94709 mem_sync(caddr_t orig_va, size_t orig_len)
6290Sstevel@tonic-gate {
6300Sstevel@tonic-gate 	uint64_t pa, length, flushed;
6313199Sep32863 	uint64_t chunk_len = MMU_PAGESIZE;
6323199Sep32863 	uint64_t total_flushed = 0;
6333729Swh94709 	uint64_t va, len;
6340Sstevel@tonic-gate 
6353729Swh94709 	if (orig_len == 0)
6363199Sep32863 		return (total_flushed);
6370Sstevel@tonic-gate 
6383729Swh94709 	/* align va */
6393729Swh94709 	va = P2ALIGN_TYPED(orig_va, MMU_PAGESIZE, uint64_t);
6403729Swh94709 	/* round up len to MMU_PAGESIZE aligned */
6413729Swh94709 	len = P2ROUNDUP_TYPED(orig_va + orig_len, MMU_PAGESIZE, uint64_t) - va;
6423729Swh94709 
6433199Sep32863 	while (len > 0) {
6443199Sep32863 		pa = va_to_pa((caddr_t)va);
6453199Sep32863 		if (pa == (uint64_t)-1)
6463199Sep32863 			return (total_flushed);
6470Sstevel@tonic-gate 
6483199Sep32863 		length = chunk_len;
6493199Sep32863 		flushed = 0;
6501457Swh94709 
6513199Sep32863 		while (length > 0) {
6523199Sep32863 			if (hv_mem_sync(pa, length, &flushed) != H_EOK)
6533199Sep32863 				return (total_flushed);
6540Sstevel@tonic-gate 
6553199Sep32863 			pa += flushed;
6563199Sep32863 			length -= flushed;
6573199Sep32863 			total_flushed += flushed;
6583199Sep32863 		}
6590Sstevel@tonic-gate 
6603199Sep32863 		va += chunk_len;
6613199Sep32863 		len -= chunk_len;
6620Sstevel@tonic-gate 	}
6633199Sep32863 
6643199Sep32863 	return (total_flushed);
6650Sstevel@tonic-gate }
6660Sstevel@tonic-gate 
6670Sstevel@tonic-gate /*
6680Sstevel@tonic-gate  * If resumable queue is full, we need to check if any cpu is in
6690Sstevel@tonic-gate  * error state. If not, we drive on. If yes, we need to panic. The
6700Sstevel@tonic-gate  * hypervisor call hv_cpu_state() is being used for checking the
6713750Srf157361  * cpu state.  And reset %tick_compr in case tick-compare was lost.
6720Sstevel@tonic-gate  */
6730Sstevel@tonic-gate static void
6740Sstevel@tonic-gate errh_rq_full(struct async_flt *afltp)
6750Sstevel@tonic-gate {
6760Sstevel@tonic-gate 	processorid_t who;
6770Sstevel@tonic-gate 	uint64_t cpu_state;
6780Sstevel@tonic-gate 	uint64_t retval;
6793750Srf157361 	uint64_t current_tick;
6803750Srf157361 
6813750Srf157361 	current_tick = (uint64_t)gettick();
6823750Srf157361 	tickcmpr_set(current_tick);
6830Sstevel@tonic-gate 
6840Sstevel@tonic-gate 	for (who = 0; who < NCPU; who++)
6850Sstevel@tonic-gate 		if (CPU_IN_SET(cpu_ready_set, who)) {
6860Sstevel@tonic-gate 			retval = hv_cpu_state(who, &cpu_state);
6870Sstevel@tonic-gate 			if (retval != H_EOK || cpu_state == CPU_STATE_ERROR) {
6880Sstevel@tonic-gate 				afltp->flt_panic = 1;
6890Sstevel@tonic-gate 				break;
6900Sstevel@tonic-gate 			}
6910Sstevel@tonic-gate 		}
6920Sstevel@tonic-gate }
6930Sstevel@tonic-gate 
6940Sstevel@tonic-gate /*
6950Sstevel@tonic-gate  * Return processor specific async error structure
6960Sstevel@tonic-gate  * size used.
6970Sstevel@tonic-gate  */
6980Sstevel@tonic-gate int
6990Sstevel@tonic-gate cpu_aflt_size(void)
7000Sstevel@tonic-gate {
7010Sstevel@tonic-gate 	return (sizeof (errh_async_flt_t));
7020Sstevel@tonic-gate }
7030Sstevel@tonic-gate 
7040Sstevel@tonic-gate #define	SZ_TO_ETRS_SHIFT	6
7050Sstevel@tonic-gate 
7060Sstevel@tonic-gate /*
7070Sstevel@tonic-gate  * Message print out when resumable queue is overflown
7080Sstevel@tonic-gate  */
7090Sstevel@tonic-gate /*ARGSUSED*/
7100Sstevel@tonic-gate void
7110Sstevel@tonic-gate rq_overflow(struct regs *rp, uint64_t head_offset,
7120Sstevel@tonic-gate     uint64_t tail_offset)
7130Sstevel@tonic-gate {
7140Sstevel@tonic-gate 	rq_overflow_count++;
7150Sstevel@tonic-gate }
7160Sstevel@tonic-gate 
7170Sstevel@tonic-gate /*
7180Sstevel@tonic-gate  * Handler to process a fatal error.  This routine can be called from a
7190Sstevel@tonic-gate  * softint, called from trap()'s AST handling, or called from the panic flow.
7200Sstevel@tonic-gate  */
7210Sstevel@tonic-gate /*ARGSUSED*/
7220Sstevel@tonic-gate static void
7230Sstevel@tonic-gate ue_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
7240Sstevel@tonic-gate {
7250Sstevel@tonic-gate 	cpu_ue_log_err(aflt);
7260Sstevel@tonic-gate }
7270Sstevel@tonic-gate 
7280Sstevel@tonic-gate /*
7290Sstevel@tonic-gate  * Handler to process a correctable error.  This routine can be called from a
7300Sstevel@tonic-gate  * softint.  We just call the CPU module's logging routine.
7310Sstevel@tonic-gate  */
7320Sstevel@tonic-gate /*ARGSUSED*/
7330Sstevel@tonic-gate static void
7340Sstevel@tonic-gate ce_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
7350Sstevel@tonic-gate {
7360Sstevel@tonic-gate 	cpu_ce_log_err(aflt);
7370Sstevel@tonic-gate }
7380Sstevel@tonic-gate 
7390Sstevel@tonic-gate /*
740541Srf157361  * Handler to process vbsc hostshutdown (power-off button).
741541Srf157361  */
742541Srf157361 static int
743541Srf157361 err_shutdown_softintr()
744541Srf157361 {
745541Srf157361 	cmn_err(CE_WARN, "Power-off requested, system will now shutdown.");
746541Srf157361 	do_shutdown();
747541Srf157361 
748541Srf157361 	/*
749541Srf157361 	 * just in case do_shutdown() fails
750541Srf157361 	 */
751541Srf157361 	(void) timeout((void(*)(void *))power_down, NULL, 100 * hz);
752541Srf157361 	return (DDI_INTR_CLAIMED);
753541Srf157361 }
754541Srf157361 
755541Srf157361 /*
7560Sstevel@tonic-gate  * Allocate error queue sizes based on max_ncpus.  max_ncpus is set just
7570Sstevel@tonic-gate  * after ncpunode has been determined.  ncpus is set in start_other_cpus
7580Sstevel@tonic-gate  * which is called after error_init() but may change dynamically.
7590Sstevel@tonic-gate  */
7600Sstevel@tonic-gate void
7610Sstevel@tonic-gate error_init(void)
7620Sstevel@tonic-gate {
7630Sstevel@tonic-gate 	char tmp_name[MAXSYSNAME];
764789Sahrens 	pnode_t node;
7650Sstevel@tonic-gate 	size_t size = cpu_aflt_size();
7660Sstevel@tonic-gate 
7670Sstevel@tonic-gate 	/*
7680Sstevel@tonic-gate 	 * Initialize the correctable and uncorrectable error queues.
7690Sstevel@tonic-gate 	 */
7700Sstevel@tonic-gate 	ue_queue = errorq_create("ue_queue", (errorq_func_t)ue_drain, NULL,
7710Sstevel@tonic-gate 	    MAX_ASYNC_FLTS * (max_ncpus + 1), size, PIL_2, ERRORQ_VITAL);
7720Sstevel@tonic-gate 
7730Sstevel@tonic-gate 	ce_queue = errorq_create("ce_queue", (errorq_func_t)ce_drain, NULL,
7740Sstevel@tonic-gate 	    MAX_CE_FLTS * (max_ncpus + 1), size, PIL_1, 0);
7750Sstevel@tonic-gate 
7760Sstevel@tonic-gate 	if (ue_queue == NULL || ce_queue == NULL)
7770Sstevel@tonic-gate 		panic("failed to create required system error queue");
7780Sstevel@tonic-gate 
7790Sstevel@tonic-gate 	/*
780541Srf157361 	 * Setup interrupt handler for power-off button.
781541Srf157361 	 */
782541Srf157361 	err_shutdown_inum = add_softintr(PIL_9,
7832973Sgovinda 	    (softintrfunc)err_shutdown_softintr, NULL, SOFTINT_ST);
784541Srf157361 
785541Srf157361 	/*
7860Sstevel@tonic-gate 	 * Initialize the busfunc list mutex.  This must be a PIL_15 spin lock
7870Sstevel@tonic-gate 	 * because we will need to acquire it from cpu_async_error().
7880Sstevel@tonic-gate 	 */
7890Sstevel@tonic-gate 	mutex_init(&bfd_lock, NULL, MUTEX_SPIN, (void *)PIL_15);
7900Sstevel@tonic-gate 
7914612Srf157361 	/* Only allow one cpu at a time to dump errh errors. */
7924612Srf157361 	mutex_init(&errh_print_lock, NULL, MUTEX_SPIN, (void *)PIL_15);
7934612Srf157361 
7940Sstevel@tonic-gate 	node = prom_rootnode();
7950Sstevel@tonic-gate 	if ((node == OBP_NONODE) || (node == OBP_BADNODE)) {
7960Sstevel@tonic-gate 		cmn_err(CE_CONT, "error_init: node 0x%x\n", (uint_t)node);
7970Sstevel@tonic-gate 		return;
7980Sstevel@tonic-gate 	}
7990Sstevel@tonic-gate 
8000Sstevel@tonic-gate 	if (((size = prom_getproplen(node, "reset-reason")) != -1) &&
8010Sstevel@tonic-gate 	    (size <= MAXSYSNAME) &&
8020Sstevel@tonic-gate 	    (prom_getprop(node, "reset-reason", tmp_name) != -1)) {
8030Sstevel@tonic-gate 		if (reset_debug) {
8040Sstevel@tonic-gate 			cmn_err(CE_CONT, "System booting after %s\n", tmp_name);
8050Sstevel@tonic-gate 		} else if (strncmp(tmp_name, "FATAL", 5) == 0) {
8060Sstevel@tonic-gate 			cmn_err(CE_CONT,
8070Sstevel@tonic-gate 			    "System booting after fatal error %s\n", tmp_name);
8080Sstevel@tonic-gate 		}
8090Sstevel@tonic-gate 	}
8100Sstevel@tonic-gate }
811817Swh94709 
812817Swh94709 /*
813817Swh94709  * Nonresumable queue is full, panic here
814817Swh94709  */
815817Swh94709 /*ARGSUSED*/
816817Swh94709 void
817817Swh94709 nrq_overflow(struct regs *rp)
818817Swh94709 {
819817Swh94709 	fm_panic("Nonresumable queue full");
820817Swh94709 }
8213156Sgirish 
8223156Sgirish /*
8233156Sgirish  * This is the place for special error handling for individual errors.
8243156Sgirish  */
8253156Sgirish static void
8263156Sgirish errh_handle_attr(errh_async_flt_t *errh_fltp)
8273156Sgirish {
8283156Sgirish 	switch (errh_fltp->errh_er.attr & ~ERRH_MODE_MASK) {
8293156Sgirish 	case ERRH_ATTR_CPU:
8303156Sgirish 	case ERRH_ATTR_MEM:
8313156Sgirish 	case ERRH_ATTR_PIO:
8323156Sgirish 	case ERRH_ATTR_IRF:
8333156Sgirish 	case ERRH_ATTR_FRF:
8343156Sgirish 	case ERRH_ATTR_SHUT:
8353156Sgirish 		break;
8363156Sgirish 
8373156Sgirish 	case ERRH_ATTR_ASR:
8383156Sgirish 		errh_handle_asr(errh_fltp);
8393156Sgirish 		break;
8403156Sgirish 
8413156Sgirish 	case ERRH_ATTR_ASI:
8423156Sgirish 	case ERRH_ATTR_PREG:
8433156Sgirish 	case ERRH_ATTR_RQF:
8443156Sgirish 		break;
8453156Sgirish 
8463156Sgirish 	default:
8473156Sgirish 		break;
8483156Sgirish 	}
8493156Sgirish }
8503156Sgirish 
8513156Sgirish /*
8523156Sgirish  * Handle ASR bit set in ATTR
8533156Sgirish  */
8543156Sgirish static void
8553156Sgirish errh_handle_asr(errh_async_flt_t *errh_fltp)
8563156Sgirish {
8573156Sgirish 	uint64_t current_tick;
8583156Sgirish 
8593156Sgirish 	switch (errh_fltp->errh_er.reg) {
8603156Sgirish 	case ASR_REG_VALID | ASR_REG_TICK:
8613156Sgirish 		/*
8623156Sgirish 		 * For Tick Compare Register error, it only happens when
8633156Sgirish 		 * the register is being read or compared with the %tick
8643156Sgirish 		 * register. Since we lost the contents of the register,
8653156Sgirish 		 * we set the %tick_compr in the future. An interrupt will
8663156Sgirish 		 * happen when %tick matches the value field of %tick_compr.
8673156Sgirish 		 */
8683156Sgirish 		current_tick = (uint64_t)gettick();
8693156Sgirish 		tickcmpr_set(current_tick);
8703156Sgirish 		/* Do not panic */
8713156Sgirish 		errh_fltp->cmn_asyncflt.flt_panic = 0;
8723156Sgirish 		break;
8733156Sgirish 
8743156Sgirish 	default:
8753156Sgirish 		break;
8763156Sgirish 	}
8773156Sgirish }
8784612Srf157361 
8794612Srf157361 /*
880*11304SJanie.Lu@Sun.COM  * Handle a SP state change.
881*11304SJanie.Lu@Sun.COM  */
882*11304SJanie.Lu@Sun.COM static void
883*11304SJanie.Lu@Sun.COM errh_handle_sp(errh_async_flt_t *errh_fltp)
884*11304SJanie.Lu@Sun.COM {
885*11304SJanie.Lu@Sun.COM 	uint8_t		sp_state;
886*11304SJanie.Lu@Sun.COM 
887*11304SJanie.Lu@Sun.COM 	sp_state = (errh_fltp->errh_er.attr & ERRH_SP_MASK) >> ERRH_SP_SHIFT;
888*11304SJanie.Lu@Sun.COM 
889*11304SJanie.Lu@Sun.COM 	/*
890*11304SJanie.Lu@Sun.COM 	 * Only the SP is unavailable state change is currently valid.
891*11304SJanie.Lu@Sun.COM 	 */
892*11304SJanie.Lu@Sun.COM 	if (sp_state == ERRH_SP_UNAVAILABLE) {
893*11304SJanie.Lu@Sun.COM 		sp_ereport_post(sp_state);
894*11304SJanie.Lu@Sun.COM 	} else {
895*11304SJanie.Lu@Sun.COM 		cmn_err(CE_WARN, "Invalid SP state 0x%x in SP state change "
896*11304SJanie.Lu@Sun.COM 		    "handler.\n", sp_state);
897*11304SJanie.Lu@Sun.COM 	}
898*11304SJanie.Lu@Sun.COM }
899*11304SJanie.Lu@Sun.COM 
900*11304SJanie.Lu@Sun.COM /*
9014612Srf157361  * Dump the error packet
9024612Srf157361  */
9034612Srf157361 /*ARGSUSED*/
9044612Srf157361 static void
9054612Srf157361 errh_er_print(errh_er_t *errh_erp, const char *queue)
9064612Srf157361 {
9074612Srf157361 	typedef union {
9084612Srf157361 		uint64_t w;
9094612Srf157361 		uint16_t s[4];
9104612Srf157361 	} errhp_t;
9114612Srf157361 	errhp_t *p = (errhp_t *)errh_erp;
9124612Srf157361 	int i;
9134612Srf157361 
9144612Srf157361 	mutex_enter(&errh_print_lock);
9154612Srf157361 	switch (errh_erp->desc) {
9164612Srf157361 	case ERRH_DESC_UCOR_RE:
9174612Srf157361 		cmn_err(CE_CONT, "\nResumable Uncorrectable Error ");
9184612Srf157361 		break;
9194612Srf157361 	case ERRH_DESC_PR_NRE:
9204612Srf157361 		cmn_err(CE_CONT, "\nNonresumable Precise Error ");
9214612Srf157361 		break;
9224612Srf157361 	case ERRH_DESC_DEF_NRE:
9234612Srf157361 		cmn_err(CE_CONT, "\nNonresumable Deferred Error ");
9244612Srf157361 		break;
9254612Srf157361 	default:
9264612Srf157361 		cmn_err(CE_CONT, "\nError packet ");
9274612Srf157361 		break;
9284612Srf157361 	}
9294612Srf157361 	cmn_err(CE_CONT, "received on %s\n", queue);
9304612Srf157361 
9314612Srf157361 	/*
9324612Srf157361 	 * Print Q_ENTRY_SIZE bytes of epacket with 8 bytes per line
9334612Srf157361 	 */
9344612Srf157361 	for (i = Q_ENTRY_SIZE; i > 0; i -= 8, ++p) {
9354612Srf157361 		cmn_err(CE_CONT, "%016lx: %04x %04x %04x %04x\n", (uint64_t)p,
9364612Srf157361 		    p->s[0], p->s[1], p->s[2], p->s[3]);
9374612Srf157361 	}
9384612Srf157361 	mutex_exit(&errh_print_lock);
9394612Srf157361 }
940*11304SJanie.Lu@Sun.COM 
941*11304SJanie.Lu@Sun.COM static void
942*11304SJanie.Lu@Sun.COM sp_ereport_post(uint8_t sp_state)
943*11304SJanie.Lu@Sun.COM {
944*11304SJanie.Lu@Sun.COM 	nvlist_t	*ereport, *detector;
945*11304SJanie.Lu@Sun.COM 
946*11304SJanie.Lu@Sun.COM 	/*
947*11304SJanie.Lu@Sun.COM 	 * Currently an ereport is only sent when the state of the SP
948*11304SJanie.Lu@Sun.COM 	 * changes to unavailable.
949*11304SJanie.Lu@Sun.COM 	 */
950*11304SJanie.Lu@Sun.COM 	ASSERT(sp_state == ERRH_SP_UNAVAILABLE);
951*11304SJanie.Lu@Sun.COM 
952*11304SJanie.Lu@Sun.COM 	ereport = fm_nvlist_create(NULL);
953*11304SJanie.Lu@Sun.COM 	detector = fm_nvlist_create(NULL);
954*11304SJanie.Lu@Sun.COM 
955*11304SJanie.Lu@Sun.COM 	/*
956*11304SJanie.Lu@Sun.COM 	 * Create an HC-scheme detector FMRI.
957*11304SJanie.Lu@Sun.COM 	 */
958*11304SJanie.Lu@Sun.COM 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
959*11304SJanie.Lu@Sun.COM 	    "chassis", 0);
960*11304SJanie.Lu@Sun.COM 
961*11304SJanie.Lu@Sun.COM 	fm_ereport_set(ereport, FM_EREPORT_VERSION, "chassis.sp.unavailable",
962*11304SJanie.Lu@Sun.COM 	    fm_ena_generate(0, FM_ENA_FMT1), detector, NULL);
963*11304SJanie.Lu@Sun.COM 
964*11304SJanie.Lu@Sun.COM 	(void) fm_ereport_post(ereport, EVCH_TRYHARD);
965*11304SJanie.Lu@Sun.COM 
966*11304SJanie.Lu@Sun.COM 	fm_nvlist_destroy(ereport, FM_NVA_FREE);
967*11304SJanie.Lu@Sun.COM 	fm_nvlist_destroy(detector, FM_NVA_FREE);
968*11304SJanie.Lu@Sun.COM }
969