xref: /onnv-gate/usr/src/uts/i86pc/os/machdep.c (revision 7532:bb6372f778bb)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51253Slq150181  * Common Development and Distribution License (the "License").
61253Slq150181  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211253Slq150181 
220Sstevel@tonic-gate /*
236681Sjohnlev  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #include <sys/types.h>
280Sstevel@tonic-gate #include <sys/t_lock.h>
290Sstevel@tonic-gate #include <sys/param.h>
303939Ssethg #include <sys/segments.h>
310Sstevel@tonic-gate #include <sys/sysmacros.h>
320Sstevel@tonic-gate #include <sys/signal.h>
330Sstevel@tonic-gate #include <sys/systm.h>
340Sstevel@tonic-gate #include <sys/user.h>
350Sstevel@tonic-gate #include <sys/mman.h>
360Sstevel@tonic-gate #include <sys/vm.h>
370Sstevel@tonic-gate 
380Sstevel@tonic-gate #include <sys/disp.h>
390Sstevel@tonic-gate #include <sys/class.h>
400Sstevel@tonic-gate 
410Sstevel@tonic-gate #include <sys/proc.h>
420Sstevel@tonic-gate #include <sys/buf.h>
430Sstevel@tonic-gate #include <sys/kmem.h>
440Sstevel@tonic-gate 
450Sstevel@tonic-gate #include <sys/reboot.h>
460Sstevel@tonic-gate #include <sys/uadmin.h>
470Sstevel@tonic-gate #include <sys/callb.h>
480Sstevel@tonic-gate 
490Sstevel@tonic-gate #include <sys/cred.h>
500Sstevel@tonic-gate #include <sys/vnode.h>
510Sstevel@tonic-gate #include <sys/file.h>
520Sstevel@tonic-gate 
530Sstevel@tonic-gate #include <sys/procfs.h>
540Sstevel@tonic-gate #include <sys/acct.h>
550Sstevel@tonic-gate 
560Sstevel@tonic-gate #include <sys/vfs.h>
570Sstevel@tonic-gate #include <sys/dnlc.h>
580Sstevel@tonic-gate #include <sys/var.h>
590Sstevel@tonic-gate #include <sys/cmn_err.h>
600Sstevel@tonic-gate #include <sys/utsname.h>
610Sstevel@tonic-gate #include <sys/debug.h>
620Sstevel@tonic-gate 
630Sstevel@tonic-gate #include <sys/dumphdr.h>
640Sstevel@tonic-gate #include <sys/bootconf.h>
650Sstevel@tonic-gate #include <sys/varargs.h>
660Sstevel@tonic-gate #include <sys/promif.h>
670Sstevel@tonic-gate #include <sys/modctl.h>
680Sstevel@tonic-gate 
690Sstevel@tonic-gate #include <sys/consdev.h>
700Sstevel@tonic-gate #include <sys/frame.h>
710Sstevel@tonic-gate 
720Sstevel@tonic-gate #include <sys/sunddi.h>
730Sstevel@tonic-gate #include <sys/ddidmareq.h>
740Sstevel@tonic-gate #include <sys/psw.h>
750Sstevel@tonic-gate #include <sys/regset.h>
760Sstevel@tonic-gate #include <sys/privregs.h>
770Sstevel@tonic-gate #include <sys/clock.h>
780Sstevel@tonic-gate #include <sys/tss.h>
790Sstevel@tonic-gate #include <sys/cpu.h>
800Sstevel@tonic-gate #include <sys/stack.h>
810Sstevel@tonic-gate #include <sys/trap.h>
820Sstevel@tonic-gate #include <sys/pic.h>
830Sstevel@tonic-gate #include <vm/hat.h>
840Sstevel@tonic-gate #include <vm/anon.h>
850Sstevel@tonic-gate #include <vm/as.h>
860Sstevel@tonic-gate #include <vm/page.h>
870Sstevel@tonic-gate #include <vm/seg.h>
880Sstevel@tonic-gate #include <vm/seg_kmem.h>
890Sstevel@tonic-gate #include <vm/seg_map.h>
900Sstevel@tonic-gate #include <vm/seg_vn.h>
910Sstevel@tonic-gate #include <vm/seg_kp.h>
920Sstevel@tonic-gate #include <vm/hat_i86.h>
930Sstevel@tonic-gate #include <sys/swap.h>
940Sstevel@tonic-gate #include <sys/thread.h>
950Sstevel@tonic-gate #include <sys/sysconf.h>
960Sstevel@tonic-gate #include <sys/vm_machparam.h>
970Sstevel@tonic-gate #include <sys/archsystm.h>
980Sstevel@tonic-gate #include <sys/machsystm.h>
990Sstevel@tonic-gate #include <sys/machlock.h>
1000Sstevel@tonic-gate #include <sys/x_call.h>
1010Sstevel@tonic-gate #include <sys/instance.h>
1020Sstevel@tonic-gate 
1030Sstevel@tonic-gate #include <sys/time.h>
1040Sstevel@tonic-gate #include <sys/smp_impldefs.h>
1050Sstevel@tonic-gate #include <sys/psm_types.h>
1060Sstevel@tonic-gate #include <sys/atomic.h>
1070Sstevel@tonic-gate #include <sys/panic.h>
1080Sstevel@tonic-gate #include <sys/cpuvar.h>
1090Sstevel@tonic-gate #include <sys/dtrace.h>
1100Sstevel@tonic-gate #include <sys/bl.h>
1110Sstevel@tonic-gate #include <sys/nvpair.h>
1120Sstevel@tonic-gate #include <sys/x86_archext.h>
1130Sstevel@tonic-gate #include <sys/pool_pset.h>
1140Sstevel@tonic-gate #include <sys/autoconf.h>
1153446Smrj #include <sys/mem.h>
1163446Smrj #include <sys/dumphdr.h>
1173446Smrj #include <sys/compress.h>
118*7532SSean.Ye@Sun.COM #include <sys/cpu_module.h>
1195084Sjohnlev #if defined(__xpv)
1205084Sjohnlev #include <sys/hypervisor.h>
1215084Sjohnlev #include <sys/xpv_panic.h>
1225084Sjohnlev #endif
1230Sstevel@tonic-gate 
1240Sstevel@tonic-gate #ifdef	TRAPTRACE
1250Sstevel@tonic-gate #include <sys/traptrace.h>
1260Sstevel@tonic-gate #endif	/* TRAPTRACE */
1270Sstevel@tonic-gate 
1280Sstevel@tonic-gate extern void audit_enterprom(int);
1290Sstevel@tonic-gate extern void audit_exitprom(int);
1300Sstevel@tonic-gate 
1310Sstevel@tonic-gate /*
 * Occasionally the kernel knows better whether to power-off or reboot.
1336681Sjohnlev  */
1346681Sjohnlev int force_shutdown_method = AD_UNKNOWN;
1356681Sjohnlev 
1366681Sjohnlev /*
1370Sstevel@tonic-gate  * The panicbuf array is used to record messages and state:
1380Sstevel@tonic-gate  */
1390Sstevel@tonic-gate char panicbuf[PANICBUFSIZE];
1400Sstevel@tonic-gate 
1410Sstevel@tonic-gate /*
1420Sstevel@tonic-gate  * maxphys - used during physio
1430Sstevel@tonic-gate  * klustsize - used for klustering by swapfs and specfs
1440Sstevel@tonic-gate  */
1450Sstevel@tonic-gate int maxphys = 56 * 1024;    /* XXX See vm_subr.c - max b_count in physio */
1460Sstevel@tonic-gate int klustsize = 56 * 1024;
1470Sstevel@tonic-gate 
1480Sstevel@tonic-gate caddr_t	p0_va;		/* Virtual address for accessing physical page 0 */
1490Sstevel@tonic-gate 
1500Sstevel@tonic-gate /*
1510Sstevel@tonic-gate  * defined here, though unused on x86,
1520Sstevel@tonic-gate  * to make kstat_fr.c happy.
1530Sstevel@tonic-gate  */
1540Sstevel@tonic-gate int vac;
1550Sstevel@tonic-gate 
1560Sstevel@tonic-gate void stop_other_cpus();
1570Sstevel@tonic-gate void debug_enter(char *);
1580Sstevel@tonic-gate 
1590Sstevel@tonic-gate extern void pm_cfb_check_and_powerup(void);
1600Sstevel@tonic-gate extern void pm_cfb_rele(void);
1610Sstevel@tonic-gate 
1620Sstevel@tonic-gate /*
1630Sstevel@tonic-gate  * Machine dependent code to reboot.
1640Sstevel@tonic-gate  * "mdep" is interpreted as a character pointer; if non-null, it is a pointer
1650Sstevel@tonic-gate  * to a string to be used as the argument string when rebooting.
166136Sachartre  *
167136Sachartre  * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
168136Sachartre  * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when
169136Sachartre  * we are in a normal shutdown sequence (interrupts are not blocked, the
170136Sachartre  * system is not panic'ing or being suspended).
1710Sstevel@tonic-gate  */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *mdep, boolean_t invoke_cb)
{
	/*
	 * In a normal (non-panic) shutdown, pin ourselves to the current
	 * CPU so the rest of the sequence runs on one processor.
	 */
	if (!panicstr) {
		kpreempt_disable();
		affinity_set(CPU_CURRENT);
	}

	/* The kernel may override the requested shutdown method. */
	if (force_shutdown_method != AD_UNKNOWN)
		fcn = force_shutdown_method;

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * Print the reboot message now, before pausing other cpus.
	 * There is a race condition in the printing support that
	 * can deadlock multiprocessor machines.
	 */
	if (!(fcn == AD_HALT || fcn == AD_POWEROFF))
		prom_printf("rebooting...\n");

	/* In a hypervisor panic, skip straight to the hardware reset. */
	if (IN_XPV_PANIC())
		reset();

	/*
	 * We can't bring up the console from above lock level, so do it now
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	/* Callbacks are only safe during a normal shutdown sequence. */
	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	page_retire_mdboot();

#if defined(__xpv)
	/*
	 * XXPV	Should probably think some more about how we deal
	 *	with panicking before it's really safe to panic.
	 *	On hypervisors, we reboot very quickly..  Perhaps panic
	 *	should only attempt to recover by rebooting if,
	 *	say, we were able to mount the root filesystem,
	 *	or if we successfully launched init(1m).
	 */
	if (panicstr && proc_init == NULL)
		(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif

	/*
	 * stop other cpus and raise our priority.  since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	(void) spl6();
	if (!panicstr) {
		mutex_enter(&cpu_lock);
		pause_cpus(NULL);
		mutex_exit(&cpu_lock);
	}

	/*
	 * try and reset leaf devices.  reset_leaves() should only
	 * be called when there are no other threads that could be
	 * accessing devices
	 */
	reset_leaves();

	/* Block everything below lock level, then hand off to the PSM. */
	(void) spl8();
	(*psm_shutdownf)(cmd, fcn);

	if (fcn == AD_HALT || fcn == AD_POWEROFF)
		halt((char *)NULL);
	else
		prom_reboot("");
	/*NOTREACHED*/
}
2610Sstevel@tonic-gate 
/*
 * mdpreboot - may be called prior to mdboot while the root filesystem
 * is still mounted; gives the PSM an early pre-shutdown notification.
 */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *mdep)
{
	psm_preshutdownf(cmd, fcn);
}
2690Sstevel@tonic-gate 
2700Sstevel@tonic-gate void
2710Sstevel@tonic-gate idle_other_cpus()
2720Sstevel@tonic-gate {
2730Sstevel@tonic-gate 	int cpuid = CPU->cpu_id;
2740Sstevel@tonic-gate 	cpuset_t xcset;
2750Sstevel@tonic-gate 
2760Sstevel@tonic-gate 	ASSERT(cpuid < NCPU);
2770Sstevel@tonic-gate 	CPUSET_ALL_BUT(xcset, cpuid);
2780Sstevel@tonic-gate 	xc_capture_cpus(xcset);
2790Sstevel@tonic-gate }
2800Sstevel@tonic-gate 
/*
 * Release the CPUs previously captured by idle_other_cpus().
 */
void
resume_other_cpus()
{
	ASSERT(CPU->cpu_id < NCPU);

	xc_release_cpus();
}
2880Sstevel@tonic-gate 
2890Sstevel@tonic-gate void
2900Sstevel@tonic-gate stop_other_cpus()
2910Sstevel@tonic-gate {
2920Sstevel@tonic-gate 	int cpuid = CPU->cpu_id;
2930Sstevel@tonic-gate 	cpuset_t xcset;
2940Sstevel@tonic-gate 
2950Sstevel@tonic-gate 	ASSERT(cpuid < NCPU);
2960Sstevel@tonic-gate 
2970Sstevel@tonic-gate 	/*
2983446Smrj 	 * xc_trycall will attempt to make all other CPUs execute mach_cpu_halt,
2990Sstevel@tonic-gate 	 * and will return immediately regardless of whether or not it was
3000Sstevel@tonic-gate 	 * able to make them do it.
3010Sstevel@tonic-gate 	 */
3020Sstevel@tonic-gate 	CPUSET_ALL_BUT(xcset, cpuid);
3033446Smrj 	xc_trycall(NULL, NULL, NULL, xcset, (int (*)())mach_cpu_halt);
3040Sstevel@tonic-gate }
3050Sstevel@tonic-gate 
3060Sstevel@tonic-gate /*
3070Sstevel@tonic-gate  *	Machine dependent abort sequence handling
3080Sstevel@tonic-gate  */
3090Sstevel@tonic-gate void
3100Sstevel@tonic-gate abort_sequence_enter(char *msg)
3110Sstevel@tonic-gate {
3120Sstevel@tonic-gate 	if (abort_enable == 0) {
3130Sstevel@tonic-gate 		if (audit_active)
3140Sstevel@tonic-gate 			audit_enterprom(0);
3150Sstevel@tonic-gate 		return;
3160Sstevel@tonic-gate 	}
3170Sstevel@tonic-gate 	if (audit_active)
3180Sstevel@tonic-gate 		audit_enterprom(1);
3190Sstevel@tonic-gate 	debug_enter(msg);
3200Sstevel@tonic-gate 	if (audit_active)
3210Sstevel@tonic-gate 		audit_exitprom(1);
3220Sstevel@tonic-gate }
3230Sstevel@tonic-gate 
3240Sstevel@tonic-gate /*
3250Sstevel@tonic-gate  * Enter debugger.  Called when the user types ctrl-alt-d or whenever
3260Sstevel@tonic-gate  * code wants to enter the debugger and possibly resume later.
3270Sstevel@tonic-gate  */
3280Sstevel@tonic-gate void
3290Sstevel@tonic-gate debug_enter(
3300Sstevel@tonic-gate 	char	*msg)		/* message to print, possibly NULL */
3310Sstevel@tonic-gate {
3320Sstevel@tonic-gate 	if (dtrace_debugger_init != NULL)
3330Sstevel@tonic-gate 		(*dtrace_debugger_init)();
3340Sstevel@tonic-gate 
3350Sstevel@tonic-gate 	if (msg)
3360Sstevel@tonic-gate 		prom_printf("%s\n", msg);
3370Sstevel@tonic-gate 
3380Sstevel@tonic-gate 	if (boothowto & RB_DEBUG)
3393446Smrj 		kmdb_enter();
3400Sstevel@tonic-gate 
3410Sstevel@tonic-gate 	if (dtrace_debugger_fini != NULL)
3420Sstevel@tonic-gate 		(*dtrace_debugger_fini)();
3430Sstevel@tonic-gate }
3440Sstevel@tonic-gate 
/*
 * Reset the machine.  Bare metal: disable the BIOS memory check and
 * reset via EFI or the PC reset path.  Under the hypervisor: ask the
 * hypervisor to reboot the domain (unless we are in a hypervisor
 * panic, where the hardware reset path is used directly).
 * Does not return.
 */
void
reset(void)
{
#if !defined(__xpv)
	ushort_t *bios_memchk;

	/*
	 * Can't use psm_map_phys before the hat is initialized.
	 */
	if (khat_running) {
		/* 0x472 is the BIOS data area warm-boot flag word. */
		bios_memchk = (ushort_t *)psm_map_phys(0x472,
		    sizeof (ushort_t), PROT_READ | PROT_WRITE);
		if (bios_memchk)
			*bios_memchk = 0x1234;	/* bios memory check disable */
	}

	/* Prefer the EFI reset path when the firmware provides one. */
	if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), 0, "efi-systab"))
		efi_reset();
	pc_reset();
#else
	if (IN_XPV_PANIC())
		pc_reset();
	(void) HYPERVISOR_shutdown(SHUTDOWN_reboot);
	panic("HYPERVISOR_shutdown() failed");
#endif
	/*NOTREACHED*/
}
3720Sstevel@tonic-gate 
3730Sstevel@tonic-gate /*
3740Sstevel@tonic-gate  * Halt the machine and return to the monitor
3750Sstevel@tonic-gate  */
3760Sstevel@tonic-gate void
3770Sstevel@tonic-gate halt(char *s)
3780Sstevel@tonic-gate {
3790Sstevel@tonic-gate 	stop_other_cpus();	/* send stop signal to other CPUs */
3800Sstevel@tonic-gate 	if (s)
3810Sstevel@tonic-gate 		prom_printf("(%s) \n", s);
3820Sstevel@tonic-gate 	prom_exit_to_mon();
3830Sstevel@tonic-gate 	/*NOTREACHED*/
3840Sstevel@tonic-gate }
3850Sstevel@tonic-gate 
/*
 * Initiate interrupt redistribution.
 * No-op stub on x86; provided so common DDI code can call it
 * unconditionally.
 */
void
i_ddi_intr_redist_all_cpus()
{
}
3930Sstevel@tonic-gate 
3940Sstevel@tonic-gate /*
3950Sstevel@tonic-gate  * XXX These probably ought to live somewhere else
3960Sstevel@tonic-gate  * XXX They are called from mem.c
3970Sstevel@tonic-gate  */
3980Sstevel@tonic-gate 
3990Sstevel@tonic-gate /*
4000Sstevel@tonic-gate  * Convert page frame number to an OBMEM page frame number
4010Sstevel@tonic-gate  * (i.e. put in the type bits -- zero for this implementation)
4020Sstevel@tonic-gate  */
4030Sstevel@tonic-gate pfn_t
4040Sstevel@tonic-gate impl_obmem_pfnum(pfn_t pf)
4050Sstevel@tonic-gate {
4060Sstevel@tonic-gate 	return (pf);
4070Sstevel@tonic-gate }
4080Sstevel@tonic-gate 
#ifdef	NM_DEBUG
int nmi_test = 0;	/* checked in intentry.s during clock int */
int nmtest = -1;

/*
 * Debug-only NMI callback: logs the argument and register pointer,
 * and claims the NMI (returns 1) only when "arg" matches the updated
 * nmtest counter; otherwise returns 0.
 *
 * Modernized from the original K&R implicit-int definition; the
 * pointer and register values are now printed with %p/%lx instead of
 * %x (passing a pointer for %x is undefined behavior).
 */
int
nmfunc1(int arg, struct regs *rp)
{
	printf("nmi called with arg = %x, regs = %p\n", arg, (void *)rp);
	nmtest += 50;
	if (arg == nmtest) {
		printf("ip = %lx\n", (ulong_t)rp->r_pc);
		return (1);
	}
	return (0);
}

#endif
4260Sstevel@tonic-gate 
4270Sstevel@tonic-gate #include <sys/bootsvcs.h>
4280Sstevel@tonic-gate 
4290Sstevel@tonic-gate /* Hacked up initialization for initial kernel check out is HERE. */
4300Sstevel@tonic-gate /* The basic steps are: */
4310Sstevel@tonic-gate /*	kernel bootfuncs definition/initialization for KADB */
4320Sstevel@tonic-gate /*	kadb bootfuncs pointer initialization */
4330Sstevel@tonic-gate /*	putchar/getchar (interrupts disabled) */
4340Sstevel@tonic-gate 
4350Sstevel@tonic-gate /* kadb bootfuncs pointer initialization */
4360Sstevel@tonic-gate 
4370Sstevel@tonic-gate int
4380Sstevel@tonic-gate sysp_getchar()
4390Sstevel@tonic-gate {
4400Sstevel@tonic-gate 	int i;
4413446Smrj 	ulong_t s;
4420Sstevel@tonic-gate 
4430Sstevel@tonic-gate 	if (cons_polledio == NULL) {
4440Sstevel@tonic-gate 		/* Uh oh */
4450Sstevel@tonic-gate 		prom_printf("getchar called with no console\n");
4460Sstevel@tonic-gate 		for (;;)
4470Sstevel@tonic-gate 			/* LOOP FOREVER */;
4480Sstevel@tonic-gate 	}
4490Sstevel@tonic-gate 
4500Sstevel@tonic-gate 	s = clear_int_flag();
4510Sstevel@tonic-gate 	i = cons_polledio->cons_polledio_getchar(
4525084Sjohnlev 	    cons_polledio->cons_polledio_argument);
4530Sstevel@tonic-gate 	restore_int_flag(s);
4540Sstevel@tonic-gate 	return (i);
4550Sstevel@tonic-gate }
4560Sstevel@tonic-gate 
4570Sstevel@tonic-gate void
4580Sstevel@tonic-gate sysp_putchar(int c)
4590Sstevel@tonic-gate {
4603446Smrj 	ulong_t s;
4610Sstevel@tonic-gate 
4620Sstevel@tonic-gate 	/*
4630Sstevel@tonic-gate 	 * We have no alternative but to drop the output on the floor.
4640Sstevel@tonic-gate 	 */
4651253Slq150181 	if (cons_polledio == NULL ||
4661253Slq150181 	    cons_polledio->cons_polledio_putchar == NULL)
4670Sstevel@tonic-gate 		return;
4680Sstevel@tonic-gate 
4690Sstevel@tonic-gate 	s = clear_int_flag();
4700Sstevel@tonic-gate 	cons_polledio->cons_polledio_putchar(
4715084Sjohnlev 	    cons_polledio->cons_polledio_argument, c);
4720Sstevel@tonic-gate 	restore_int_flag(s);
4730Sstevel@tonic-gate }
4740Sstevel@tonic-gate 
4750Sstevel@tonic-gate int
4760Sstevel@tonic-gate sysp_ischar()
4770Sstevel@tonic-gate {
4780Sstevel@tonic-gate 	int i;
4793446Smrj 	ulong_t s;
4800Sstevel@tonic-gate 
4811253Slq150181 	if (cons_polledio == NULL ||
4821253Slq150181 	    cons_polledio->cons_polledio_ischar == NULL)
4830Sstevel@tonic-gate 		return (0);
4840Sstevel@tonic-gate 
4850Sstevel@tonic-gate 	s = clear_int_flag();
4860Sstevel@tonic-gate 	i = cons_polledio->cons_polledio_ischar(
4875084Sjohnlev 	    cons_polledio->cons_polledio_argument);
4880Sstevel@tonic-gate 	restore_int_flag(s);
4890Sstevel@tonic-gate 	return (i);
4900Sstevel@tonic-gate }
4910Sstevel@tonic-gate 
/*
 * Prompt on the PROM console and block until any key is pressed.
 * Always returns 1.
 */
int
goany(void)
{
	prom_printf("Type any key to continue ");
	(void) prom_getchar();
	prom_printf("\n");
	return (1);
}
5000Sstevel@tonic-gate 
/*
 * Kernel-provided boot_syscalls vector: routes polled console I/O
 * through the sysp_* routines above once the kernel console is up.
 */
static struct boot_syscalls kern_sysp = {
	sysp_getchar,	/*	unchar	(*getchar)();	7  */
	sysp_putchar,	/*	int	(*putchar)();	8  */
	sysp_ischar,	/*	int	(*ischar)();	9  */
};
5060Sstevel@tonic-gate 
5075084Sjohnlev #if defined(__xpv)
5085084Sjohnlev int using_kern_polledio;
5095084Sjohnlev #endif
5105084Sjohnlev 
/*
 * Switch the kernel's prom_* console I/O over to the kernel's own
 * polled I/O routines (kern_sysp).
 */
void
kadb_uses_kernel()
{
	/*
	 * This routine is now totally misnamed, since it does not in fact
	 * control kadb's I/O; it only controls the kernel's prom_* I/O.
	 */
	sysp = &kern_sysp;
#if defined(__xpv)
	/* Record the switch so the hypervisor console path can tell. */
	using_kern_polledio = 1;
#endif
}
5230Sstevel@tonic-gate 
5240Sstevel@tonic-gate /*
5250Sstevel@tonic-gate  *	the interface to the outside world
5260Sstevel@tonic-gate  */
5270Sstevel@tonic-gate 
5280Sstevel@tonic-gate /*
5290Sstevel@tonic-gate  * poll_port -- wait for a register to achieve a
5300Sstevel@tonic-gate  *		specific state.  Arguments are a mask of bits we care about,
5310Sstevel@tonic-gate  *		and two sub-masks.  To return normally, all the bits in the
5320Sstevel@tonic-gate  *		first sub-mask must be ON, all the bits in the second sub-
5330Sstevel@tonic-gate  *		mask must be OFF.  If about seconds pass without the register
5340Sstevel@tonic-gate  *		achieving the desired bit configuration, we return 1, else
5350Sstevel@tonic-gate  *		0.
5360Sstevel@tonic-gate  */
5370Sstevel@tonic-gate int
5380Sstevel@tonic-gate poll_port(ushort_t port, ushort_t mask, ushort_t onbits, ushort_t offbits)
5390Sstevel@tonic-gate {
5400Sstevel@tonic-gate 	int i;
5410Sstevel@tonic-gate 	ushort_t maskval;
5420Sstevel@tonic-gate 
5430Sstevel@tonic-gate 	for (i = 500000; i; i--) {
5440Sstevel@tonic-gate 		maskval = inb(port) & mask;
5450Sstevel@tonic-gate 		if (((maskval & onbits) == onbits) &&
5465084Sjohnlev 		    ((maskval & offbits) == 0))
5470Sstevel@tonic-gate 			return (0);
5480Sstevel@tonic-gate 		drv_usecwait(10);
5490Sstevel@tonic-gate 	}
5500Sstevel@tonic-gate 	return (1);
5510Sstevel@tonic-gate }
5520Sstevel@tonic-gate 
5530Sstevel@tonic-gate /*
5540Sstevel@tonic-gate  * set_idle_cpu is called from idle() when a CPU becomes idle.
5550Sstevel@tonic-gate  */
5560Sstevel@tonic-gate /*LINTED: static unused */
5570Sstevel@tonic-gate static uint_t last_idle_cpu;
5580Sstevel@tonic-gate 
5590Sstevel@tonic-gate /*ARGSUSED*/
5600Sstevel@tonic-gate void
5610Sstevel@tonic-gate set_idle_cpu(int cpun)
5620Sstevel@tonic-gate {
5630Sstevel@tonic-gate 	last_idle_cpu = cpun;
5640Sstevel@tonic-gate 	(*psm_set_idle_cpuf)(cpun);
5650Sstevel@tonic-gate }
5660Sstevel@tonic-gate 
5670Sstevel@tonic-gate /*
5680Sstevel@tonic-gate  * unset_idle_cpu is called from idle() when a CPU is no longer idle.
5690Sstevel@tonic-gate  */
/*
 * Notify the PSM that "cpun" is no longer idle.  Called from idle().
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
	psm_unset_idle_cpuf(cpun);
}
5760Sstevel@tonic-gate 
5770Sstevel@tonic-gate /*
5780Sstevel@tonic-gate  * This routine is almost correct now, but not quite.  It still needs the
5790Sstevel@tonic-gate  * equivalent concept of "hres_last_tick", just like on the sparc side.
5800Sstevel@tonic-gate  * The idea is to take a snapshot of the hi-res timer while doing the
5810Sstevel@tonic-gate  * hrestime_adj updates under hres_lock in locore, so that the small
5820Sstevel@tonic-gate  * interval between interrupt assertion and interrupt processing is
5830Sstevel@tonic-gate  * accounted for correctly.  Once we have this, the code below should
5840Sstevel@tonic-gate  * be modified to subtract off hres_last_tick rather than hrtime_base.
5850Sstevel@tonic-gate  *
5860Sstevel@tonic-gate  * I'd have done this myself, but I don't have source to all of the
5870Sstevel@tonic-gate  * vendor-specific hi-res timer routines (grrr...).  The generic hook I
5880Sstevel@tonic-gate  * need is something like "gethrtime_unlocked()", which would be just like
5890Sstevel@tonic-gate  * gethrtime() but would assume that you're already holding CLOCK_LOCK().
5900Sstevel@tonic-gate  * This is what the GET_HRTIME() macro is for on sparc (although it also
5910Sstevel@tonic-gate  * serves the function of making time available without a function call
5920Sstevel@tonic-gate  * so you don't take a register window overflow while traps are disabled).
5930Sstevel@tonic-gate  */
/*
 * Fill *tp with the current wall-clock time, interpolated from the
 * last clock tick using the hi-res timer, with any outstanding
 * adjtime()-style adjustment (hrestime_adj) partially applied.
 * Lock-free reader: the hres_lock generation is sampled before and
 * after, and the whole computation is retried on any concurrent
 * update by hres_tick().
 */
void
pc_gethrestime(timestruc_t *tp)
{
	int lock_prev;
	timestruc_t now;
	int nslt;		/* nsec since last tick */
	int adj;		/* amount of adjustment to apply */

loop:
	/* Sample the lock generation, then the time components. */
	lock_prev = hres_lock;
	now = hrestime;
	nslt = (int)(gethrtime() - hres_last_tick);
	if (nslt < 0) {
		/*
		 * nslt < 0 means a tick came between sampling
		 * gethrtime() and hres_last_tick; restart the loop
		 */

		goto loop;
	}
	now.tv_nsec += nslt;
	if (hrestime_adj != 0) {
		/*
		 * Apply a fraction (nslt >> ADJ_SHIFT) of the pending
		 * adjustment, clamped so we never overshoot it.
		 */
		if (hrestime_adj > 0) {
			adj = (nslt >> ADJ_SHIFT);
			if (adj > hrestime_adj)
				adj = (int)hrestime_adj;
		} else {
			adj = -(nslt >> ADJ_SHIFT);
			if (adj < hrestime_adj)
				adj = (int)hrestime_adj;
		}
		now.tv_nsec += adj;
	}
	while ((unsigned long)now.tv_nsec >= NANOSEC) {

		/*
		 * We might have a large adjustment or have been in the
		 * debugger for a long time; take care of (at most) four
		 * of those missed seconds (tv_nsec is 32 bits, so
		 * anything >4s will be wrapping around).  However,
		 * anything more than 2 seconds out of sync will trigger
		 * timedelta from clock() to go correct the time anyway,
		 * so do what we can, and let the big crowbar do the
		 * rest.  A similar correction while loop exists inside
		 * hres_tick(); in all cases we'd like tv_nsec to
		 * satisfy 0 <= tv_nsec < NANOSEC to avoid confusing
		 * user processes, but if tv_sec's a little behind for a
		 * little while, that's OK; time still monotonically
		 * increases.
		 */

		now.tv_nsec -= NANOSEC;
		now.tv_sec++;
	}
	/*
	 * Retry if the lock generation changed while we were reading
	 * (low bit masked off: it only flags an update in progress).
	 */
	if ((hres_lock & ~1) != lock_prev)
		goto loop;

	*tp = now;
}
6530Sstevel@tonic-gate 
6540Sstevel@tonic-gate void
6550Sstevel@tonic-gate gethrestime_lasttick(timespec_t *tp)
6560Sstevel@tonic-gate {
6570Sstevel@tonic-gate 	int s;
6580Sstevel@tonic-gate 
6590Sstevel@tonic-gate 	s = hr_clock_lock();
6600Sstevel@tonic-gate 	*tp = hrestime;
6610Sstevel@tonic-gate 	hr_clock_unlock(s);
6620Sstevel@tonic-gate }
6630Sstevel@tonic-gate 
6640Sstevel@tonic-gate time_t
6650Sstevel@tonic-gate gethrestime_sec(void)
6660Sstevel@tonic-gate {
6670Sstevel@tonic-gate 	timestruc_t now;
6680Sstevel@tonic-gate 
6690Sstevel@tonic-gate 	gethrestime(&now);
6700Sstevel@tonic-gate 	return (now.tv_sec);
6710Sstevel@tonic-gate }
6720Sstevel@tonic-gate 
6730Sstevel@tonic-gate /*
6740Sstevel@tonic-gate  * Initialize a kernel thread's stack
6750Sstevel@tonic-gate  */
6760Sstevel@tonic-gate 
6770Sstevel@tonic-gate caddr_t
6780Sstevel@tonic-gate thread_stk_init(caddr_t stk)
6790Sstevel@tonic-gate {
6800Sstevel@tonic-gate 	ASSERT(((uintptr_t)stk & (STACK_ALIGN - 1)) == 0);
6810Sstevel@tonic-gate 	return (stk - SA(MINFRAME));
6820Sstevel@tonic-gate }
6830Sstevel@tonic-gate 
6840Sstevel@tonic-gate /*
6850Sstevel@tonic-gate  * Initialize lwp's kernel stack.
6860Sstevel@tonic-gate  */
6870Sstevel@tonic-gate 
6880Sstevel@tonic-gate #ifdef TRAPTRACE
6890Sstevel@tonic-gate /*
6900Sstevel@tonic-gate  * There's a tricky interdependency here between use of sysenter and
6910Sstevel@tonic-gate  * TRAPTRACE which needs recording to avoid future confusion (this is
6920Sstevel@tonic-gate  * about the third time I've re-figured this out ..)
6930Sstevel@tonic-gate  *
6940Sstevel@tonic-gate  * Here's how debugging lcall works with TRAPTRACE.
6950Sstevel@tonic-gate  *
6960Sstevel@tonic-gate  * 1 We're in userland with a breakpoint on the lcall instruction.
6970Sstevel@tonic-gate  * 2 We execute the instruction - the instruction pushes the userland
6980Sstevel@tonic-gate  *   %ss, %esp, %efl, %cs, %eip on the stack and zips into the kernel
6990Sstevel@tonic-gate  *   via the call gate.
7000Sstevel@tonic-gate  * 3 The hardware raises a debug trap in kernel mode, the hardware
7010Sstevel@tonic-gate  *   pushes %efl, %cs, %eip and gets to dbgtrap via the idt.
7020Sstevel@tonic-gate  * 4 dbgtrap pushes the error code and trapno and calls cmntrap
7030Sstevel@tonic-gate  * 5 cmntrap finishes building a trap frame
7040Sstevel@tonic-gate  * 6 The TRACE_REGS macros in cmntrap copy a REGSIZE worth chunk
7050Sstevel@tonic-gate  *   off the stack into the traptrace buffer.
7060Sstevel@tonic-gate  *
7070Sstevel@tonic-gate  * This means that the traptrace buffer contains the wrong values in
7080Sstevel@tonic-gate  * %esp and %ss, but everything else in there is correct.
7090Sstevel@tonic-gate  *
7100Sstevel@tonic-gate  * Here's how debugging sysenter works with TRAPTRACE.
7110Sstevel@tonic-gate  *
7120Sstevel@tonic-gate  * a We're in userland with a breakpoint on the sysenter instruction.
7130Sstevel@tonic-gate  * b We execute the instruction - the instruction pushes -nothing-
7140Sstevel@tonic-gate  *   on the stack, but sets %cs, %eip, %ss, %esp to prearranged
7150Sstevel@tonic-gate  *   values to take us to sys_sysenter, at the top of the lwp's
7160Sstevel@tonic-gate  *   stack.
7170Sstevel@tonic-gate  * c goto 3
7180Sstevel@tonic-gate  *
7190Sstevel@tonic-gate  * At this point, because we got into the kernel without the requisite
7200Sstevel@tonic-gate  * five pushes on the stack, if we didn't make extra room, we'd
7210Sstevel@tonic-gate  * end up with the TRACE_REGS macro fetching the saved %ss and %esp
7220Sstevel@tonic-gate  * values from negative (unmapped) stack addresses -- which really bites.
7230Sstevel@tonic-gate  * That's why we do the '-= 8' below.
7240Sstevel@tonic-gate  *
7250Sstevel@tonic-gate  * XXX	Note that reading "up" lwp0's stack works because t0 is declared
7260Sstevel@tonic-gate  *	right next to t0stack in locore.s
7270Sstevel@tonic-gate  */
7280Sstevel@tonic-gate #endif
7290Sstevel@tonic-gate 
/*
 * Carve the lwp's initial kernel stack layout out of the top of its
 * stack: room for a struct regs plus a minimum-size call frame (and,
 * under TRAPTRACE, a phony %ss:%sp pair -- see the block comment
 * above).  Records the regs area in lwp->lwp_regs, gives the
 * virtualized %fs/%gs descriptors a well-defined initial state, and
 * returns the new (aligned) stack pointer value.
 */
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	caddr_t oldstk;
	struct pcb *pcb = &lwp->lwp_pcb;

	oldstk = stk;
	/* reserve space for the saved registers and a minimal frame */
	stk -= SA(sizeof (struct regs) + SA(MINFRAME));
#ifdef TRAPTRACE
	stk -= 2 * sizeof (greg_t); /* space for phony %ss:%sp (see above) */
#endif
	/* round down to the required stack alignment */
	stk = (caddr_t)((uintptr_t)stk & ~(STACK_ALIGN - 1ul));
	/* zero everything between the new top-of-stack and the old one */
	bzero(stk, oldstk - stk);
	lwp->lwp_regs = (void *)(stk + SA(MINFRAME));

	/*
	 * Arrange that the virtualized %fs and %gs GDT descriptors
	 * have a well-defined initial state (present, ring 3
	 * and of type data).
	 */
#if defined(__amd64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE)
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
	else
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;
#elif defined(__i386)
	pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
#endif	/* __i386 */
	lwp_installctx(lwp);
	return (stk);
}
7610Sstevel@tonic-gate 
/*
 * Tear down an lwp's kernel stack state.  Nothing to do on x86:
 * lwp_stk_init() allocates no resources that need releasing.
 */
/*ARGSUSED*/
void
lwp_stk_fini(klwp_t *lwp)
{}
7660Sstevel@tonic-gate 
7670Sstevel@tonic-gate /*
7681389Sdmick  * If we're not the panic CPU, we wait in panic_idle for reboot.
7690Sstevel@tonic-gate  */
7700Sstevel@tonic-gate static void
7710Sstevel@tonic-gate panic_idle(void)
7720Sstevel@tonic-gate {
7730Sstevel@tonic-gate 	splx(ipltospl(CLOCK_LEVEL));
7740Sstevel@tonic-gate 	(void) setjmp(&curthread->t_pcb);
7750Sstevel@tonic-gate 
7763446Smrj 	for (;;)
7773446Smrj 		;
7780Sstevel@tonic-gate }
7790Sstevel@tonic-gate 
7800Sstevel@tonic-gate /*
7810Sstevel@tonic-gate  * Stop the other CPUs by cross-calling them and forcing them to enter
7820Sstevel@tonic-gate  * the panic_idle() loop above.
7830Sstevel@tonic-gate  */
7840Sstevel@tonic-gate /*ARGSUSED*/
7850Sstevel@tonic-gate void
7860Sstevel@tonic-gate panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
7870Sstevel@tonic-gate {
7880Sstevel@tonic-gate 	processorid_t i;
7890Sstevel@tonic-gate 	cpuset_t xcset;
7900Sstevel@tonic-gate 
7915084Sjohnlev 	/*
7925084Sjohnlev 	 * In the case of a Xen panic, the hypervisor has already stopped
7935084Sjohnlev 	 * all of the CPUs.
7945084Sjohnlev 	 */
7955084Sjohnlev 	if (!IN_XPV_PANIC()) {
7965084Sjohnlev 		(void) splzs();
7970Sstevel@tonic-gate 
7985084Sjohnlev 		CPUSET_ALL_BUT(xcset, cp->cpu_id);
7995084Sjohnlev 		xc_trycall(NULL, NULL, NULL, xcset, (int (*)())panic_idle);
8005084Sjohnlev 	}
8010Sstevel@tonic-gate 
8020Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++) {
8030Sstevel@tonic-gate 		if (i != cp->cpu_id && cpu[i] != NULL &&
8040Sstevel@tonic-gate 		    (cpu[i]->cpu_flags & CPU_EXISTS))
8050Sstevel@tonic-gate 			cpu[i]->cpu_flags |= CPU_QUIESCED;
8060Sstevel@tonic-gate 	}
8070Sstevel@tonic-gate }
8080Sstevel@tonic-gate 
8090Sstevel@tonic-gate /*
8100Sstevel@tonic-gate  * Platform callback following each entry to panicsys().
8110Sstevel@tonic-gate  */
/*ARGSUSED*/
void
panic_enter_hw(int spl)
{
	/* Nothing to do here -- no hardware state to capture on entry */
}
8180Sstevel@tonic-gate 
8190Sstevel@tonic-gate /*
8200Sstevel@tonic-gate  * Platform-specific code to execute after panicstr is set: we invoke
8210Sstevel@tonic-gate  * the PSM entry point to indicate that a panic has occurred.
8220Sstevel@tonic-gate  */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	/* Tell the PSM layer that we have entered a panic */
	psm_notifyf(PSM_PANIC_ENTER);

	/* Give the CPU-module interface its panic notification */
	cmi_panic_callback();

#ifdef	TRAPTRACE
	/*
	 * Turn off TRAPTRACE so the buffer contents reflect the state
	 * at panic time.
	 */
	TRAPTRACE_FREEZE;
#endif	/* TRAPTRACE */
}
8380Sstevel@tonic-gate 
8390Sstevel@tonic-gate /*
8400Sstevel@tonic-gate  * Platform callback prior to writing crash dump.
8410Sstevel@tonic-gate  */
/*ARGSUSED*/
void
panic_dump_hw(int spl)
{
	/* Nothing to do here -- no hardware prep needed before the dump */
}
8480Sstevel@tonic-gate 
/*
 * Platform hook for stack tracebacks.  In a hypervisor panic the xpv
 * code supplies the frame-pointer register set; otherwise the one we
 * were handed is used unchanged.
 */
void *
plat_traceback(void *fpreg)
{
#ifdef __xpv
	if (IN_XPV_PANIC())
		fpreg = xpv_traceback(fpreg);
#endif
	return (fpreg);
}
8585084Sjohnlev 
/*
 * Platform callback for time-of-day clock faults; nothing to do here.
 */
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{}
8630Sstevel@tonic-gate 
/*
 * Resource blacklisting is not implemented on this platform; always
 * fail with ENOTSUP.
 */
/*ARGSUSED*/
int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
	return (ENOTSUP);
}
8700Sstevel@tonic-gate 
8710Sstevel@tonic-gate /*
8720Sstevel@tonic-gate  * The underlying console output routines are protected by raising IPL in case
8730Sstevel@tonic-gate  * we are still calling into the early boot services.  Once we start calling
8740Sstevel@tonic-gate  * the kernel console emulator, it will disable interrupts completely during
8750Sstevel@tonic-gate  * character rendering (see sysp_putchar, for example).  Refer to the comments
8760Sstevel@tonic-gate  * and code in common/os/console.c for more information on these callbacks.
8770Sstevel@tonic-gate  */
/*ARGSUSED*/
int
console_enter(int busy)
{
	int s;

	/* Raise IPL for console output; caller passes this to console_exit */
	s = splzs();
	return (s);
}
8840Sstevel@tonic-gate 
/*ARGSUSED*/
void
console_exit(int busy, int spl)
{
	/* Restore the interrupt priority saved by console_enter() */
	splx(spl);
}
8910Sstevel@tonic-gate 
8920Sstevel@tonic-gate /*
8930Sstevel@tonic-gate  * Allocate a region of virtual address space, unmapped.
8940Sstevel@tonic-gate  * Stubbed out except on sparc, at least for now.
8950Sstevel@tonic-gate  */
/*ARGSUSED*/
void *
boot_virt_alloc(void *addr, size_t size)
{
	/* No-op on x86: the requested address is returned unchanged */
	return (addr);
}
9020Sstevel@tonic-gate 
/*
 * Store target for the calibrated delay loop in tenmicrosec();
 * volatile so the stores are not optimized away.
 */
volatile unsigned long	tenmicrodata;
9040Sstevel@tonic-gate 
9050Sstevel@tonic-gate void
9060Sstevel@tonic-gate tenmicrosec(void)
9070Sstevel@tonic-gate {
9085084Sjohnlev 	extern int gethrtime_hires;
9090Sstevel@tonic-gate 
9105084Sjohnlev 	if (gethrtime_hires) {
9110Sstevel@tonic-gate 		hrtime_t start, end;
9120Sstevel@tonic-gate 		start = end =  gethrtime();
9130Sstevel@tonic-gate 		while ((end - start) < (10 * (NANOSEC / MICROSEC))) {
9140Sstevel@tonic-gate 			SMT_PAUSE();
9150Sstevel@tonic-gate 			end = gethrtime();
9160Sstevel@tonic-gate 		}
9170Sstevel@tonic-gate 	} else {
9185084Sjohnlev #if defined(__xpv)
9195084Sjohnlev 		hrtime_t newtime;
9205084Sjohnlev 
9215084Sjohnlev 		newtime = xpv_gethrtime() + 10000; /* now + 10 us */
9225084Sjohnlev 		while (xpv_gethrtime() < newtime)
9235084Sjohnlev 			SMT_PAUSE();
9245084Sjohnlev #else	/* __xpv */
9253446Smrj 		int i;
9263446Smrj 
9270Sstevel@tonic-gate 		/*
9280Sstevel@tonic-gate 		 * Artificial loop to induce delay.
9290Sstevel@tonic-gate 		 */
9300Sstevel@tonic-gate 		for (i = 0; i < microdata; i++)
9310Sstevel@tonic-gate 			tenmicrodata = microdata;
9325084Sjohnlev #endif	/* __xpv */
9330Sstevel@tonic-gate 	}
9340Sstevel@tonic-gate }
935590Sesolom 
936590Sesolom /*
937590Sesolom  * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
938590Sesolom  * long, and it fills in the array with the time spent on cpu in
939590Sesolom  * each of the mstates, where time is returned in nsec.
940590Sesolom  *
941590Sesolom  * No guarantee is made that the returned values in times[] will
942590Sesolom  * monotonically increase on sequential calls, although this will
943590Sesolom  * be true in the long run. Any such guarantee must be handled by
944590Sesolom  * the caller, if needed. This can happen if we fail to account
945590Sesolom  * for elapsed time due to a generation counter conflict, yet we
946590Sesolom  * did account for it on a prior call (see below).
947590Sesolom  *
948590Sesolom  * The complication is that the cpu in question may be updating
949590Sesolom  * its microstate at the same time that we are reading it.
950590Sesolom  * Because the microstate is only updated when the CPU's state
951590Sesolom  * changes, the values in cpu_intracct[] can be indefinitely out
952590Sesolom  * of date. To determine true current values, it is necessary to
953590Sesolom  * compare the current time with cpu_mstate_start, and add the
954590Sesolom  * difference to times[cpu_mstate].
955590Sesolom  *
956590Sesolom  * This can be a problem if those values are changing out from
957590Sesolom  * under us. Because the code path in new_cpu_mstate() is
958590Sesolom  * performance critical, we have not added a lock to it. Instead,
959590Sesolom  * we have added a generation counter. Before beginning
960590Sesolom  * modifications, the counter is set to 0. After modifications,
961590Sesolom  * it is set to the old value plus one.
962590Sesolom  *
963590Sesolom  * get_cpu_mstate() will not consider the values of cpu_mstate
964590Sesolom  * and cpu_mstate_start to be usable unless the value of
965590Sesolom  * cpu_mstate_gen is both non-zero and unchanged, both before and
966590Sesolom  * after reading the mstate information. Note that we must
967590Sesolom  * protect against out-of-order loads around accesses to the
968590Sesolom  * generation counter. Also, this is a best effort approach in
969590Sesolom  * that we do not retry should the counter be found to have
970590Sesolom  * changed.
971590Sesolom  *
972590Sesolom  * cpu_intracct[] is used to identify time spent in each CPU
973590Sesolom  * mstate while handling interrupts. Such time should be reported
974590Sesolom  * against system time, and so is subtracted out from its
975590Sesolom  * corresponding cpu_acct[] time and added to
976590Sesolom  * cpu_acct[CMS_SYSTEM].
977590Sesolom  */
978590Sesolom 
void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
	int i;
	hrtime_t now, start;
	uint16_t gen;
	uint16_t state;
	hrtime_t intracct[NCMSTATES];

	/*
	 * Load all volatile state under the protection of membar.
	 * cpu_acct[cpu_mstate] must be loaded to avoid double counting
	 * of (now - cpu_mstate_start) by a change in CPU mstate that
	 * arrives after we make our last check of cpu_mstate_gen.
	 */

	now = gethrtime_unscaled();
	gen = cpu->cpu_mstate_gen;

	membar_consumer();	/* guarantee load ordering */
	start = cpu->cpu_mstate_start;
	state = cpu->cpu_mstate;
	for (i = 0; i < NCMSTATES; i++) {
		intracct[i] = cpu->cpu_intracct[i];
		times[i] = cpu->cpu_acct[i];
	}
	membar_consumer();	/* guarantee load ordering */

	/*
	 * Only credit time since the last state change if the generation
	 * counter is non-zero and unchanged across the snapshot above;
	 * otherwise the values may be torn and we report the snapshot
	 * as-is (best effort -- see the block comment preceding this
	 * function).
	 */
	if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
		times[state] += now - start;

	for (i = 0; i < NCMSTATES; i++) {
		if (i == CMS_SYSTEM)
			continue;
		/*
		 * Move interrupt time out of its nominal state and into
		 * CMS_SYSTEM, clamping at zero so no state goes negative.
		 */
		times[i] -= intracct[i];
		if (times[i] < 0) {
			intracct[i] += times[i];
			times[i] = 0;
		}
		times[CMS_SYSTEM] += intracct[i];
		scalehrtime(&times[i]);
	}
	scalehrtime(&times[CMS_SYSTEM]);
}
10233446Smrj 
10243446Smrj /*
10253446Smrj  * This is a version of the rdmsr instruction that allows
10263446Smrj  * an error code to be returned in the case of failure.
10273446Smrj  */
10283446Smrj int
10293446Smrj checked_rdmsr(uint_t msr, uint64_t *value)
10303446Smrj {
10313446Smrj 	if ((x86_feature & X86_MSR) == 0)
10323446Smrj 		return (ENOTSUP);
10333446Smrj 	*value = rdmsr(msr);
10343446Smrj 	return (0);
10353446Smrj }
10363446Smrj 
10373446Smrj /*
10383446Smrj  * This is a version of the wrmsr instruction that allows
10393446Smrj  * an error code to be returned in the case of failure.
10403446Smrj  */
10413446Smrj int
10423446Smrj checked_wrmsr(uint_t msr, uint64_t value)
10433446Smrj {
10443446Smrj 	if ((x86_feature & X86_MSR) == 0)
10453446Smrj 		return (ENOTSUP);
10463446Smrj 	wrmsr(msr, value);
10473446Smrj 	return (0);
10483446Smrj }
10493446Smrj 
10503446Smrj /*
10515084Sjohnlev  * The mem driver's usual method of using hat_devload() to establish a
10525084Sjohnlev  * temporary mapping will not work for foreign pages mapped into this
10535084Sjohnlev  * domain or for the special hypervisor-provided pages.  For the foreign
10545084Sjohnlev  * pages, we often don't know which domain owns them, so we can't ask the
10555084Sjohnlev  * hypervisor to set up a new mapping.  For the other pages, we don't have
10565084Sjohnlev  * a pfn, so we can't create a new PTE.  For these special cases, we do a
10575084Sjohnlev  * direct uiomove() from the existing kernel virtual address.
10583446Smrj  */
/*ARGSUSED*/
int
plat_mem_do_mmio(struct uio *uio, enum uio_rw rw)
{
#if defined(__xpv)
	void *va = (void *)(uintptr_t)uio->uio_loffset;
	off_t pageoff = uio->uio_loffset & PAGEOFFSET;
	/* never move more than the remainder of the current page */
	size_t nbytes = MIN((size_t)(PAGESIZE - pageoff),
	    (size_t)uio->uio_iov->iov_len);

	/*
	 * Direct uiomove from the existing kernel VA for the special
	 * hypervisor pages (reads only) and for foreign-owned pages --
	 * see the block comment above this function.
	 */
	if ((rw == UIO_READ &&
	    (va == HYPERVISOR_shared_info || va == xen_info)) ||
	    (pfn_is_foreign(hat_getpfnum(kas.a_hat, va))))
		return (uiomove(va, nbytes, rw, uio));
#endif
	return (ENOTSUP);
}
10765084Sjohnlev 
10775084Sjohnlev pgcnt_t
10785084Sjohnlev num_phys_pages()
10793446Smrj {
10805084Sjohnlev 	pgcnt_t npages = 0;
10815084Sjohnlev 	struct memlist *mp;
10825084Sjohnlev 
10835084Sjohnlev #if defined(__xpv)
10845084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
10855084Sjohnlev 		xen_sysctl_t op;
10865084Sjohnlev 
10875084Sjohnlev 		op.cmd = XEN_SYSCTL_physinfo;
10885084Sjohnlev 		op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
10895084Sjohnlev 		if (HYPERVISOR_sysctl(&op) != 0)
10905084Sjohnlev 			panic("physinfo op refused");
10915084Sjohnlev 
10925084Sjohnlev 		return ((pgcnt_t)op.u.physinfo.total_pages);
10935084Sjohnlev 	}
10945084Sjohnlev #endif /* __xpv */
10955084Sjohnlev 
10965084Sjohnlev 	for (mp = phys_install; mp != NULL; mp = mp->next)
10975084Sjohnlev 		npages += mp->size >> PAGESHIFT;
10985084Sjohnlev 
10995084Sjohnlev 	return (npages);
11003446Smrj }
11013446Smrj 
/*
 * Write platform-specific address-translation (mem_vtop) entries to
 * the crash dump; returns the number of entries written (0 when not
 * running on the hypervisor).
 */
int
dump_plat_addr()
{
#ifdef __xpv
	pfn_t pfn = mmu_btop(xen_info->shared_info) | PFN_IS_FOREIGN_MFN;
	mem_vtop_t mem_vtop;
	int cnt;

	/*
	 * On the hypervisor, we want to dump the page with shared_info on it.
	 */
	if (!IN_XPV_PANIC()) {
		mem_vtop.m_as = &kas;
		mem_vtop.m_va = HYPERVISOR_shared_info;
		mem_vtop.m_pfn = pfn;
		dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
		cnt = 1;
	} else {
		/* hypervisor panic: the xpv code supplies the entries */
		cnt = dump_xpv_addr();
	}
	return (cnt);
#else
	return (0);
#endif
}
11273446Smrj 
/*
 * Write the platform-specific pfn list to the crash dump.  On the
 * hypervisor this is the pfn of the shared_info page (marked as a
 * foreign MFN); a no-op elsewhere.
 */
void
dump_plat_pfn()
{
#ifdef __xpv
	pfn_t pfn = mmu_btop(xen_info->shared_info) | PFN_IS_FOREIGN_MFN;

	if (IN_XPV_PANIC())
		dump_xpv_pfn();
	else
		dumpvp_write(&pfn, sizeof (pfn));
#endif
}
11403446Smrj 
/*ARGSUSED*/
int
dump_plat_data(void *dump_cbuf)
{
#ifdef __xpv
	uint32_t csize;
	int cnt;

	if (!IN_XPV_PANIC()) {
		/*
		 * Compress the shared_info page into dump_cbuf and write
		 * the compressed size followed by the data.
		 */
		csize = (uint32_t)compress(HYPERVISOR_shared_info, dump_cbuf,
		    PAGESIZE);
		dumpvp_write(&csize, sizeof (uint32_t));
		dumpvp_write(dump_cbuf, csize);
		cnt = 1;
	} else {
		/* hypervisor panic: the xpv code supplies the pages */
		cnt = dump_xpv_data(dump_cbuf);
	}
	/* number of pages written (0 when not on the hypervisor) */
	return (cnt);
#else
	return (0);
#endif
}
11633939Ssethg 
11643939Ssethg /*
11653939Ssethg  * Calculates a linear address, given the CS selector and PC values
11663939Ssethg  * by looking up the %cs selector process's LDT or the CPU's GDT.
11673939Ssethg  * proc->p_ldtlock must be held across this call.
11683939Ssethg  */
int
linear_pc(struct regs *rp, proc_t *p, caddr_t *linearp)
{
	user_desc_t	*descrp;
	caddr_t		baseaddr;
	uint16_t	idx = SELTOIDX(rp->r_cs);

	ASSERT(rp->r_cs <= 0xFFFF);
	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	if (SELISLDT(rp->r_cs)) {
		/*
		 * Currently 64 bit processes cannot have private LDTs.
		 */
		ASSERT(p->p_model != DATAMODEL_LP64);

		/* no private LDT at all: the selector cannot be resolved */
		if (p->p_ldt == NULL)
			return (-1);

		/*
		 * NOTE(review): idx is not range-checked against the LDT
		 * size here -- presumably the %cs selector was already
		 * validated by the hardware/caller; confirm.
		 */
		descrp = &p->p_ldt[idx];
		baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);

		/*
		 * Calculate the linear address (wraparound is not only ok,
		 * it's expected behavior).  The cast to uint32_t is because
		 * LDT selectors are only allowed in 32-bit processes.
		 */
		*linearp = (caddr_t)(uintptr_t)(uint32_t)((uintptr_t)baseaddr +
		    rp->r_pc);
	} else {
#ifdef DEBUG
		descrp = &CPU->cpu_gdt[idx];
		baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);
		/* GDT-based descriptors' base addresses should always be 0 */
		ASSERT(baseaddr == 0);
#endif
		/* GDT segment with zero base: linear address == %pc */
		*linearp = (caddr_t)(uintptr_t)rp->r_pc;
	}

	return (0);
}
12103939Ssethg 
12113939Ssethg /*
12123939Ssethg  * The implementation of dtrace_linear_pc is similar to the that of
12133939Ssethg  * linear_pc, above, but here we acquire p_ldtlock before accessing
12143939Ssethg  * p_ldt.  This implementation is used by the pid provider; we prefix
12153939Ssethg  * it with "dtrace_" to avoid inducing spurious tracing events.
12163939Ssethg  */
int
dtrace_linear_pc(struct regs *rp, proc_t *p, caddr_t *linearp)
{
	user_desc_t	*descrp;
	caddr_t		baseaddr;
	uint16_t	idx = SELTOIDX(rp->r_cs);

	ASSERT(rp->r_cs <= 0xFFFF);

	if (SELISLDT(rp->r_cs)) {
		/*
		 * Currently 64 bit processes cannot have private LDTs.
		 */
		ASSERT(p->p_model != DATAMODEL_LP64);

		/*
		 * Unlike linear_pc(), we take p_ldtlock ourselves; the
		 * descriptor base is copied out under the lock so the LDT
		 * can't change under us while we read it.
		 */
		mutex_enter(&p->p_ldtlock);
		if (p->p_ldt == NULL) {
			mutex_exit(&p->p_ldtlock);
			return (-1);
		}
		descrp = &p->p_ldt[idx];
		baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);
		mutex_exit(&p->p_ldtlock);

		/*
		 * Calculate the linear address (wraparound is not only ok,
		 * it's expected behavior).  The cast to uint32_t is because
		 * LDT selectors are only allowed in 32-bit processes.
		 */
		*linearp = (caddr_t)(uintptr_t)(uint32_t)((uintptr_t)baseaddr +
		    rp->r_pc);
	} else {
#ifdef DEBUG
		descrp = &CPU->cpu_gdt[idx];
		baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);
		/* GDT-based descriptors' base addresses should always be 0 */
		ASSERT(baseaddr == 0);
#endif
		/* GDT segment with zero base: linear address == %pc */
		*linearp = (caddr_t)(uintptr_t)rp->r_pc;
	}

	return (0);
}
1260