xref: /onnv-gate/usr/src/uts/sun4v/os/mach_startup.c (revision 12149:607008ac563e)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/promif.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/bitset.h>
#include <sys/cpupart.h>
#include <sys/disp.h>
#include <sys/hypervisor_api.h>
#include <sys/traptrace.h>
#include <sys/modctl.h>
#include <sys/ldoms.h>
#include <sys/cpu_module.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/cmt.h>
#include <vm/vm_dep.h>

#ifdef TRAPTRACE
int mach_htraptrace_enable = 1;
#else
int mach_htraptrace_enable = 0;
#endif
int htrap_tr0_inuse = 0;
extern char htrap_tr0[];	/* prealloc buf for boot cpu */

caddr_t	mmu_fault_status_area;

extern void sfmmu_set_tsbs(void);
/*
 * CPU IDLE optimization variables/routines
 */
static int enable_halt_idle_cpus = 1;

/*
 * Defines for the idle_state_transition DTrace probe
 *
 * The probe fires when the CPU undergoes an idle state change (e.g. hv yield).
 * The argument passed is the state to which the CPU is transitioning.
 *
 * The states are defined here.
 */
#define	IDLE_STATE_NORMAL 0
#define	IDLE_STATE_YIELDED 1
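
/*
 * As an illustration (not part of this file), the transitions can be
 * observed through the SDT provider, e.g.:
 *
 *	dtrace -n 'sdt:::idle-state-transition { trace(arg0); }'
 */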

#define	SUN4V_CLOCK_TICK_THRESHOLD	64
#define	SUN4V_CLOCK_TICK_NCPUS		64
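/*
 * Defaults for the scalable clock tick facility (see clock_tick.c);
 * startup_platform() below applies them only when the clock_tick_threshold
 * and clock_tick_ncpus tunables have not been set elsewhere.
 */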

extern int	clock_tick_threshold;
extern int	clock_tick_ncpus;

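/*
 * Fanout value for the partition's halted-CPU bitset (cp_haltset).
 * Presumably consumed when cp_haltset is initialized (an assumption
 * based on its use with the common bitset code); a larger fanout
 * spreads the set across more words to reduce contention.
 */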
uint_t cp_haltset_fanout = 3;

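/*
 * Register this CPU's trap handling state: point the scratchpad register
 * at the CPU's slice of the MMU fault status area and hand the trap
 * table address to the PROM/hypervisor.
 */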
void
setup_trap_table(void)
{
	caddr_t mmfsa_va;
	extern caddr_t mmu_fault_status_area;
	mmfsa_va =
	    mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);

	intr_init(CPU);		/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
	sfmmu_set_tsbs();
}

void
phys_install_has_changed(void)
{
}

/*
 * Halt the present CPU until awoken via an interrupt
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online, note that we are halting by adding
	 * ourselves to the partition's halted-CPU bitset.  This allows
	 * other CPUs to find/awaken us when work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitset
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpu bitset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted bitset, and then check if there
	 * is any work available.  The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork().
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitset, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.  Wait until something becomes
	 * runnable locally or we are awakened (i.e. removed from the halt
	 * set).  Note that the call to hv_cpu_yield() can return even if we
	 * have nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 * Also, if the offlined CPU has been brought back on-line, then
	 * we return as well.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 *
	 * Any interrupt will awaken the cpu from halt. Looping here
	 * will filter spurious interrupts that wake us up, but don't
	 * represent a need for us to head back out to idle().  This
	 * will enable the idle loop to be more efficient and sleep in
	 * the processor pipeline for a larger percent of the time,
	 * which returns useful cycles to the peer hardware strand
	 * that shares the pipeline.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    ((hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid)) ||
	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {

		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_YIELDED);
		(void) hv_cpu_yield();
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_NORMAL);

		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	processorid_t	cpu_sid;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpu bitset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 */
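	/*
	 * The find/delete pair below is not atomic as a whole: another CPU
	 * may claim the CPU we just found before we do, in which case the
	 * test-and-delete fails and we retry with a fresh scan.
	 */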
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}

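/*
 * Install the halt-based idle loop and the matching wakeup hook, unless
 * the enable_halt_idle_cpus tunable has been cleared.
 */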
void
mach_cpu_halt_idle(void)
{
	if (enable_halt_idle_cpus) {
		idle_cpu = cpu_halt;
		disp_enq_thread = cpu_wakeup;
	}
}

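/*
 * Carve the MMU fault status areas, one MMFSA_SIZE slice per possible
 * CPU, out of the nucleus data area; returns -1 if the allocation fails.
 */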
int
ndata_alloc_mmfsa(struct memlist *ndata)
{
	size_t	size;

	size = MMFSA_SIZE * max_ncpus;
	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
	if (mmu_fault_status_area == NULL)
		return (-1);
	return (0);
}

void
mach_memscrub(void)
{
	/* no memscrub support for sun4v for now */
}

void
mach_fpras()
{
	/* no fpras support for sun4v for now */
}

void
mach_hw_copy_limit(void)
{
	/* HW copy limits set by individual CPU module */
}

/*
 * We need to enable soft ring functionality on Niagara platforms since
 * one strand can't handle interrupts for a 1Gb NIC. So set the tunable
 * mac_soft_ring_enable by default on this platform.
 * The mac_soft_ring_enable variable is defined in space.c and used by
 * the MAC module. This tunable, in concert with mac_soft_ring_count
 * (declared in mac.h), configures the number of fanout soft rings for
 * a link.
 */
extern boolean_t mac_soft_ring_enable;
3281184Skrgopi void
startup_platform(void)3291184Skrgopi startup_platform(void)
3301184Skrgopi {
3318275SEric Cheng 	mac_soft_ring_enable = B_TRUE;
3325788Smv143129 	if (clock_tick_threshold == 0)
3335788Smv143129 		clock_tick_threshold = SUN4V_CLOCK_TICK_THRESHOLD;
3345788Smv143129 	if (clock_tick_ncpus == 0)
3355788Smv143129 		clock_tick_ncpus = SUN4V_CLOCK_TICK_NCPUS;
3365834Spt157919 	/* set per-platform constants for mutex_backoff */
3375834Spt157919 	mutex_backoff_base = 1;
3385834Spt157919 	mutex_cap_factor = 4;
3395834Spt157919 	if (l2_cache_node_count() > 1) {
3405834Spt157919 		/* VF for example */
3415834Spt157919 		mutex_backoff_base = 2;
3426138Ssvemuri 		mutex_cap_factor = 64;
3435834Spt157919 	}
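	/* rwlocks use the generic backoff/delay routines on sun4v */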
	rw_lock_backoff = default_lock_backoff;
	rw_lock_delay = default_lock_delay;
}

/*
 * This function sets up the hypervisor traptrace buffer.
 * It is called by the boot CPU only.
 */
void
mach_htraptrace_setup(int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;
	int bootcpuid = getprocessorid(); /* invoked on boot cpu only */

	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
	    !htrap_tr0_inuse)) {
		ctlp = &trap_trace_ctl[cpuid];
		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
		if (ctlp->d.hvaddr_base == NULL) {
			ctlp->d.hlimit = 0;
			ctlp->d.hpaddr_base = NULL;
			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
			    "traptrace buffer", cpuid);
		} else {
			ctlp->d.hlimit = HTRAP_TSIZE;
			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
		}
	}
}

/*
 * This function enables or disables hypervisor traptracing.
 */
void
mach_htraptrace_configure(int cpuid)
{
	uint64_t ret;
	uint64_t prev_buf, prev_bufsize;
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL	*ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	if (mach_htraptrace_enable) {
		if ((ctlp->d.hvaddr_base != NULL) &&
		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
		    (!htrap_tr0_inuse))) {
			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
			if ((ret == H_EOK) && (prev_bufsize != 0)) {
				cmn_err(CE_CONT,
				    "!cpu%d: previous HV traptrace buffer of "
				    "size 0x%lx at address 0x%lx", cpuid,
				    prev_bufsize, prev_buf);
			}

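			/*
			 * hv_ttrace_buf_conf() takes the buffer size as a
			 * count of trap trace entries, hence the division
			 * by the record size below.
			 */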
			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
			    ctlp->d.hlimit /
			    (sizeof (struct htrap_trace_record)), &size);
			if (ret == H_EOK) {
				ret = hv_ttrace_enable(
				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
				if (ret != H_EOK) {
					cmn_err(CE_WARN,
					    "!cpu%d: HV traptracing not "
					    "enabled, ta: 0x%x returned error: "
					    "%ld", cpuid, TTRACE_ENABLE, ret);
				} else {
					if (ctlp->d.hvaddr_base == htrap_tr0)
						htrap_tr0_inuse = 1;
				}
			} else {
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptrace buffer not "
				    "configured, ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_BUF_CONF, ret);
			}
			/*
			 * set hvaddr_base to NULL when traptrace buffer
			 * registration fails
			 */
			if (ret != H_EOK) {
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			}
		}
	} else {
		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
		if ((ret == H_EOK) && (prev_bufsize != 0)) {
			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
			    &prev_enable);
			if (ret == H_EOK) {
				if (ctlp->d.hvaddr_base == htrap_tr0)
					htrap_tr0_inuse = 0;
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			} else
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptracing is not disabled, "
				    "ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_ENABLE, ret);
		}
	}
}

/*
 * This function cleans up the hypervisor traptrace buffer
 */
void
mach_htraptrace_cleanup(int cpuid)
{
	if (mach_htraptrace_enable) {
		TRAP_TRACE_CTL *ctlp;
		caddr_t httrace_buf_va;

		ASSERT(cpuid < max_ncpus);
		ctlp = &trap_trace_ctl[cpuid];
		httrace_buf_va = ctlp->d.hvaddr_base;
		if (httrace_buf_va == htrap_tr0) {
			bzero(httrace_buf_va, HTRAP_TSIZE);
		} else if (httrace_buf_va != NULL) {
			contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
		}
		ctlp->d.hvaddr_base = NULL;
		ctlp->d.hlimit = 0;
		ctlp->d.hpaddr_base = NULL;
	}
}

/*
 * Load any required machine class (sun4v) specific drivers.
 */
void
load_mach_drivers(void)
{
	/*
	 * We don't want to load these LDOMs-specific
	 * modules if domaining is not supported.  Also,
	 * we must be able to run on non-LDOMs firmware.
	 */
	if (!domaining_supported())
		return;

	/*
	 * Load the core domain services module
	 */
	if (modload("misc", "ds") == -1)
		cmn_err(CE_NOTE, "!'ds' module failed to load");

	/*
	 * Load the rest of the domain services
	 */
	if (modload("misc", "fault_iso") == -1)
		cmn_err(CE_NOTE, "!'fault_iso' module failed to load");

	if (modload("misc", "platsvc") == -1)
		cmn_err(CE_NOTE, "!'platsvc' module failed to load");

	if (domaining_enabled() && modload("misc", "dr_cpu") == -1)
		cmn_err(CE_NOTE, "!'dr_cpu' module failed to load");

	if (modload("misc", "dr_io") == -1)
		cmn_err(CE_NOTE, "!'dr_io' module failed to load");

	if (modload("misc", "dr_mem") == -1)
		cmn_err(CE_NOTE, "!'dr_mem' module failed to load");

	/*
	 * Attempt to attach any virtual device servers. These
	 * drivers must be loaded at start of day so that they
	 * can respond to any updates to the machine description.
	 *
	 * Since it is quite likely that a domain will not support
	 * one or more of these servers, failures are ignored.
	 */

	/* virtual disk server */
	(void) i_ddi_attach_hw_nodes("vds");

	/* virtual network switch */
	(void) i_ddi_attach_hw_nodes("vsw");

	/* virtual console concentrator */
	(void) i_ddi_attach_hw_nodes("vcc");
}

void
set_platform_defaults(void)
{
	/*
	 * Allow at most one context domain per 8 CPUs, which is ample for
	 * good performance.  Do not make this too large, because it
	 * increases the space consumed in the per-process sfmmu structure.
	 */
	if (max_mmu_ctxdoms == 0)
		max_mmu_ctxdoms = (NCPU + 7) / 8;
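		/* e.g. NCPU == 64 gives (64 + 7) / 8 == 8 context domains */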
}