xref: /onnv-gate/usr/src/uts/sun4v/os/mach_startup.c (revision 8408:7b4e48a75d0c)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51859Sha137994  * Common Development and Distribution License (the "License").
61859Sha137994  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211991Sheppo 
220Sstevel@tonic-gate /*
235788Smv143129  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #include <sys/machsystm.h>
280Sstevel@tonic-gate #include <sys/archsystm.h>
290Sstevel@tonic-gate #include <sys/prom_plat.h>
300Sstevel@tonic-gate #include <sys/promif.h>
310Sstevel@tonic-gate #include <sys/vm.h>
320Sstevel@tonic-gate #include <sys/cpu.h>
33*8408SEric.Saxe@Sun.COM #include <sys/bitset.h>
340Sstevel@tonic-gate #include <sys/cpupart.h>
350Sstevel@tonic-gate #include <sys/disp.h>
360Sstevel@tonic-gate #include <sys/hypervisor_api.h>
371077Ssvemuri #include <sys/traptrace.h>
381991Sheppo #include <sys/modctl.h>
391991Sheppo #include <sys/ldoms.h>
405834Spt157919 #include <sys/cpu_module.h>
415834Spt157919 #include <sys/mutex_impl.h>
426138Ssvemuri #include <sys/rwlock.h>
43*8408SEric.Saxe@Sun.COM #include <sys/sdt.h>
44*8408SEric.Saxe@Sun.COM #include <sys/cmt.h>
454104Sblakej #include <vm/vm_dep.h>
461077Ssvemuri 
470Sstevel@tonic-gate #ifdef TRAPTRACE
481077Ssvemuri int mach_htraptrace_enable = 1;
491077Ssvemuri #else
501077Ssvemuri int mach_htraptrace_enable = 0;
511077Ssvemuri #endif
521077Ssvemuri int htrap_tr0_inuse = 0;
531077Ssvemuri extern char htrap_tr0[];	/* prealloc buf for boot cpu */
540Sstevel@tonic-gate 
550Sstevel@tonic-gate caddr_t	mmu_fault_status_area;
560Sstevel@tonic-gate 
570Sstevel@tonic-gate extern void sfmmu_set_tsbs(void);
580Sstevel@tonic-gate /*
590Sstevel@tonic-gate  * CPU IDLE optimization variables/routines
600Sstevel@tonic-gate  */
610Sstevel@tonic-gate static int enable_halt_idle_cpus = 1;
620Sstevel@tonic-gate 
635864Sesaxe /*
645864Sesaxe  * Defines for the idle_state_transition DTrace probe
655864Sesaxe  *
665864Sesaxe  * The probe fires when the CPU undergoes an idle state change (e.g. hv yield)
675864Sesaxe  * The argument passed is the state to which the CPU is transitioning.
685864Sesaxe  *
695864Sesaxe  * The states are defined here.
705864Sesaxe  */
715864Sesaxe #define	IDLE_STATE_NORMAL 0
725864Sesaxe #define	IDLE_STATE_YIELDED 1
735864Sesaxe 
745788Smv143129 #define	SUN4V_CLOCK_TICK_THRESHOLD	64
755788Smv143129 #define	SUN4V_CLOCK_TICK_NCPUS		64
765788Smv143129 
775788Smv143129 extern int	clock_tick_threshold;
785788Smv143129 extern int	clock_tick_ncpus;
795788Smv143129 
800Sstevel@tonic-gate void
810Sstevel@tonic-gate setup_trap_table(void)
820Sstevel@tonic-gate {
830Sstevel@tonic-gate 	caddr_t mmfsa_va;
840Sstevel@tonic-gate 	extern	 caddr_t mmu_fault_status_area;
850Sstevel@tonic-gate 	mmfsa_va =
860Sstevel@tonic-gate 	    mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);
870Sstevel@tonic-gate 
881991Sheppo 	intr_init(CPU);		/* init interrupt request free list */
890Sstevel@tonic-gate 	setwstate(WSTATE_KERN);
900Sstevel@tonic-gate 	set_mmfsa_scratchpad(mmfsa_va);
910Sstevel@tonic-gate 	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
920Sstevel@tonic-gate 	sfmmu_set_tsbs();
930Sstevel@tonic-gate }
940Sstevel@tonic-gate 
/*
 * Hook invoked when the physical memory install list changes.
 * Nothing to do on sun4v.
 */
void
phys_install_has_changed(void)
{
}
1000Sstevel@tonic-gate 
1010Sstevel@tonic-gate /*
1020Sstevel@tonic-gate  * Halt the present CPU until awoken via an interrupt
1030Sstevel@tonic-gate  */
1040Sstevel@tonic-gate static void
1050Sstevel@tonic-gate cpu_halt(void)
1060Sstevel@tonic-gate {
1070Sstevel@tonic-gate 	cpu_t *cpup = CPU;
108*8408SEric.Saxe@Sun.COM 	processorid_t cpu_sid = cpup->cpu_seqid;
109711Sesaxe 	cpupart_t *cp = cpup->cpu_part;
1100Sstevel@tonic-gate 	int hset_update = 1;
1113156Sgirish 	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
112232Sgirish 	uint_t s;
1130Sstevel@tonic-gate 
1140Sstevel@tonic-gate 	/*
1157971SDave.Marquardt@Sun.COM 	 * If this CPU is online then we should notate our halting
1160Sstevel@tonic-gate 	 * by adding ourselves to the partition's halted CPU
117*8408SEric.Saxe@Sun.COM 	 * bitset. This allows other CPUs to find/awaken us when
1180Sstevel@tonic-gate 	 * work becomes available.
1190Sstevel@tonic-gate 	 */
1207971SDave.Marquardt@Sun.COM 	if (CPU->cpu_flags & CPU_OFFLINE)
1210Sstevel@tonic-gate 		hset_update = 0;
1220Sstevel@tonic-gate 
1230Sstevel@tonic-gate 	/*
124*8408SEric.Saxe@Sun.COM 	 * Add ourselves to the partition's halted CPUs bitset
125303Sgirish 	 * and set our HALTED flag, if necessary.
126303Sgirish 	 *
127711Sesaxe 	 * When a thread becomes runnable, it is placed on the queue
128*8408SEric.Saxe@Sun.COM 	 * and then the halted cpu bitset is checked to determine who
129711Sesaxe 	 * (if anyone) should be awoken. We therefore need to first
130*8408SEric.Saxe@Sun.COM 	 * add ourselves to the halted bitset, and then check if there
131*8408SEric.Saxe@Sun.COM 	 * is any work available.  The order is important to prevent a race
132*8408SEric.Saxe@Sun.COM 	 * that can lead to work languishing on a run queue somewhere while
133*8408SEric.Saxe@Sun.COM 	 * this CPU remains halted.
134*8408SEric.Saxe@Sun.COM 	 *
135*8408SEric.Saxe@Sun.COM 	 * Either the producing CPU will see we're halted and will awaken us,
136*8408SEric.Saxe@Sun.COM 	 * or this CPU will see the work available in disp_anywork()
1370Sstevel@tonic-gate 	 */
1380Sstevel@tonic-gate 	if (hset_update) {
139303Sgirish 		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
140303Sgirish 		membar_producer();
141*8408SEric.Saxe@Sun.COM 		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
1420Sstevel@tonic-gate 	}
1430Sstevel@tonic-gate 
1440Sstevel@tonic-gate 	/*
1450Sstevel@tonic-gate 	 * Check to make sure there's really nothing to do.
146711Sesaxe 	 * Work destined for this CPU may become available after
147711Sesaxe 	 * this check. We'll be notified through the clearing of our
148*8408SEric.Saxe@Sun.COM 	 * bit in the halted CPU bitset, and a poke.
1490Sstevel@tonic-gate 	 */
1500Sstevel@tonic-gate 	if (disp_anywork()) {
151303Sgirish 		if (hset_update) {
152303Sgirish 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
153*8408SEric.Saxe@Sun.COM 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
154303Sgirish 		}
155711Sesaxe 		return;
156711Sesaxe 	}
157711Sesaxe 
1583156Sgirish 	/*
1593156Sgirish 	 * We're on our way to being halted.  Wait until something becomes
1603156Sgirish 	 * runnable locally or we are awaken (i.e. removed from the halt set).
1613156Sgirish 	 * Note that the call to hv_cpu_yield() can return even if we have
1623156Sgirish 	 * nothing to do.
163711Sesaxe 	 *
164711Sesaxe 	 * Disable interrupts now, so that we'll awaken immediately
165711Sesaxe 	 * after halting if someone tries to poke us between now and
166711Sesaxe 	 * the time we actually halt.
167711Sesaxe 	 *
168711Sesaxe 	 * We check for the presence of our bit after disabling interrupts.
169711Sesaxe 	 * If it's cleared, we'll return. If the bit is cleared after
170711Sesaxe 	 * we check then the poke will pop us out of the halted state.
1713275Sgirish 	 * Also, if the offlined CPU has been brought back on-line, then
1723275Sgirish 	 * we return as well.
173711Sesaxe 	 *
174711Sesaxe 	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
175711Sesaxe 	 * is important.
176711Sesaxe 	 * cpu_wakeup() must clear, then poke.
177711Sesaxe 	 * cpu_halt() must disable interrupts, then check for the bit.
1783156Sgirish 	 *
179711Sesaxe 	 * The check for anything locally runnable is here for performance
180711Sesaxe 	 * and isn't needed for correctness. disp_nrunnable ought to be
181711Sesaxe 	 * in our cache still, so it's inexpensive to check, and if there
182711Sesaxe 	 * is anything runnable we won't have to wait for the poke.
1833156Sgirish 	 *
184711Sesaxe 	 */
1853156Sgirish 	s = disable_vec_intr();
1863156Sgirish 	while (*p == 0 &&
187*8408SEric.Saxe@Sun.COM 	    ((hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid)) ||
1883275Sgirish 	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {
1895864Sesaxe 
1905864Sesaxe 		DTRACE_PROBE1(idle__state__transition,
1915864Sesaxe 		    uint_t, IDLE_STATE_YIELDED);
1923156Sgirish 		(void) hv_cpu_yield();
1935864Sesaxe 		DTRACE_PROBE1(idle__state__transition,
1945864Sesaxe 		    uint_t, IDLE_STATE_NORMAL);
1955864Sesaxe 
196232Sgirish 		enable_vec_intr(s);
1973156Sgirish 		s = disable_vec_intr();
1980Sstevel@tonic-gate 	}
1990Sstevel@tonic-gate 
2000Sstevel@tonic-gate 	/*
2010Sstevel@tonic-gate 	 * We're no longer halted
2020Sstevel@tonic-gate 	 */
203232Sgirish 	enable_vec_intr(s);
204303Sgirish 	if (hset_update) {
205303Sgirish 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
206*8408SEric.Saxe@Sun.COM 		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
207303Sgirish 	}
2080Sstevel@tonic-gate }
2090Sstevel@tonic-gate 
2100Sstevel@tonic-gate /*
2110Sstevel@tonic-gate  * If "cpu" is halted, then wake it up clearing its halted bit in advance.
2120Sstevel@tonic-gate  * Otherwise, see if other CPUs in the cpu partition are halted and need to
2130Sstevel@tonic-gate  * be woken up so that they can steal the thread we placed on this CPU.
2140Sstevel@tonic-gate  * This function is only used on MP systems.
2150Sstevel@tonic-gate  */
2160Sstevel@tonic-gate static void
2170Sstevel@tonic-gate cpu_wakeup(cpu_t *cpu, int bound)
2180Sstevel@tonic-gate {
2190Sstevel@tonic-gate 	uint_t		cpu_found;
220*8408SEric.Saxe@Sun.COM 	processorid_t	cpu_sid;
2210Sstevel@tonic-gate 	cpupart_t	*cp;
2220Sstevel@tonic-gate 
2230Sstevel@tonic-gate 	cp = cpu->cpu_part;
224*8408SEric.Saxe@Sun.COM 	cpu_sid = cpu->cpu_seqid;
225*8408SEric.Saxe@Sun.COM 	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
2260Sstevel@tonic-gate 		/*
2270Sstevel@tonic-gate 		 * Clear the halted bit for that CPU since it will be
2280Sstevel@tonic-gate 		 * poked in a moment.
2290Sstevel@tonic-gate 		 */
230*8408SEric.Saxe@Sun.COM 		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
2310Sstevel@tonic-gate 		/*
232*8408SEric.Saxe@Sun.COM 		 * We may find the current CPU present in the halted cpu bitset
2330Sstevel@tonic-gate 		 * if we're in the context of an interrupt that occurred
2340Sstevel@tonic-gate 		 * before we had a chance to clear our bit in cpu_halt().
2350Sstevel@tonic-gate 		 * Poking ourself is obviously unnecessary, since if
2360Sstevel@tonic-gate 		 * we're here, we're not halted.
2370Sstevel@tonic-gate 		 */
2380Sstevel@tonic-gate 		if (cpu != CPU)
2390Sstevel@tonic-gate 			poke_cpu(cpu->cpu_id);
2400Sstevel@tonic-gate 		return;
2410Sstevel@tonic-gate 	} else {
2420Sstevel@tonic-gate 		/*
2430Sstevel@tonic-gate 		 * This cpu isn't halted, but it's idle or undergoing a
2440Sstevel@tonic-gate 		 * context switch. No need to awaken anyone else.
2450Sstevel@tonic-gate 		 */
2460Sstevel@tonic-gate 		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
2470Sstevel@tonic-gate 		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
2480Sstevel@tonic-gate 			return;
2490Sstevel@tonic-gate 	}
2500Sstevel@tonic-gate 
2510Sstevel@tonic-gate 	/*
252*8408SEric.Saxe@Sun.COM 	 * No need to wake up other CPUs if this is for a bound thread.
2530Sstevel@tonic-gate 	 */
2540Sstevel@tonic-gate 	if (bound)
2550Sstevel@tonic-gate 		return;
2560Sstevel@tonic-gate 
2570Sstevel@tonic-gate 	/*
258*8408SEric.Saxe@Sun.COM 	 * The CPU specified for wakeup isn't currently halted, so check
259*8408SEric.Saxe@Sun.COM 	 * to see if there are any other halted CPUs in the partition,
260*8408SEric.Saxe@Sun.COM 	 * and if there are then awaken one.
2610Sstevel@tonic-gate 	 */
2620Sstevel@tonic-gate 	do {
263*8408SEric.Saxe@Sun.COM 		cpu_found = bitset_find(&cp->cp_haltset);
264*8408SEric.Saxe@Sun.COM 		if (cpu_found == (uint_t)-1)
2650Sstevel@tonic-gate 			return;
266*8408SEric.Saxe@Sun.COM 	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);
2670Sstevel@tonic-gate 
268*8408SEric.Saxe@Sun.COM 	if (cpu_found != CPU->cpu_seqid)
269*8408SEric.Saxe@Sun.COM 		poke_cpu(cpu_seq[cpu_found]->cpu_id);
2700Sstevel@tonic-gate }
2710Sstevel@tonic-gate 
2720Sstevel@tonic-gate void
2730Sstevel@tonic-gate mach_cpu_halt_idle()
2740Sstevel@tonic-gate {
2750Sstevel@tonic-gate 	if (enable_halt_idle_cpus) {
2760Sstevel@tonic-gate 		idle_cpu = cpu_halt;
2770Sstevel@tonic-gate 		disp_enq_thread = cpu_wakeup;
2780Sstevel@tonic-gate 	}
2790Sstevel@tonic-gate }
2800Sstevel@tonic-gate 
2810Sstevel@tonic-gate int
2820Sstevel@tonic-gate ndata_alloc_mmfsa(struct memlist *ndata)
2830Sstevel@tonic-gate {
2840Sstevel@tonic-gate 	size_t	size;
2850Sstevel@tonic-gate 
2860Sstevel@tonic-gate 	size = MMFSA_SIZE * max_ncpus;
2870Sstevel@tonic-gate 	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
2880Sstevel@tonic-gate 	if (mmu_fault_status_area == NULL)
2890Sstevel@tonic-gate 		return (-1);
2900Sstevel@tonic-gate 	return (0);
2910Sstevel@tonic-gate }
2920Sstevel@tonic-gate 
/*
 * Memory scrubber hook; sun4v currently has no memscrub support.
 */
void
mach_memscrub(void)
{
}
2980Sstevel@tonic-gate 
/*
 * FP RAS setup hook; sun4v currently has no fpras support.
 */
void
mach_fpras()
{
}
3040Sstevel@tonic-gate 
/*
 * Hardware copy limit hook; the limits are set by the individual
 * CPU module on sun4v, so nothing to do here.
 */
void
mach_hw_copy_limit(void)
{
}
3100Sstevel@tonic-gate 
3110Sstevel@tonic-gate /*
3128275SEric Cheng  * We need to enable soft ring functionality on Niagara platforms since
3138275SEric Cheng  * one strand can't handle interrupts for a 1Gb NIC. So set the tunable
3148275SEric Cheng  * mac_soft_ring_enable by default on this platform.
3158275SEric Cheng  * mac_soft_ring_enable variable is defined in space.c and used by MAC
3168275SEric Cheng  * module. This tunable in concert with mac_soft_ring_count (declared
3178275SEric Cheng  * in mac.h) will configure the number of fanout soft rings for a link.
3181184Skrgopi  */
3198275SEric Cheng extern boolean_t mac_soft_ring_enable;
3201184Skrgopi void
3211184Skrgopi startup_platform(void)
3221184Skrgopi {
3238275SEric Cheng 	mac_soft_ring_enable = B_TRUE;
3245788Smv143129 	if (clock_tick_threshold == 0)
3255788Smv143129 		clock_tick_threshold = SUN4V_CLOCK_TICK_THRESHOLD;
3265788Smv143129 	if (clock_tick_ncpus == 0)
3275788Smv143129 		clock_tick_ncpus = SUN4V_CLOCK_TICK_NCPUS;
3285834Spt157919 	/* set per-platform constants for mutex_backoff */
3295834Spt157919 	mutex_backoff_base = 1;
3305834Spt157919 	mutex_cap_factor = 4;
3315834Spt157919 	if (l2_cache_node_count() > 1) {
3325834Spt157919 		/* VF for example */
3335834Spt157919 		mutex_backoff_base = 2;
3346138Ssvemuri 		mutex_cap_factor = 64;
3355834Spt157919 	}
3366138Ssvemuri 	rw_lock_backoff = default_lock_backoff;
3376138Ssvemuri 	rw_lock_delay = default_lock_delay;
3381184Skrgopi }
3391184Skrgopi 
3401184Skrgopi /*
3410Sstevel@tonic-gate  * This function sets up hypervisor traptrace buffer
3421077Ssvemuri  * This routine is called by the boot cpu only
3430Sstevel@tonic-gate  */
3440Sstevel@tonic-gate void
3451077Ssvemuri mach_htraptrace_setup(int cpuid)
3460Sstevel@tonic-gate {
3470Sstevel@tonic-gate 	TRAP_TRACE_CTL	*ctlp;
3481077Ssvemuri 	int bootcpuid = getprocessorid(); /* invoked on boot cpu only */
3490Sstevel@tonic-gate 
3501077Ssvemuri 	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
3511077Ssvemuri 	    !htrap_tr0_inuse)) {
3521077Ssvemuri 		ctlp = &trap_trace_ctl[cpuid];
3531077Ssvemuri 		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
3541859Sha137994 		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
3551859Sha137994 		if (ctlp->d.hvaddr_base == NULL) {
3561859Sha137994 			ctlp->d.hlimit = 0;
3571859Sha137994 			ctlp->d.hpaddr_base = NULL;
3581859Sha137994 			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
3591859Sha137994 			    "traptrace buffer", cpuid);
3601859Sha137994 		} else {
3611859Sha137994 			ctlp->d.hlimit = HTRAP_TSIZE;
3621859Sha137994 			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
3631859Sha137994 		}
3641077Ssvemuri 	}
3650Sstevel@tonic-gate }
3660Sstevel@tonic-gate 
3670Sstevel@tonic-gate /*
3681077Ssvemuri  * This function enables or disables the hypervisor traptracing
3690Sstevel@tonic-gate  */
3700Sstevel@tonic-gate void
3711077Ssvemuri mach_htraptrace_configure(int cpuid)
3720Sstevel@tonic-gate {
3730Sstevel@tonic-gate 	uint64_t ret;
3740Sstevel@tonic-gate 	uint64_t prev_buf, prev_bufsize;
3750Sstevel@tonic-gate 	uint64_t prev_enable;
3760Sstevel@tonic-gate 	uint64_t size;
3770Sstevel@tonic-gate 	TRAP_TRACE_CTL	*ctlp;
3780Sstevel@tonic-gate 
3791077Ssvemuri 	ctlp = &trap_trace_ctl[cpuid];
3801077Ssvemuri 	if (mach_htraptrace_enable) {
3811859Sha137994 		if ((ctlp->d.hvaddr_base != NULL) &&
3821859Sha137994 		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
3831859Sha137994 		    (!htrap_tr0_inuse))) {
3841077Ssvemuri 			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
3851077Ssvemuri 			if ((ret == H_EOK) && (prev_bufsize != 0)) {
3861077Ssvemuri 				cmn_err(CE_CONT,
3871077Ssvemuri 				    "!cpu%d: previous HV traptrace buffer of "
3881077Ssvemuri 				    "size 0x%lx at address 0x%lx", cpuid,
3891077Ssvemuri 				    prev_bufsize, prev_buf);
3901077Ssvemuri 			}
3910Sstevel@tonic-gate 
3921077Ssvemuri 			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
3931859Sha137994 			    ctlp->d.hlimit /
3941859Sha137994 			    (sizeof (struct htrap_trace_record)), &size);
3951077Ssvemuri 			if (ret == H_EOK) {
3961077Ssvemuri 				ret = hv_ttrace_enable(\
3971077Ssvemuri 				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
3981077Ssvemuri 				if (ret != H_EOK) {
3991077Ssvemuri 					cmn_err(CE_WARN,
4001077Ssvemuri 					    "!cpu%d: HV traptracing not "
4011077Ssvemuri 					    "enabled, ta: 0x%x returned error: "
4021077Ssvemuri 					    "%ld", cpuid, TTRACE_ENABLE, ret);
4031077Ssvemuri 				} else {
4041077Ssvemuri 					if (ctlp->d.hvaddr_base == htrap_tr0)
4051077Ssvemuri 						htrap_tr0_inuse = 1;
4061077Ssvemuri 				}
4071077Ssvemuri 			} else {
4081077Ssvemuri 				cmn_err(CE_WARN,
4091077Ssvemuri 				    "!cpu%d: HV traptrace buffer not "
4101077Ssvemuri 				    "configured, ta: 0x%x returned error: %ld",
4111077Ssvemuri 				    cpuid, TTRACE_BUF_CONF, ret);
4121077Ssvemuri 			}
4131077Ssvemuri 			/*
4141077Ssvemuri 			 * set hvaddr_base to NULL when traptrace buffer
4151077Ssvemuri 			 * registration fails
4161077Ssvemuri 			 */
4171077Ssvemuri 			if (ret != H_EOK) {
4181077Ssvemuri 				ctlp->d.hvaddr_base = NULL;
4191077Ssvemuri 				ctlp->d.hlimit = 0;
4201077Ssvemuri 				ctlp->d.hpaddr_base = NULL;
4211077Ssvemuri 			}
4220Sstevel@tonic-gate 		}
4230Sstevel@tonic-gate 	} else {
4241077Ssvemuri 		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
4251077Ssvemuri 		if ((ret == H_EOK) && (prev_bufsize != 0)) {
4261077Ssvemuri 			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
4271077Ssvemuri 			    &prev_enable);
4281077Ssvemuri 			if (ret == H_EOK) {
4291077Ssvemuri 				if (ctlp->d.hvaddr_base == htrap_tr0)
4301077Ssvemuri 					htrap_tr0_inuse = 0;
4311077Ssvemuri 				ctlp->d.hvaddr_base = NULL;
4321077Ssvemuri 				ctlp->d.hlimit = 0;
4331077Ssvemuri 				ctlp->d.hpaddr_base = NULL;
4341077Ssvemuri 			} else
4351077Ssvemuri 				cmn_err(CE_WARN,
4361077Ssvemuri 				    "!cpu%d: HV traptracing is not disabled, "
4371077Ssvemuri 				    "ta: 0x%x returned error: %ld",
4381077Ssvemuri 				    cpuid, TTRACE_ENABLE, ret);
4391077Ssvemuri 		}
4400Sstevel@tonic-gate 	}
4411077Ssvemuri }
4421077Ssvemuri 
4431077Ssvemuri /*
4441077Ssvemuri  * This function cleans up the hypervisor traptrace buffer
4451077Ssvemuri  */
4461077Ssvemuri void
4471077Ssvemuri mach_htraptrace_cleanup(int cpuid)
4481077Ssvemuri {
4491859Sha137994 	if (mach_htraptrace_enable) {
4501859Sha137994 		TRAP_TRACE_CTL *ctlp;
4511859Sha137994 		caddr_t httrace_buf_va;
4521077Ssvemuri 
4531859Sha137994 		ASSERT(cpuid < max_ncpus);
4541077Ssvemuri 		ctlp = &trap_trace_ctl[cpuid];
4551859Sha137994 		httrace_buf_va = ctlp->d.hvaddr_base;
4561859Sha137994 		if (httrace_buf_va == htrap_tr0) {
4571859Sha137994 			bzero(httrace_buf_va, HTRAP_TSIZE);
4581859Sha137994 		} else if (httrace_buf_va != NULL) {
4591859Sha137994 			contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
4601077Ssvemuri 		}
4610Sstevel@tonic-gate 		ctlp->d.hvaddr_base = NULL;
4620Sstevel@tonic-gate 		ctlp->d.hlimit = 0;
4630Sstevel@tonic-gate 		ctlp->d.hpaddr_base = NULL;
4640Sstevel@tonic-gate 	}
4650Sstevel@tonic-gate }
4661991Sheppo 
4671991Sheppo /*
4681991Sheppo  * Load any required machine class (sun4v) specific drivers.
4691991Sheppo  */
4701991Sheppo void
4711991Sheppo load_mach_drivers(void)
4721991Sheppo {
4731991Sheppo 	/*
4741991Sheppo 	 * We don't want to load these LDOMs-specific
4752531Snarayan 	 * modules if domaining is not supported.  Also,
4761991Sheppo 	 * we must be able to run on non-LDOMs firmware.
4771991Sheppo 	 */
4784776Sjm22469 	if (!domaining_supported())
4791991Sheppo 		return;
4801991Sheppo 
4811991Sheppo 	/*
4821991Sheppo 	 * Load the core domain services module
4831991Sheppo 	 */
4841991Sheppo 	if (modload("misc", "ds") == -1)
4851991Sheppo 		cmn_err(CE_NOTE, "!'ds' module failed to load");
4861991Sheppo 
4871991Sheppo 	/*
4881991Sheppo 	 * Load the rest of the domain services
4891991Sheppo 	 */
4901991Sheppo 	if (modload("misc", "fault_iso") == -1)
4911991Sheppo 		cmn_err(CE_NOTE, "!'fault_iso' module failed to load");
4921991Sheppo 
4931991Sheppo 	if (modload("misc", "platsvc") == -1)
4941991Sheppo 		cmn_err(CE_NOTE, "!'platsvc' module failed to load");
4951991Sheppo 
4964776Sjm22469 	if (domaining_enabled() && modload("misc", "dr_cpu") == -1)
4971991Sheppo 		cmn_err(CE_NOTE, "!'dr_cpu' module failed to load");
4981991Sheppo 
4996441Sjm22469 	if (modload("misc", "dr_io") == -1)
5006441Sjm22469 		cmn_err(CE_NOTE, "!'dr_io' module failed to load");
5016441Sjm22469 
5021991Sheppo 	/*
5031991Sheppo 	 * Attempt to attach any virtual device servers. These
5041991Sheppo 	 * drivers must be loaded at start of day so that they
5051991Sheppo 	 * can respond to any updates to the machine description.
5061991Sheppo 	 *
5071991Sheppo 	 * Since it is quite likely that a domain will not support
5081991Sheppo 	 * one or more of these servers, failures are ignored.
5091991Sheppo 	 */
5101991Sheppo 
5111991Sheppo 	/* virtual disk server */
5121991Sheppo 	(void) i_ddi_attach_hw_nodes("vds");
5131991Sheppo 
5141991Sheppo 	/* virtual network switch */
5151991Sheppo 	(void) i_ddi_attach_hw_nodes("vsw");
5161991Sheppo 
5171991Sheppo 	/* virtual console concentrator */
5181991Sheppo 	(void) i_ddi_attach_hw_nodes("vcc");
5191991Sheppo }
520