/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/promif.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/cpupart.h>
#include <sys/disp.h>
#include <sys/hypervisor_api.h>
#include <sys/traptrace.h>
#include <sys/modctl.h>
#include <sys/ldoms.h>

#ifdef TRAPTRACE
int mach_htraptrace_enable = 1;
#else
int mach_htraptrace_enable = 0;
#endif
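/* nonzero while the preallocated htrap_tr0 buffer is registered with the HV */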
int htrap_tr0_inuse = 0;
extern char htrap_tr0[];	/* prealloc buf for boot cpu */

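/* base of the per-cpu MMU fault status areas, MMFSA_SIZE bytes per cpu */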
caddr_t	mmu_fault_status_area;

extern void sfmmu_set_tsbs(void);
/*
 * CPU IDLE optimization variables/routines
 */
static int enable_halt_idle_cpus = 1;

void
setup_trap_table(void)
{
	caddr_t mmfsa_va;
	extern caddr_t mmu_fault_status_area;
	mmfsa_va =
	    mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);

	intr_init(CPU);		/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	set_mmfsa_scratchpad(mmfsa_va);
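	/* hand the PROM the kernel trap table and this cpu's MMFSA */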
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
	sfmmu_set_tsbs();
}

void
phys_install_has_changed(void)
{

}

#ifdef N2_IDLE_WORKAROUND
/*
 * Tuneable to control enabling of IDLE loop workaround on Niagara2 1.x parts.
 * This workaround will be removed before the RR.
 */
int	n2_idle_workaround;
#endif

/*
 * Halt the present CPU until awoken via an interrupt
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
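	/* volatile: the idle loop below must re-read the run queue length */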
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

#ifdef N2_IDLE_WORKAROUND
	/*
	 * The following workaround for Niagara2, when enabled, forces the
	 * IDLE CPU to wait in a tight loop until something becomes runnable
	 * locally, minimizing the overall CPU usage on an IDLE CPU.
	 */
	if (n2_idle_workaround) {
		while (cpup->cpu_disp->disp_nrunnable == 0) {
			(void) hv_cpu_yield();
		}
	}
#endif

	/*
	 * We're on our way to being halted.  Wait until something becomes
	 * runnable locally or we are awakened (i.e. removed from the halt
	 * set).  Note that the call to hv_cpu_yield() can return even if we
	 * have nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check, then the poke will pop us out of the halted state.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    (!hset_update || CPU_IN_SET(cp->cp_mach->mc_haltset, cpun))) {
		(void) hv_cpu_yield();
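		/* briefly open an interrupt window so a pending poke is taken */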
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

void
mach_cpu_halt_idle()
{
	if (enable_halt_idle_cpus) {
		idle_cpu = cpu_halt;
		disp_enq_thread = cpu_wakeup;
	}
}

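/*
 * Carve out one MMU fault status area per possible cpu from nucleus data
 * space; setup_trap_table() later hands each cpu its MMFSA_SIZE slice.
 */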
int
ndata_alloc_mmfsa(struct memlist *ndata)
{
	size_t	size;

	size = MMFSA_SIZE * max_ncpus;
	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
	if (mmu_fault_status_area == NULL)
		return (-1);
	return (0);
}

void
mach_memscrub(void)
{
	/* no memscrub support for sun4v for now */
}

void
mach_fpras()
{
	/* no fpras support for sun4v for now */
}

void
mach_hw_copy_limit(void)
{
	/* HW copy limits set by individual CPU module */
}

/*
 * We need to enable soft ring functionality on the Niagara platform since
 * one strand can't handle interrupts for a 1Gb NIC. Set the tunable
 * ip_squeue_soft_ring by default on this platform. We can also set
 * ip_threads_per_cpu to track the number of threads per core. The variables
 * themselves are defined in space.c and used by the IP module.
 */
extern uint_t ip_threads_per_cpu;
extern boolean_t ip_squeue_soft_ring;
void
startup_platform(void)
{
	ip_squeue_soft_ring = B_TRUE;
}

/*
 * This function sets up the hypervisor traptrace buffer.
 * It is called by the boot cpu only.
 */
void
mach_htraptrace_setup(int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;
	int bootcpuid = getprocessorid(); /* invoked on boot cpu only */

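	/*
	 * The boot cpu reuses the preallocated htrap_tr0 buffer while it
	 * is free; every other cpu gets a size-aligned contiguous buffer.
	 */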
	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
	    !htrap_tr0_inuse)) {
		ctlp = &trap_trace_ctl[cpuid];
		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
		if (ctlp->d.hvaddr_base == NULL) {
			ctlp->d.hlimit = 0;
			ctlp->d.hpaddr_base = NULL;
			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
			    "traptrace buffer", cpuid);
		} else {
			ctlp->d.hlimit = HTRAP_TSIZE;
			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
		}
	}
}

/*
 * This function enables or disables hypervisor traptracing
 */
void
mach_htraptrace_configure(int cpuid)
{
	uint64_t ret;
	uint64_t prev_buf, prev_bufsize;
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL	*ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	if (mach_htraptrace_enable) {
		if ((ctlp->d.hvaddr_base != NULL) &&
		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
		    (!htrap_tr0_inuse))) {
			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
			if ((ret == H_EOK) && (prev_bufsize != 0)) {
				cmn_err(CE_CONT,
				    "!cpu%d: previous HV traptrace buffer of "
				    "size 0x%lx at address 0x%lx", cpuid,
				    prev_bufsize, prev_buf);
			}

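			/* the HV takes the size in traptrace records, not bytes */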
			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
			    ctlp->d.hlimit /
			    (sizeof (struct htrap_trace_record)), &size);
			if (ret == H_EOK) {
				ret = hv_ttrace_enable(
				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
				if (ret != H_EOK) {
					cmn_err(CE_WARN,
					    "!cpu%d: HV traptracing not "
					    "enabled, ta: 0x%x returned error: "
					    "%ld", cpuid, TTRACE_ENABLE, ret);
				} else {
					if (ctlp->d.hvaddr_base == htrap_tr0)
						htrap_tr0_inuse = 1;
				}
			} else {
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptrace buffer not "
				    "configured, ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_BUF_CONF, ret);
			}
			/*
			 * set hvaddr_base to NULL when traptrace buffer
			 * registration fails
			 */
			if (ret != H_EOK) {
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			}
		}
	} else {
		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
		if ((ret == H_EOK) && (prev_bufsize != 0)) {
			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
			    &prev_enable);
			if (ret == H_EOK) {
				if (ctlp->d.hvaddr_base == htrap_tr0)
					htrap_tr0_inuse = 0;
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			} else
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptracing is not disabled, "
				    "ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_ENABLE, ret);
		}
	}
}

/*
 * This function cleans up the hypervisor traptrace buffer
 */
void
mach_htraptrace_cleanup(int cpuid)
{
	if (mach_htraptrace_enable) {
		TRAP_TRACE_CTL *ctlp;
		caddr_t httrace_buf_va;

		ASSERT(cpuid < max_ncpus);
		ctlp = &trap_trace_ctl[cpuid];
		httrace_buf_va = ctlp->d.hvaddr_base;
		if (httrace_buf_va == htrap_tr0) {
			bzero(httrace_buf_va, HTRAP_TSIZE);
		} else if (httrace_buf_va != NULL) {
			contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
		}
		ctlp->d.hvaddr_base = NULL;
		ctlp->d.hlimit = 0;
		ctlp->d.hpaddr_base = NULL;
	}
}

/*
 * Load any required machine class (sun4v) specific drivers.
 */
void
load_mach_drivers(void)
{
	/*
	 * We don't want to load these LDOMs-specific
	 * modules if domaining is not supported.  Also,
	 * we must be able to run on non-LDOMs firmware.
	 */
	if (!(domaining_capabilities & DOMAINING_SUPPORTED))
		return;

	/*
	 * Load the core domain services module
	 */
	if (modload("misc", "ds") == -1)
		cmn_err(CE_NOTE, "!'ds' module failed to load");

	/*
	 * Load the rest of the domain services
	 */
	if (modload("misc", "fault_iso") == -1)
		cmn_err(CE_NOTE, "!'fault_iso' module failed to load");

	if (modload("misc", "platsvc") == -1)
		cmn_err(CE_NOTE, "!'platsvc' module failed to load");

	if ((domaining_capabilities & DOMAINING_ENABLED) &&
	    modload("misc", "dr_cpu") == -1)
		cmn_err(CE_NOTE, "!'dr_cpu' module failed to load");

	/*
	 * Attempt to attach any virtual device servers. These
	 * drivers must be loaded at start of day so that they
	 * can respond to any updates to the machine description.
	 *
	 * Since it is quite likely that a domain will not support
	 * one or more of these servers, failures are ignored.
	 */

	/* virtual disk server */
	(void) i_ddi_attach_hw_nodes("vds");

	/* virtual network switch */
	(void) i_ddi_attach_hw_nodes("vsw");

	/* virtual console concentrator */
	(void) i_ddi_attach_hw_nodes("vcc");
}