xref: /onnv-gate/usr/src/uts/i86pc/os/mp_machdep.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#define	PSMI_1_5
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/chip.h>
#include <sys/disp.h>
#include <sys/cpu.h>
#include <sys/archsystm.h>

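/*
 * OFFSETOF() mirrors offsetof(): it yields the byte offset of member m
 * within struct s.  mach_get_platform() below divides such offsets by the
 * size of a function pointer to work out how many psm_ops entries a PSM
 * module built against an older PSMI version actually defines.
 */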
#define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))

/*
 *	Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static uint64_t mach_calchz(uint32_t pit_counter, uint64_t *processor_clks);
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static void mach_set_softintr(int ipl);
static void mach_cpu_start(int cpun);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
static timestruc_t mach_tod_get(void);
static void mach_tod_set(timestruc_t ts);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static void cpu_halt(void);
static void cpu_wakeup(cpu_t *, int);
/*
 *	External reference functions
 */
extern void return_instr();
extern timestruc_t (*todgetf)(void);
extern void (*todsetf)(timestruc_t);
extern long gmt_lag;
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);

/*
 *	PSM functions initialization
 */
void (*psm_shutdownf)(int, int)	= return_instr;
void (*psm_preshutdownf)(int, int) = return_instr;
void (*psm_notifyf)(int)	= return_instr;
void (*psm_set_idle_cpuf)(int)	= return_instr;
void (*psm_unset_idle_cpuf)(int) = return_instr;
void (*psminitf)()		= mach_init;
void (*picinitf)() 		= return_instr;
int (*clkinitf)(int, int *) 	= (int (*)(int, int *))return_instr;
void (*cpu_startf)() 		= return_instr;
int (*ap_mlsetup)() 		= (int (*)(void))return_instr;
void (*send_dirintf)() 		= return_instr;
void (*setspl)(int)		= return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*setsoftint)(int)		= (void (*)(int))return_instr;
int (*slvltovect)(int)		= (int (*)(int))return_instr;
int (*setlvl)(int, int *)	= (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int)	= (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int)	= mp_disable_intr;
void (*psm_enable_intr)(int)	= mp_enable_intr;
hrtime_t (*gethrtimef)(void)	= dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void)	= dummy_hrtime;
void (*scalehrtimef)(hrtime_t *)	= dummy_scalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
int (*psm_todgetf)(todinfo_t *) = (int (*)(todinfo_t *))return_instr;
int (*psm_todsetf)(todinfo_t *) = (int (*)(todinfo_t *))return_instr;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
    int *) = mach_intr_ops;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void)	= return_instr;

int tsc_gethrtime_enable = 1;
int tsc_gethrtime_initted = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * If non-zero, idle cpus will be halted when there's
 * no work to do.
 */
int	halt_idle_cpus = 1;

#if defined(__amd64)
/*
 * If non-zero, will use cr8 for interrupt priority masking
 * We declare this here since install_spl is called from here
 * (where this is checked).
 */
int	intpri_use_cr8 = 0;
#endif	/* __amd64 */

#ifdef	_SIMULATOR_SUPPORT

int simulator_run = 0;	/* patch to non-zero if running under simics */

#endif	/* _SIMULATOR_SUPPORT */

/* ARGSUSED */
void
chip_plat_define_chip(cpu_t *cp, chip_def_t *cd)
{
	if (x86_feature & (X86_HTT|X86_CMP))
		/*
		 * Hyperthreading is SMT
		 */
		cd->chipd_type = CHIP_SMT;
	else
		cd->chipd_type = CHIP_DEFAULT;

	cd->chipd_rechoose_adj = 0;
}

/*
 * Routine to ensure that initial callers of hrtime get 0 as the return value
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

/*
 * Halt the present CPU until awoken via an interrupt
 */
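/*
 * cpu_halt() is installed as the idle_cpu hook by mach_init() when
 * halt_idle_cpus is set; cpu_wakeup() below is its counterpart, installed
 * as disp_enq_thread by mach_smpinit() on MP systems.
 */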
static void
cpu_halt(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpun = cpup->cpu_id;
	cpupart_t	*cp;
	int		hset_update = 1;

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;
	/*
	 * We're on our way to being halted.
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 */
	cli();

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 * If and when this code is made common (running on SPARC),
	 * membar_producer()s will be needed after the update of
	 * cpu_disp_flags to propagate the HALTED flag to global visibility.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		cp = cpup->cpu_part;
		CPUSET_ATOMIC_ADD(cp->cp_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * If work becomes available *after* we do this check
	 * and it's determined that the work should be ours,
	 * we won't miss it since we'll be notified with a "poke"
	 * ...which will pop us right back out of the halted state.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		sti();
		return;
	}

	/*
	 * Call the halt sequence:
	 * sti
	 * hlt
	 */
	i86_halt();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
	}
}


/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_MED_PIL-1);
	/*
	 *	set base spl to prevent the next swtch to idle from
	 *	lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL-1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL-1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}

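/*
 * Merge the given owner's psm_ops into the system default table
 * (mach_set[0]): only the entries the PSM module actually supplies
 * (non-NULL) override the defaults, and the number of entries considered
 * is capped according to the PSMI version the module was built against.
 */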
static void
mach_get_platform(int owner)
{
	void		**srv_opsp;
	void		**clt_opsp;
	int		i;
	int		total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
				sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}

static void
mach_construct_info()
{
	register struct psm_sw *swp;
	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int	conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
			"Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
					swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
			"Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

static void
mach_init()
{
	register struct psm_ops  *pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl	= pops->psm_addspl;
	delspl	= pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;
	if (pops->psm_tod_get) {
		todgetf = mach_tod_get;
		psm_todgetf = pops->psm_tod_get;
	}
	if (pops->psm_tod_set) {
		todsetf = mach_tod_set;
		psm_todsetf = pops->psm_tod_set;
	}
	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks
	 * to enable CPU halting when idle
	 */
#if defined(_SIMULATOR_SUPPORT)
	if (halt_idle_cpus && !simulator_run)
		idle_cpu = cpu_halt;
#else
	if (halt_idle_cpus)
		idle_cpu = cpu_halt;
#endif	/* _SIMULATOR_SUPPORT */

	mach_smpinit();
}

static void
mach_smpinit(void)
{
	register struct psm_ops  *pops;
	register processorid_t cpu_id;
	int	 cnt;
	int	 cpumask;

	pops = mach_set[0];

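	/*
	 * Walk the PSM's processor id list (terminated by -1) and build a
	 * bitmask of the CPUs it reports; note that the 1 << cpu_id encoding
	 * assumes processor ids small enough to fit in an int bitmask.
	 */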
	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	for (cnt = 0, cpumask = 0; cpu_id != -1; cnt++) {
		cpumask |= 1 << cpu_id;
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	cpu_startf = mach_cpu_start;
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	/* check for multiple cpus */
	if (cnt < 2)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
#if defined(_SIMULATOR_SUPPORT)
	if (halt_idle_cpus && !simulator_run) {
		disp_enq_thread = cpu_wakeup;
	}
#else
	if (halt_idle_cpus) {
		disp_enq_thread = cpu_wakeup;
	}
#endif	/* _SIMULATOR_SUPPORT */

	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr  = pops->psm_enable_intr;

	psm_get_ipivect = pops->psm_get_ipivect;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
		(*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
		(caddr_t)X_CALL_HIPRI, NULL, NULL);
	(void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
		(*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
		(caddr_t)X_CALL_MEDPRI, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}

static void
mach_picinit()
{
	register struct psm_ops  *pops;
	extern void install_spl(void);	/* XXX: belongs in a header file */
#if defined(__amd64) && defined(DEBUG)
	extern void *spl_patch, *slow_spl, *setsplhi_patch, *slow_setsplhi;
#endif

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	setspl(CPU->cpu_pri);

	/* Install proper spl routine now that we can Program the PIC   */
#if defined(__amd64)
	/*
	 * It would be better if we could check this at compile time
	 */
	ASSERT(((uintptr_t)&slow_setsplhi - (uintptr_t)&setsplhi_patch < 128) &&
		((uintptr_t)&slow_spl - (uintptr_t)&spl_patch < 128));
#endif
	install_spl();
}

uint_t	cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

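/*
 * Example (illustrative numbers only): if freq_tsc() reports that
 * 100,000,000 processor clocks elapsed while the PIT, whose input clock
 * PIT_HZ is roughly 1.193182 MHz, counted 59,659 ticks, then
 * cpu_hz = 1193182 * 100000000 / 59659, i.e. about 2.0 GHz.
 */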
static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}

static uint64_t
mach_getcpufreq(void)
{
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (x86_feature & X86_TSC) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		processor_clks = freq_tsc(&pit_counter);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT
		 */
		processor_clks = freq_notsc(&pit_counter);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not add
 * to this array, instead improve the accuracy of the algorithm that determines
 * the clock speed of the processor or extend the implementation to support the
 * vendor as appropriate. This is here only to support adjusting the speed on
 * older slower processors that mach_fixcpufreq() would not be able to account
 * for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * two factors: for such fast parts, being off by this much is within the
 * manufacturing tolerances, and the measurement itself is difficult enough
 * to introduce a small error. This function uses some heuristics in order
 * to tweak the value that was measured to match what is most likely
 * printed on the part.
 *
 * Some examples:
 * 	AMD Athlon 1000 mhz measured as 998 mhz
 * 	Intel Pentium III Xeon 733 mhz measured as 731 mhz
 * 	Intel Pentium IV 1500 mhz measured as 1495 mhz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
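/*
 * Worked example (the 733 MHz case above): a part measured at 731 MHz
 * gives mul = (3 * 731 + 100) / 200 = 11, so near66 = 2201 / 3 = 733 with
 * delta66 = 2, while near50 = 750 with delta50 = 19; 733 is closer and
 * within the 6 MHz limit below, so cpu_freq is corrected to 733.
 */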
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}


static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHZ */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);

}


static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	register struct psm_ops  *pops;
	int resolution;

	pops = mach_set[0];

#ifdef	_SIMULATOR_SUPPORT
	if (!simulator_run)
		cpu_freq_hz = mach_getcpufreq();
	else
		cpu_freq_hz = 40000000; /* use 40 Mhz (hack for simulator) */
#else
	cpu_freq_hz = mach_getcpufreq();
#endif	/* _SIMULATOR_SUPPORT */

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
		gethrtimef = tsc_gethrtime;
		gethrtimeunscaledf = tsc_gethrtimeunscaled;
		scalehrtimef = tsc_scalehrtime;
		hrtime_tick = tsc_tick;
		tsc_gethrtime_initted = 1;
	} else {
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if ((preferred_mode == TIMER_ONESHOT) &&
		    (tsc_gethrtime_enable)) {

			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0)  {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}

		}

		/*
		 * Either periodic mode was requested or one-shot mode
		 * could not be set.
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * The psm should be able to do periodic mode, so we do not
		 * check the return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}

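/*
 * Decide how soft interrupt levels are raised: if the PSM provides no
 * psm_set_softintr entry point, or maps the level to PSM_SV_SOFTWARE,
 * soft interrupts are pended purely in software via set_pending();
 * otherwise the PSM's hardware vector is used, possibly through the
 * mixed scheme in mach_set_softintr() below.
 */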
static int
mach_softlvl_to_vect(register int ipl)
{
	register int softvect;
	register struct psm_ops  *pops;

	pops = mach_set[0];

	/* check for null handler for set soft interrupt call		*/
	if (pops->psm_set_softintr == NULL) {
		setsoftint = set_pending;
		return (PSM_SV_SOFTWARE);
	}

	softvect = (*pops->psm_softlvl_to_irq)(ipl);
	/* check for hardware scheme					*/
	if (softvect > PSM_SV_SOFTWARE) {
		setsoftint = pops->psm_set_softintr;
		return (softvect);
	}

	if (softvect == PSM_SV_SOFTWARE)
		setsoftint = set_pending;
	else	/* hardware and software mixed scheme			*/
		setsoftint = mach_set_softintr;

	return (PSM_SV_SOFTWARE);
}

static void
mach_set_softintr(register int ipl)
{
	register struct psm_ops  *pops;

	/* set software pending bits					*/
	set_pending(ipl);

	/*	check if dosoftint will be called at the end of intr	*/
	if (CPU_ON_INTR(CPU) || (curthread->t_intr))
		return;

	/* invoke hardware interrupt					*/
	pops = mach_set[0];
	(*pops->psm_set_softintr)(ipl);
}

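/*
 * Start an application processor via the PSM, then poll (up to
 * 20000 * 100us, i.e. about 2 seconds) for it to set CPU_READY.
 */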
static void
mach_cpu_start(register int cpun)
{
	register struct psm_ops  *pops;
	int	i;

	pops = mach_set[0];

	(*pops->psm_cpu_start)(cpun, rm_platter_va);

	/* wait for the auxiliary cpu to be ready			*/
	for (i = 20000; i; i--) {
		if (cpu[cpun]->cpu_flags & CPU_READY)
			return;
		drv_usecwait(100);
	}
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

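/*
 * The RTC stores only a two-digit year: values 69-99 are taken to mean
 * 1969-1999 and values 0-68 to mean 2000-2068 (tod_to_utc() uses 1900 as
 * its base).  Values 39-68 (2039-2068, presumably chosen as the range
 * beyond a 32-bit time_t) draw a one-time out-of-range warning.
 */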
static timestruc_t
mach_tod_get(void)
{
	timestruc_t ts;
	todinfo_t tod;
	static int mach_range_warn = 1;	/* warn only once */

	ASSERT(MUTEX_HELD(&tod_lock));

	/* The year returned from the RTC is only the last two digits */
	if ((*psm_todgetf)(&tod)) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		tod_fault_reset();
		return (ts);
	}

	/* assume that we wrap the rtc year back to zero at 2000 */
	if (tod.tod_year < 69) {
		if (mach_range_warn && tod.tod_year > 38) {
			cmn_err(CE_WARN, "hardware real-time clock is out "
				"of range -- time needs to be reset");
			mach_range_warn = 0;
		}
		tod.tod_year += 100;
	}

	/* tod_to_utc uses 1900 as base for the year */
	ts.tv_sec = tod_to_utc(tod) + gmt_lag;
	ts.tv_nsec = 0;

	return (ts);
}

static void
mach_tod_set(timestruc_t ts)
{
	todinfo_t tod = utc_to_tod(ts.tv_sec - gmt_lag);

	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod.tod_year >= 100)
		tod.tod_year -= 100;

	(*psm_todsetf)(&tod);
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set; deliver it
	 * as CE_PANIC.  Also, translate SL_ codes back to CE_
	 * codes for the psmi handler.
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * It provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
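/*
 * The defaults below deny MSI/MSI-X (PSM_INTR_OP_CHECK_MSI clears those
 * type bits), report a single vector for fixed interrupts, translate
 * vectors via psm_translate_irq(), advertise no capabilities, and fail
 * the pending/mask/shared/priority/set-capability operations, so a PSM
 * that needs more must supply its own psm_intr_ops.
 */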
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = (struct intrspec *)hdlp->ih_private;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}
1096