/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/privregs.h>
#include <sys/vmem.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/rwlock.h>
#include <sys/reboot.h>
#include <sys/kdi.h>

/*
 * We are called with a pointer to a cell-sized argument array.
 * The service name (the first element of the argument array) is
 * the name of the callback being invoked.  When called, we are
 * running on the firmware's trap table as a trusted subroutine
 * of the firmware.
 *
 * We define entry points to allow callback handlers to be dynamically
 * added and removed, to support obpsym, which is a separate module
 * that can be dynamically loaded and unloaded and registers its
 * callback handlers dynamically.
 *
 * Note: the actual callback handler we register is the assembly
 * language glue, callback_handler, which takes care of switching
 * from a 64-bit stack and environment to a 32-bit stack and
 * environment, and back again if the callback handler returns.
 * callback_handler calls vx_handler to process the callback.
 */
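
/*
 * For illustration only (my_handler is hypothetical, and the meaning
 * of cells beyond args[0] is service-specific): a registered handler
 * receives the same cell array vx_handler does, so it can recover the
 * service name from the first cell just as vx_handler does below.
 *
 *	static void
 *	my_handler(cell_t *args)
 *	{
 *		char *service = p1275_cell2ptr(args[0]);
 *		cell_t *svc_args = &args[1];
 *	}
 */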

static kmutex_t vx_cmd_lock;	/* protect vx_cmd table */

#define	VX_CMD_MAX	10
#define	ENDADDR(a)	&a[sizeof (a) / sizeof (a[0])]
#define	vx_cmd_end	((struct vx_cmd *)(ENDADDR(vx_cmd)))

static struct vx_cmd {
	char	*service;	/* Service name */
	int	take_tba;	/* If non-zero we take over the tba */
	void	(*func)(cell_t *argument_array);
} vx_cmd[VX_CMD_MAX+1];

void
init_vx_handler(void)
{
	extern int callback_handler(cell_t *arg_array);

	/*
	 * Initialize the lock protecting additions to and deletions
	 * from the vx_cmd table.  At callback time we don't need to
	 * grab this lock.  Callback handlers do not need to modify the
	 * callback handler table.
	 */
	mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Tell OBP about our callback handler.
	 */
	(void) prom_set_callback((void *)callback_handler);
}

/*
 * Add a kernel callback handler to the kernel's list.  The table is
 * static, so if you add a callback handler, increase the value of
 * VX_CMD_MAX.  We find the first empty slot and use it.
 */
void
add_vx_handler(char *name, int flag, void (*func)(cell_t *))
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL) {
			vp->service = name;
			vp->take_tba = flag;
			vp->func = func;
			mutex_exit(&vx_cmd_lock);
			return;
		}
	}
	mutex_exit(&vx_cmd_lock);

#ifdef	DEBUG

	/*
	 * The table is full: there must be enough entries to handle
	 * all callback registrations.  This shouldn't happen; increase
	 * VX_CMD_MAX if it does.
	 */
	cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
	/* NOTREACHED */

#else	/* DEBUG */

	cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
	    name);

#endif	/* DEBUG */

}
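
/*
 * A minimal usage sketch (the service name and handler below are
 * hypothetical; obpsym does the equivalent when it loads and
 * unloads): pair each registration with a removal, and pass a
 * non-zero flag only if the handler must run on the kernel's trap
 * table.
 *
 *	extern void sym_callback(cell_t *args);
 *
 *	add_vx_handler("sym-to-value", 0, sym_callback);
 *
 * and later, at module unload time:
 *
 *	remove_vx_handler("sym-to-value");
 */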

/*
 * Remove a vx_handler function -- find the name string in the table,
 * and clear it.
 */
void
remove_vx_handler(char *name)
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		vp->service = 0;
		vp->take_tba = 0;
		vp->func = 0;
		mutex_exit(&vx_cmd_lock);
		return;
	}
	mutex_exit(&vx_cmd_lock);
	cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
}

int
vx_handler(cell_t *argument_array)
{
	char *name;
	struct vx_cmd *vp;
	void *old_tba;

	name = p1275_cell2ptr(*argument_array);

	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == (char *)0)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		if (vp->take_tba != 0) {
			reestablish_curthread();
			if (tba_taken_over != 0)
				old_tba = set_tba((void *)&trap_table);
		}
		vp->func(argument_array);
		if ((vp->take_tba != 0) && (tba_taken_over != 0))
			(void) set_tba(old_tba);
		return (0);	/* Service name was known */
	}

	return (-1);		/* Service name unknown */
}

/*
 * PROM Locking Primitives
 *
 * These routines are called immediately before and immediately after calling
 * into the firmware.  The firmware is single-threaded and assumes that the
 * kernel will implement locking to prevent simultaneous service calls.  In
 * addition, some service calls (particularly character rendering) can be
 * slow, so we would like to sleep if we cannot acquire the lock to allow the
 * caller's CPU to continue to perform useful work in the interim.  Service
 * routines may also be called early in boot as part of slave CPU startup
 * when mutexes and cvs are not yet available (i.e. they are still running on
 * the prom's TLB handlers and cannot touch curthread).  Therefore, these
 * routines must reduce to a simple compare-and-swap spin lock when necessary.
 * Finally, kernel code may wish to acquire the firmware lock before executing
 * a block of code that includes service calls, so we also allow the firmware
 * lock to be acquired recursively by the owning CPU after disabling preemption.
 *
 * To meet these constraints, the lock itself is implemented as a compare-and-
 * swap spin lock on the global prom_cpu pointer.  We implement recursion by
 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
 * we disable preemption before acquiring the lock and leave it disabled once
 * the lock is held.  The kern_postprom() routine then enables preemption if
 * we drop the lock and prom_holdcnt returns to zero.  If the current CPU is
 * an adult and the lock is held by another adult CPU, we can safely sleep
 * until the lock is released.  To do so, we acquire the adaptive prom_mutex
 * and then sleep on prom_cv.  Therefore, service routines must not be called
 * from above LOCK_LEVEL on any adult CPU.  Finally, if recursive entry is
 * attempted on an adult CPU, we must also verify that curthread matches the
 * saved prom_thread (the original owner) to ensure that low-level interrupt
 * threads do not step on other threads running on the same CPU.
 */
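
/*
 * A minimal usage sketch, per the rules above (prom_getprop() stands
 * in for an arbitrary service call): bracket any block of service
 * calls with the lock primitives.  The pair may be nested by the
 * owning CPU, since the lock is recursive:
 *
 *	kern_preprom();
 *	(void) prom_getprop(node, "banner-name", buf);
 *	kern_postprom();
 */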

static cpu_t *volatile prom_cpu;
static kthread_t *volatile prom_thread;
static uint32_t prom_holdcnt;
static kmutex_t prom_mutex;
static kcondvar_t prom_cv;

/*
 * The debugger uses PROM services, and is thus unable to run if any of the
 * CPUs on the system are executing in the PROM at the time of debugger entry.
 * If a CPU is determined to be in the PROM when the debugger is entered,
 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
 * entry when the given CPU returns from the PROM.  That CPU is then released
 * by the debugger, and is allowed to complete PROM-related work.
 */
int prom_exit_enter_debugger;

void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = casptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = casptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_add_32(&prom_holdcnt, 1);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}

/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p", cp, prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p", prom_cpu);

	if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
		return; /* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kdi_dvec_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}

/*
 * If the frame buffer device is busy, briefly capture the other CPUs so that
 * another CPU executing code to manipulate the device does not execute at the
 * same time we are rendering characters.  Refer to the comments and code in
 * common/os/console.c for more information on these callbacks.
 *
 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
 * to idling other CPUs.  The idling mechanism will cross-trap the other CPUs
 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
 * them are holding the PROM lock before we idle them and then call into the
 * PROM routines that render characters to the frame buffer.
 */
int
console_enter(int busy)
{
	int s = 0;

	if (busy && panicstr == NULL) {
		kern_preprom();
		s = splhi();
		idle_other_cpus();
	}

	return (s);
}

void
console_exit(int busy, int spl)
{
	if (busy && panicstr == NULL) {
		resume_other_cpus();
		splx(spl);
		kern_postprom();
	}
}
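
/*
 * The expected pairing, sketched (common/os/console.c drives these as
 * its enter/exit callbacks; "busy" reflects whether the frame buffer
 * device is in use):
 *
 *	int s = console_enter(busy);
 *	... render characters via the firmware ...
 *	console_exit(busy, s);
 */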

/*
 * This routine is a special form of pause_cpus().  It ensures that
 * prom functions are callable while the cpus are paused.
 */
void
promsafe_pause_cpus(void)
{
	pause_cpus(NULL);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		start_cpus();
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		pause_cpus(NULL);
	}

	/* At this point all cpus are paused and none are in the prom */
}

/*
 * This routine is a special form of xc_attention().  It ensures that
 * prom functions are callable while the cpus are at attention.
 */
void
promsafe_xc_attention(cpuset_t cpuset)
{
	xc_attention(cpuset);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		xc_dismissed(cpuset);
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		xc_attention(cpuset);
	}

	/* At this point all cpus are at attention and none are in the prom */
}
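
/*
 * A hedged sketch of the intended pattern: a caller that must stop the
 * other CPUs but still make service calls uses the promsafe form, then
 * releases the CPUs with the usual counterpart:
 *
 *	promsafe_pause_cpus();
 *	... prom calls are safe here; no stopped CPU holds the lock ...
 *	start_cpus();
 */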


#if defined(PROM_32BIT_ADDRS)

#include <sys/promimpl.h>
#include <vm/seg_kmem.h>
#include <sys/kmem.h>
#include <sys/bootconf.h>

/*
 * These routines are only used to work around "poor feature interaction"
 * in OBP.  See bug 4115680 for details.
 *
 * Many of the promif routines need to allocate temporary buffers
 * with 32-bit addresses to pass in/out of the CIF.  The lifetime
 * of the buffers is extremely short: they are allocated and freed
 * around the CIF call.  We use vmem_alloc() to cache 32-bit memory.
 *
 * Note the code in promplat_free() to prevent exhausting the 32-bit
 * heap during boot.
 */
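
/*
 * A minimal sketch of the intended round trip ("len", "src", and
 * "dst" are hypothetical; the CIF call itself is elided):
 *
 *	char *buf = promplat_alloc(len);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	promplat_bcopy(src, buf, len);
 *	... pass buf, which has a 32-bit address, through the CIF ...
 *	promplat_bcopy(buf, dst, len);
 *	promplat_free(buf, len);
 */
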
static void *promplat_last_free = NULL;
static size_t promplat_last_size;
static vmem_t *promplat_arena;
static kmutex_t promplat_lock;  /* protect arena, last_free, and last_size */

void *
promplat_alloc(size_t size)
{
	mutex_enter(&promplat_lock);
	if (promplat_arena == NULL) {
		promplat_arena = vmem_create("promplat", NULL, 0, 8,
		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
	}
	mutex_exit(&promplat_lock);

	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}

/*
 * Delaying the free() of small allocations gets more mileage
 * from pages during boot; otherwise a cycle of allocate/free
 * calls could burn through available heap32 space too quickly.
 */
void
promplat_free(void *p, size_t size)
{
	void *p2 = NULL;
	size_t s2;

	/*
	 * If VM is initialized, clean up any delayed free().
	 */
	if (kvseg.s_base != 0 && promplat_last_free != NULL) {
		mutex_enter(&promplat_lock);
		p2 = promplat_last_free;
		s2 = promplat_last_size;
		promplat_last_free = NULL;
		promplat_last_size = 0;
		mutex_exit(&promplat_lock);
		if (p2 != NULL) {
			vmem_free(promplat_arena, p2, s2);
			p2 = NULL;
		}
	}

	/*
	 * Do the free if VM is initialized or it's a large allocation.
	 */
	if (kvseg.s_base != 0 || size >= PAGESIZE) {
		vmem_free(promplat_arena, p, size);
		return;
	}

	/*
	 * Otherwise, do the last free request and delay this one.
	 */
	mutex_enter(&promplat_lock);
	if (promplat_last_free != NULL) {
		p2 = promplat_last_free;
		s2 = promplat_last_size;
	}
	promplat_last_free = p;
	promplat_last_size = size;
	mutex_exit(&promplat_lock);

	if (p2 != NULL)
		vmem_free(promplat_arena, p2, s2);
}

void
promplat_bcopy(const void *src, void *dst, size_t count)
{
	bcopy(src, dst, count);
}

#endif /* PROM_32BIT_ADDRS */

static prom_generation_cookie_t prom_tree_gen;
static krwlock_t prom_tree_lock;

int
prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
    prom_generation_cookie_t *ckp)
{
	int chg, rv;

	rw_enter(&prom_tree_lock, RW_READER);
	/*
	 * If the tree has changed since the caller last accessed it,
	 * pass 1 as the second argument to the callback function;
	 * otherwise pass 0.
	 */
	if (ckp != NULL && *ckp != prom_tree_gen) {
		*ckp = prom_tree_gen;
		chg = 1;
	} else
		chg = 0;
	rv = callback(arg, chg);
	rw_exit(&prom_tree_lock);
	return (rv);
}

int
prom_tree_update(int (*callback)(void *arg), void *arg)
{
	int rv;

	rw_enter(&prom_tree_lock, RW_WRITER);
	prom_tree_gen++;
	rv = callback(arg);
	rw_exit(&prom_tree_lock);
	return (rv);
}
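
/*
 * A usage sketch (walk_cb and my_gen are hypothetical): a reader keeps
 * a generation cookie across calls so its callback learns whether the
 * tree changed since its last access, while writers modify the tree
 * via prom_tree_update(), which bumps the generation under the write
 * lock:
 *
 *	static prom_generation_cookie_t my_gen;
 *
 *	static int
 *	walk_cb(void *arg, int has_changed)
 *	{
 *		if (has_changed)
 *			... rebuild any cached node handles ...
 *		return (0);
 *	}
 *
 *	(void) prom_tree_access(walk_cb, NULL, &my_gen);
 */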