/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/privregs.h>
#include <sys/vmem.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/rwlock.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/kdi_machimpl.h>

/*
 * We are called with a pointer to a cell-sized argument array.
 * The service name (the first element of the argument array) is
 * the name of the callback being invoked.  When called, we are
 * running on the firmware's trap table as a trusted subroutine
 * of the firmware.
 *
 * We define entry points to allow callback handlers to be dynamically
 * added and removed, to support obpsym, which is a separate module
 * that can be dynamically loaded and unloaded and registers its
 * callback handlers dynamically.
 *
 * Note: the actual callback handler we register is the assembly
 * language glue, callback_handler, which takes care of switching
 * from a 64-bit stack and environment to a 32-bit stack and
 * environment, and back again, if the callback handler returns.
 * callback_handler calls vx_handler to process the callback.
 */

static kmutex_t vx_cmd_lock;	/* protect vx_cmd table */

#define	VX_CMD_MAX	10
#define	ENDADDR(a)	&a[sizeof (a) / sizeof (a[0])]
#define	vx_cmd_end	((struct vx_cmd *)(ENDADDR(vx_cmd)))

static struct vx_cmd {
	char	*service;	/* Service name */
	int	take_tba;	/* If non-zero we take over the tba */
	void	(*func)(cell_t *argument_array);
} vx_cmd[VX_CMD_MAX + 1];

void
init_vx_handler(void)
{
	extern int callback_handler(cell_t *arg_array);

	/*
	 * Initialize the lock protecting additions and deletions from
	 * the vx_cmd table.  At callback time we don't need to grab
	 * this lock.  Callback handlers do not need to modify the
	 * callback handler table.
	 */
	mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Tell OBP about our callback handler.
	 */
	(void) prom_set_callback((void *)callback_handler);
}

/*
 * Add a kernel callback handler to the kernel's list.
 * The table is static, so if you add a callback handler, increase
 * the value of VX_CMD_MAX.  Find the first empty slot and use it.
 */
void
add_vx_handler(char *name, int flag, void (*func)(cell_t *))
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL) {
			vp->service = name;
			vp->take_tba = flag;
			vp->func = func;
			mutex_exit(&vx_cmd_lock);
			return;
		}
	}
	mutex_exit(&vx_cmd_lock);

#ifdef DEBUG

	/*
	 * The table must have enough entries for all callback handlers.
	 * This shouldn't happen; increase VX_CMD_MAX if it does.
	 */
	cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
	/* NOTREACHED */

#else /* DEBUG */

	cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
	    name);

#endif /* DEBUG */

}

/*
 * Remove a vx_handler function -- find the name string in the table,
 * and clear it.
 */
void
remove_vx_handler(char *name)
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		vp->service = 0;
		vp->take_tba = 0;
		vp->func = 0;
		mutex_exit(&vx_cmd_lock);
		return;
	}
	mutex_exit(&vx_cmd_lock);
	cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
}
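
/*
 * Illustrative sketch (not part of this file): a dynamically loaded
 * module such as obpsym registers and later removes a handler roughly
 * like this, where the service name "my-service" and my_callback()
 * are hypothetical:
 *
 *	static void
 *	my_callback(cell_t *args)
 *	{
 *		... decode args, do the work, encode results in args ...
 *	}
 *
 *	add_vx_handler("my-service", 0, my_callback);
 *	...
 *	remove_vx_handler("my-service");
 *
 * The table stores the name pointer itself, so the string must remain
 * valid until remove_vx_handler() is called.
 */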

int
vx_handler(cell_t *argument_array)
{
	char *name;
	struct vx_cmd *vp;
	void *old_tba;

	name = p1275_cell2ptr(*argument_array);

	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == (char *)0)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		if (vp->take_tba != 0) {
			reestablish_curthread();
			if (tba_taken_over != 0)
				old_tba = set_tba((void *)&trap_table);
		}
		vp->func(argument_array);
		if ((vp->take_tba != 0) && (tba_taken_over != 0))
			(void) set_tba(old_tba);
		return (0);	/* Service name was known */
	}

	return (-1);	/* Service name unknown */
}

/*
 * PROM Locking Primitives
 *
 * These routines are called immediately before and immediately after calling
 * into the firmware.  The firmware is single-threaded and assumes that the
 * kernel will implement locking to prevent simultaneous service calls.  In
 * addition, some service calls (particularly character rendering) can be
 * slow, so we would like to sleep if we cannot acquire the lock to allow the
 * caller's CPU to continue to perform useful work in the interim.  Service
 * routines may also be called early in boot as part of slave CPU startup
 * when mutexes and cvs are not yet available (i.e. they are still running on
 * the prom's TLB handlers and cannot touch curthread).  Therefore, these
 * routines must reduce to a simple compare-and-swap spin lock when necessary.
 * Finally, kernel code may wish to acquire the firmware lock before executing
 * a block of code that includes service calls, so we also allow the firmware
 * lock to be acquired recursively by the owning CPU after disabling preemption.
 *
 * To meet these constraints, the lock itself is implemented as a compare-and-
 * swap spin lock on the global prom_cpu pointer.  We implement recursion by
 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
 * we disable preemption before acquiring the lock and leave it disabled once
 * the lock is held.  The kern_postprom() routine then enables preemption if
 * we drop the lock and prom_holdcnt returns to zero.  If the current CPU is
 * an adult and the lock is held by another adult CPU, we can safely sleep
 * until the lock is released.  To do so, we acquire the adaptive prom_mutex
 * and then sleep on prom_cv.  Therefore, service routines must not be called
 * from above LOCK_LEVEL on any adult CPU.  Finally, if recursive entry is
 * attempted on an adult CPU, we must also verify that curthread matches the
 * saved prom_thread (the original owner) to ensure that low-level interrupt
 * threads do not step on other threads running on the same CPU.
 */

static cpu_t *volatile prom_cpu;
static kthread_t *volatile prom_thread;
static uint32_t prom_holdcnt;
static kmutex_t prom_mutex;
static kcondvar_t prom_cv;

/*
 * The debugger uses PROM services, and is thus unable to run if any of the
 * CPUs on the system are executing in the PROM at the time of debugger entry.
 * If a CPU is determined to be in the PROM when the debugger is entered,
 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
 * entry when the given CPU returns from the PROM.  That CPU is then released
 * by the debugger, and is allowed to complete PROM-related work.
 */
int prom_exit_enter_debugger;

void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = casptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = casptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_add_32(&prom_holdcnt, 1);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}

/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
		return; /* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}
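
/*
 * Usage sketch (illustrative): every prom_*() service call is bracketed
 * by these routines automatically, but kernel code may also hold the
 * lock across a block of service calls, relying on the recursive
 * acquisition described above:
 *
 *	kern_preprom();		keep other CPUs out of the PROM
 *	prom_printf(...);	each call re-acquires the lock recursively
 *	prom_printf(...);
 *	kern_postprom();	hold count drops to zero; lock released
 */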

/*
 * If the frame buffer device is busy, briefly capture the other CPUs so that
 * another CPU executing code to manipulate the device does not execute at the
 * same time we are rendering characters.  Refer to the comments and code in
 * common/os/console.c for more information on these callbacks.
 *
 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
 * to idling other CPUs.  The idling mechanism will cross-trap the other CPUs
 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
 * them are holding the PROM lock before we idle them and then call into the
 * PROM routines that render characters to the frame buffer.
 */
int
console_enter(int busy)
{
	int s = 0;

	if (busy && panicstr == NULL) {
		kern_preprom();
		s = splhi();
		idle_other_cpus();
	}

	return (s);
}

void
console_exit(int busy, int spl)
{
	if (busy && panicstr == NULL) {
		resume_other_cpus();
		splx(spl);
		kern_postprom();
	}
}
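
/*
 * Illustrative sketch of the bracket the console code uses when the
 * frame buffer is the busy output device:
 *
 *	int s = console_enter(busy);	PROM lock taken, other CPUs idled
 *	...render characters via PROM service calls...
 *	console_exit(busy, s);		CPUs resumed, PROM lock dropped
 *
 * When busy is zero, or once the system has panicked, both calls are
 * no-ops.
 */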

/*
 * This routine is a special form of pause_cpus().  It ensures that
 * prom functions are callable while the cpus are paused.
 */
void
promsafe_pause_cpus(void)
{
	pause_cpus(NULL);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		start_cpus();
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		pause_cpus(NULL);
	}

	/* At this point all cpus are paused and none are in the prom */
}
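
/*
 * Typical use (sketch, assuming the usual pause_cpus()/start_cpus()
 * pairing, with cpu_lock held as pause_cpus() requires):
 *
 *	promsafe_pause_cpus();
 *	...work while other cpus are paused and out of the prom...
 *	start_cpus();
 */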

/*
 * This routine is a special form of xc_attention().  It ensures that
 * prom functions are callable while the cpus are at attention.
 */
void
promsafe_xc_attention(cpuset_t cpuset)
{
	xc_attention(cpuset);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		xc_dismissed(cpuset);
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		xc_attention(cpuset);
	}

	/* At this point all cpus are at attention and none are in the prom */
}


#if defined(PROM_32BIT_ADDRS)

#include <sys/promimpl.h>
#include <vm/seg_kmem.h>
#include <sys/kmem.h>
#include <sys/bootconf.h>

/*
 * These routines are only used to work around "poor feature interaction"
 * in OBP.  See bug 4115680 for details.
 *
 * Many of the promif routines need to allocate temporary buffers
 * with 32-bit addresses to pass in/out of the CIF.  The lifetime
 * of the buffers is extremely short: they are allocated and freed
 * around the CIF call.  We use vmem_alloc() to cache 32-bit memory.
 *
 * Note the code in promplat_free() to prevent exhausting the 32 bit
 * heap during boot.
 */
static void *promplat_last_free = NULL;
static size_t promplat_last_size;
static vmem_t *promplat_arena;
static kmutex_t promplat_lock;  /* protect arena, last_free, and last_size */

void *
promplat_alloc(size_t size)
{

	mutex_enter(&promplat_lock);
	if (promplat_arena == NULL) {
		promplat_arena = vmem_create("promplat", NULL, 0, 8,
		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
	}
	mutex_exit(&promplat_lock);

	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}

/*
 * Delaying the free() of small allocations gets more mileage
 * from pages during boot, otherwise a cycle of allocate/free
 * calls could burn through available heap32 space too quickly.
 */
void
promplat_free(void *p, size_t size)
{
	void *p2 = NULL;
	size_t s2;

	/*
	 * If VM is initialized, clean up any delayed free().
	 */
	if (kvseg.s_base != 0 && promplat_last_free != NULL) {
		mutex_enter(&promplat_lock);
		p2 = promplat_last_free;
		s2 = promplat_last_size;
		promplat_last_free = NULL;
		promplat_last_size = 0;
		mutex_exit(&promplat_lock);
		if (p2 != NULL) {
			vmem_free(promplat_arena, p2, s2);
			p2 = NULL;
		}
	}

	/*
	 * Do the free if VM is initialized or it's a large allocation.
	 */
	if (kvseg.s_base != 0 || size >= PAGESIZE) {
		vmem_free(promplat_arena, p, size);
		return;
	}

	/*
	 * Otherwise, do the last free request and delay this one.
	 */
	mutex_enter(&promplat_lock);
	if (promplat_last_free != NULL) {
		p2 = promplat_last_free;
		s2 = promplat_last_size;
	}
	promplat_last_free = p;
	promplat_last_size = size;
	mutex_exit(&promplat_lock);

	if (p2 != NULL)
		vmem_free(promplat_arena, p2, s2);
}

void
promplat_bcopy(const void *src, void *dst, size_t count)
{
	bcopy(src, dst, count);
}
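
/*
 * Sketch of the intended pattern (illustrative): a promif routine that
 * must hand the CIF a 32-bit addressable copy of a kernel buffer does
 * something like
 *
 *	char *p = promplat_alloc(len);
 *	if (p == NULL)
 *		return (-1);
 *	promplat_bcopy(kernbuf, p, len);
 *	...pass p to the CIF service call...
 *	promplat_free(p, len);
 */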

#endif /* PROM_32BIT_ADDRS */

static prom_generation_cookie_t prom_tree_gen;
static krwlock_t prom_tree_lock;

int
prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
    prom_generation_cookie_t *ckp)
{
	int chg, rv;

	rw_enter(&prom_tree_lock, RW_READER);
	/*
	 * If the tree has changed since the caller last accessed it,
	 * pass 1 as the second argument to the callback function;
	 * otherwise 0.
	 */
	if (ckp != NULL && *ckp != prom_tree_gen) {
		*ckp = prom_tree_gen;
		chg = 1;
	} else
		chg = 0;
	rv = callback(arg, chg);
	rw_exit(&prom_tree_lock);
	return (rv);
}

int
prom_tree_update(int (*callback)(void *arg), void *arg)
{
	int rv;

	rw_enter(&prom_tree_lock, RW_WRITER);
	prom_tree_gen++;
	rv = callback(arg);
	rw_exit(&prom_tree_lock);
	return (rv);
}
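
/*
 * Usage sketch (illustrative): readers wrap device tree walks in
 * prom_tree_access() so concurrent updates are excluded, and may pass
 * a generation cookie to learn whether the tree changed since their
 * last visit; writers wrap modifications in prom_tree_update().  The
 * callback walk_cb() below is hypothetical:
 *
 *	static int
 *	walk_cb(void *arg, int has_changed)
 *	{
 *		if (has_changed)
 *			...refresh any cached state before walking...
 *		return (0);
 *	}
 *
 *	static prom_generation_cookie_t gen;
 *	(void) prom_tree_access(walk_cb, NULL, &gen);
 */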