/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel framework functions for the fcode interpreter
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/esunddi.h>
#include <sys/ksynch.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/fcode.h>

#ifdef DEBUG
int fcode_debug = 0;
#else
int fcode_debug = 0;
#endif

static kmutex_t fc_request_lock;
static kmutex_t fc_resource_lock;
static kmutex_t fc_hash_lock;
static kmutex_t fc_device_tree_lock;
static kmutex_t fc_phandle_lock;
static kcondvar_t fc_request_cv;
static struct fc_request *fc_request_head;
static int fc_initialized;

static void fcode_timer(void *);

int fcode_timeout = 300;	/* seconds */

int fcodem_unloadable;

extern int hz;

/*
 * Initialize the fcode interpreter framework.  This must be called
 * before any other part of the fcode interpreter framework, including
 * the driver, is used.
 */
static void
fcode_init(void)
{
	if (fc_initialized)
		return;

	mutex_init(&fc_request_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_resource_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_hash_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_device_tree_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_phandle_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&fc_request_cv, NULL, CV_DRIVER, NULL);
	++fc_initialized;
}

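/*
 * Tear down the synchronization state set up by fcode_init().  Called
 * from _init() if mod_install() fails and from _fini() when the module
 * is allowed to unload.
 */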
static void
fcode_fini(void)
{
	mutex_destroy(&fc_request_lock);
	mutex_destroy(&fc_resource_lock);
	mutex_destroy(&fc_hash_lock);
	mutex_destroy(&fc_device_tree_lock);
	mutex_destroy(&fc_phandle_lock);
	cv_destroy(&fc_request_cv);
	fc_initialized = 0;
}

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "FCode framework 1.13"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

int
_init(void)
{
	int error;

	fcode_init();
	if ((error = mod_install(&modlinkage)) != 0)
		fcode_fini();
	return (error);
}

int
_fini(void)
{
	int error = EBUSY;

	if (fcodem_unloadable)
		if ((error = mod_remove(&modlinkage)) == 0)
			fcode_fini();

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Framework function to invoke the interpreter.  Wait and return when the
 * interpreter is done.  See fcode.h for details.
 */
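/*
 * Illustrative sketch only (not part of the framework): a bus nexus
 * that wants a dropin fcode image interpreted would typically allocate
 * an ops handle, run the interpreter, and unwind on failure.  The names
 * fc_ops_alloc_handle() and fc_ops_free_handle() below are stand-ins
 * for whatever bus-specific handle allocator and ops vector the caller
 * actually provides.
 *
 *	fco_handle_t handle = fc_ops_alloc_handle(ap, child, fcode_addr,
 *	    fcode_size, unit_address, NULL);
 *
 *	if (fcode_interpreter(ap, fc_ops, handle) != FC_SUCCESS) {
 *		fc_ops_free_handle(handle);
 *		return (DDI_FAILURE);
 *	}
 */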
int
fcode_interpreter(dev_info_t *ap, fc_ops_t *ops, fco_handle_t handle)
{
	struct fc_request *fp, *qp;
	int error;

	ASSERT(fc_initialized);
	ASSERT(ap);
	ASSERT(ops);
	ASSERT(handle);

	/*
	 * Create a request structure
	 */
	fp = kmem_zalloc(sizeof (struct fc_request), KM_SLEEP);

	fp->next = NULL;
	fp->busy = FC_R_INIT;
	fp->error = FC_SUCCESS;
	fp->ap_dip = ap;
	fp->ap_ops = ops;
	fp->handle = handle;

	/*
	 * Add the request to the end of the request list.
	 */
	mutex_enter(&fc_request_lock);

	if (fc_request_head == NULL)
		fc_request_head = fp;
	else {
		for (qp = fc_request_head; qp->next != NULL; qp = qp->next)
			/* empty */;
		qp->next = fp;
	}
	mutex_exit(&fc_request_lock);

	/*
	 * log a message (ie: i_ddi_log_event) indicating that a request
	 * has been queued to start the userland fcode interpreter.
	 * This call is the glue to the eventd and automates the process.
	 */

	/*
	 * Signal the driver if it's waiting for a request to be queued.
	 */
	cv_broadcast(&fc_request_cv);

	/*
	 * Wait for the request to be serviced
	 */
	mutex_enter(&fc_request_lock);
	fp->timeout = timeout(fcode_timer, fp, hz * fcode_timeout);
	while (fp->busy != FC_R_DONE)
		cv_wait(&fc_request_cv, &fc_request_lock);

	if (fp->timeout) {
		(void) untimeout(fp->timeout);
		fp->timeout = NULL;
	}

	/*
	 * Remove the request from the queue (while still holding the lock)
	 */
	if (fc_request_head == fp)
		fc_request_head = fp->next;
	else {
		for (qp = fc_request_head; qp->next != fp; qp = qp->next)
			/* empty */;
		qp->next = fp->next;
	}
	mutex_exit(&fc_request_lock);

	FC_DEBUG1(2, CE_CONT, "fcode_interpreter: request finished, fp %p\n",
	    fp);

	/*
	 * Free the request structure and return any errors.
	 */
	error = fp->error;
	kmem_free(fp, sizeof (struct fc_request));
	return (error);
}

/*
 * Timeout requests that don't get picked up by the interpreter.  This
 * would happen if the daemon is not running.  If the timer goes off
 * and its state is not FC_R_INIT, then the interpreter has picked up the
 * request.
 */
static void
fcode_timer(void *arg)
{
	struct fc_request *fp = arg;

	mutex_enter(&fc_request_lock);
	fp->timeout = 0;
	if (fp->busy == FC_R_INIT) {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter did not pick up request\n");
		fp->busy = FC_R_DONE;
		fp->error = FC_TIMEOUT;
		mutex_exit(&fc_request_lock);
		cv_broadcast(&fc_request_cv);
		return;
	} else if (fp->error != FC_SUCCESS) {
		/*
		 * An error was detected, but didn't close the driver.
		 * This will allow the process to error out, returning
		 * the interpreter error code instead of FC_TIMEOUT.
		 */
		fp->busy = FC_R_DONE;
		cv_broadcast(&fc_request_cv);
		mutex_exit(&fc_request_lock);
		return;
	} else {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter is executing request\n");
	}
	mutex_exit(&fc_request_lock);
}

/*
 * This is the function the driver calls to wait for and get
 * a request.  The call should be interruptible since it's done
 * at read(2) time, so allow for signals to interrupt us.
 *
 * Return NULL if the wait was interrupted, else return a pointer
 * to the fc_request structure (marked as busy).
 *
 * Note that we have to check for a request first, before waiting,
 * in case the request is already queued.  In this case, the signal
 * may have already been delivered.
 */
struct fc_request *
fc_get_request(void)
{
	struct fc_request *fp;

	ASSERT(fc_initialized);

	mutex_enter(&fc_request_lock);

	/*CONSTANTCONDITION*/
	while (1) {
		for (fp = fc_request_head; fp != NULL; fp = fp->next) {
			if (fp->busy == FC_R_INIT) {
				fp->busy = FC_R_BUSY;
				mutex_exit(&fc_request_lock);
				return (fp);
			}
		}
		if (cv_wait_sig(&fc_request_cv, &fc_request_lock) == 0) {
			mutex_exit(&fc_request_lock);
			return (NULL);
		}
	}
	/*NOTREACHED*/
}

/*
 * This is the function the driver calls when it's finished with
 * a request.  Mark the request as done and signal the thread that
 * enqueued the request.
 */
void
fc_finish_request(struct fc_request *fp)
{
	ASSERT(fc_initialized);
	ASSERT(fp);
	ASSERT(fp->busy == FC_R_BUSY);

	mutex_enter(&fc_request_lock);
	fp->busy = FC_R_DONE;
	mutex_exit(&fc_request_lock);

	cv_broadcast(&fc_request_cv);
}

/*
 * Generic resource list management subroutines
 */
void
fc_add_resource(fco_handle_t rp, struct fc_resource *ip)
{
	ASSERT(rp);
	ASSERT(ip);

	mutex_enter(&fc_resource_lock);
	ip->next = NULL;
	if (rp->head != NULL)
		ip->next = rp->head;
	rp->head = ip;
	mutex_exit(&fc_resource_lock);
}

void
fc_rem_resource(fco_handle_t rp, struct fc_resource *ip)
{
	struct fc_resource *fp;

	ASSERT(rp);
	ASSERT(ip);

	if (rp->head == NULL) {
		cmn_err(CE_CONT, "fc_rem_resource: NULL list head!\n");
		return;
	}

	mutex_enter(&fc_resource_lock);
	if (rp->head == ip) {
		rp->head = ip->next;
		mutex_exit(&fc_resource_lock);
		return;
	}

	for (fp = rp->head; fp && (fp->next != ip); fp = fp->next)
		/* empty */;

	if (fp == NULL) {
		mutex_exit(&fc_resource_lock);
		cmn_err(CE_CONT, "fc_rem_resource: Item not on list!\n");
		return;
	}

	fp->next = ip->next;
	mutex_exit(&fc_resource_lock);
}

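/*
 * Let callers hold the resource list lock across a traversal of the
 * list.  The handle argument is unused because a single global lock
 * protects every handle's resource list.
 */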
/*ARGSUSED*/
void
fc_lock_resource_list(fco_handle_t rp)
{
	mutex_enter(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_unlock_resource_list(fco_handle_t rp)
{
	mutex_exit(&fc_resource_lock);
}

/*
 * Common helper ops and subroutines
 */
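/*
 * fc_syntax_error: fail the current service request with an interface
 * (syntax) error and no results; the msg argument is currently unused.
 */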
/*ARGSUSED*/
int
fc_syntax_error(fc_ci_t *cp, char *msg)
{
	cp->error = fc_int2cell(-1);
	cp->nresults = fc_int2cell(0);
	return (0);
}

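/*
 * fc_priv_error: the service name was recognized, but the request
 * failed for a service-private reason; report it via priv_error so
 * the caller can distinguish it from a syntax error.
 */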
/*ARGSUSED*/
int
fc_priv_error(fc_ci_t *cp, char *msg)
{
	cp->priv_error = fc_int2cell(-1);
	cp->error = fc_int2cell(0);
	cp->nresults = fc_int2cell(0);
	return (0);
}

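/*
 * fc_success_op: acknowledge a request as handled successfully without
 * doing any work; can serve as a default op for services that need no
 * action from this node.
 */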
/*ARGSUSED*/
int
fc_success_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cp->priv_error = cp->error = fc_int2cell(0);
	return (0);
}

/*
 * fc_fail_op: This 'handles' a request by specifically failing it,
 * as opposed to not handling it and returning '-1' to indicate
 * 'service unknown' and allowing somebody else in the chain to
 * handle it.
 */
/*ARGSUSED*/
int
fc_fail_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cmn_err(CE_CONT, "fcode ops: fail service name <%s>\n",
	    (char *)fc_cell2ptr(cp->svc_name));

	cp->nresults = fc_int2cell(0);
	cp->error = fc_int2cell(-1);
	return (0);
}

/*
 * Functions to manage the set of handles we give to the interpreter.
 * The handles are opaque and internally represent dev_info_t pointers.
 */
struct fc_phandle_entry **
fc_handle_to_phandle_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->ptable);
}

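/*
 * There is nothing to preallocate for the phandle table; entries are
 * created on demand in fc_dip_to_phandle() and fc_add_dip_to_phandle().
 * This routine exists to mirror fc_phandle_table_free().
 */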
/*ARGSUSED*/
void
fc_phandle_table_alloc(struct fc_phandle_entry **head)
{
}

void
fc_phandle_table_free(struct fc_phandle_entry **head)
{
	struct fc_phandle_entry *ip, *np;

	/*
	 * Free each entry in the table.
	 */
	for (ip = *head; ip; ip = np) {
		np = ip->next;
		kmem_free(ip, sizeof (struct fc_phandle_entry));
	}
	*head = NULL;
}

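/*
 * Look up the dev_info_t associated with a phandle; return NULL if
 * the handle is not in the table.
 */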
dev_info_t *
fc_phandle_to_dip(struct fc_phandle_entry **head, fc_phandle_t handle)
{
	struct fc_phandle_entry *ip;

	mutex_enter(&fc_hash_lock);

	for (ip = *head; ip; ip = ip->next)
		if (ip->h == handle)
			break;

	mutex_exit(&fc_hash_lock);

	return (ip ? ip->dip : NULL);
}

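/*
 * Return the phandle for a dev_info_t, creating a table entry for the
 * mapping if one doesn't already exist.  The phandle is simply the
 * devinfo nodeid.
 */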
fc_phandle_t
fc_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip)
{
	struct fc_phandle_entry *hp, *np;
	fc_phandle_t h;

	ASSERT(dip);
	h = (fc_phandle_t)ddi_get_nodeid(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, just return the handle
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return (h);
		}
	}

	/*
	 * Insert this entry to the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
	return (h);
}

/*
 * We won't need this function once the ddi is modified to handle
 * unique non-prom nodeids.  For now, this allows us to add a given
 * nodeid to the device tree without dereferencing the value in the
 * devinfo node, so we have a parallel mechanism.
 */
void
fc_add_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip,
    fc_phandle_t h)
{
	struct fc_phandle_entry *hp, *np;

	ASSERT(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, there's nothing to do.
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return;
		}
	}

	/*
	 * Insert this entry to the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
}

/*
 * Functions to manage our copy of our subtree.
 *
 * The head of the device tree is always stored in the last 'handle'
 * in the handle chain.
 */
struct fc_device_tree **
fc_handle_to_dtree_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->dtree);
}

struct fc_device_tree *
fc_handle_to_dtree(fco_handle_t rp)
{
	struct fc_device_tree **head = fc_handle_to_dtree_head(rp);

	return (*head);
}

/*
 * The root of the subtree is the attachment point ...
 * Thus, there is never an empty device tree.
 */
void
fc_create_device_tree(dev_info_t *ap, struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	dp = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	dp->dip = ap;
	*head = dp;
}

#ifdef notdef
static void
fc_remove_subtree(struct fc_device_tree *dp)
{
	struct fc_device_tree *np;

	if (dp->child) {
		fc_remove_subtree(dp->child);
		dp->child = NULL;
	}

	/*
	 * Remove each peer node, working our way backwards from the
	 * last peer node to the first peer node.
	 */
	if (dp->peer != NULL) {
		for (np = dp->peer; np->peer; np = dp->peer) {
			/*
			 * Walk to the node whose peer is the last node
			 * on the list, then remove that last node.
			 */
			for (/* empty */; np->peer->peer; np = np->peer)
				/* empty */;
			fc_remove_subtree(np->peer);
			np->peer = NULL;
		}
		fc_remove_subtree(dp->peer);
		dp->peer = NULL;
	}

	ASSERT((dp->child == NULL) && (dp->peer == NULL));
	kmem_free(dp, sizeof (struct fc_device_tree));
}

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	ASSERT(head && (*head != NULL));

	fc_remove_subtree(*head);
	*head = NULL;
}
#endif /* notdef */

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	ASSERT(head && (*head != NULL));

	dp = *head;

	if (dp->child)
		fc_remove_device_tree(&dp->child);

	if (dp->peer)
		fc_remove_device_tree(&dp->peer);

	ASSERT((dp->child == NULL) && (dp->peer == NULL));

	kmem_free(dp, sizeof (struct fc_device_tree));
	*head = NULL;
}

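/*
 * Depth-first search of the subtree rooted at hp for the node whose
 * dip matches; return NULL if the dip isn't in the tree.  Callers in
 * this file hold fc_device_tree_lock across the search.
 */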
struct fc_device_tree *
fc_find_node(dev_info_t *dip, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;

	while (hp) {
		if (hp->dip == dip)
			return (hp);

		if (hp->child)
			if ((p = fc_find_node(dip, hp->child)) != NULL)
				return (p);

		hp = hp->peer;
	}
	return (NULL);
}

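/*
 * Record a new child of 'parent' in our copy of the subtree.  The
 * parent must already be in the tree; the child is linked in at the
 * head of the parent's child list.
 */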
void
fc_add_child(dev_info_t *child, dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p, *q;

	q = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	q->dip = child;

	mutex_enter(&fc_device_tree_lock);

#ifdef DEBUG
	/* XXX: Revisit ASSERT vs PANIC */
	p = fc_find_node(child, hp);
	ASSERT(p == NULL);
#endif

	p = fc_find_node(parent, hp);
	ASSERT(p != NULL);

	q->peer = p->child;
	p->child = q;

	mutex_exit(&fc_device_tree_lock);
}

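/*
 * Remove a leaf node from our copy of the subtree.  The child must
 * have no children of its own; it is unlinked from its parent's child
 * list and freed.
 */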
void
fc_remove_child(dev_info_t *child, struct fc_device_tree *head)
{
	struct fc_device_tree *p, *c, *n;
	dev_info_t *parent = ddi_get_parent(child);

	mutex_enter(&fc_device_tree_lock);

	p = fc_find_node(parent, head);
	ASSERT(p != NULL);

	/*
	 * Find the child within the parent's subtree ...
	 */
	c = fc_find_node(child, p);
	ASSERT(c != NULL);
	ASSERT(c->child == NULL);

	/*
	 * If it's the first child, remove it, otherwise
	 * remove it from the child's peer list.
	 */
	if (p->child == c) {
		p->child = c->peer;
	} else {
		int found = 0;
		for (n = p->child; n->peer; n = n->peer) {
			if (n->peer == c) {
				n->peer = c->peer;
				found = 1;
				break;
			}
		}
		if (!found)
			cmn_err(CE_PANIC, "fc_remove_child: not found\n");
	}
	mutex_exit(&fc_device_tree_lock);

	kmem_free(c, sizeof (struct fc_device_tree));
}

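/*
 * Return the first child of 'parent' in our copy of the subtree, or
 * NULL if the parent has no children or isn't in the tree.
 */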
dev_info_t *
fc_child_node(dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(parent, hp);
	if (p && p->child)
		dip = p->child->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}

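/*
 * Return the next peer of 'devi' in our copy of the subtree, or NULL
 * if the node isn't in the tree or has no next peer.
 */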
dev_info_t *
fc_peer_node(dev_info_t *devi, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(devi, hp);
	if (p && p->peer)
		dip = p->peer->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}