1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright (c) 2000-2001 by Sun Microsystems, Inc.
24*0Sstevel@tonic-gate  * All rights reserved.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate /*
30*0Sstevel@tonic-gate  * Kernel framework functions for the fcode interpreter
31*0Sstevel@tonic-gate  */
32*0Sstevel@tonic-gate 
33*0Sstevel@tonic-gate #include <sys/types.h>
34*0Sstevel@tonic-gate #include <sys/conf.h>
35*0Sstevel@tonic-gate #include <sys/debug.h>
36*0Sstevel@tonic-gate #include <sys/kmem.h>
37*0Sstevel@tonic-gate #include <sys/ddi.h>
38*0Sstevel@tonic-gate #include <sys/sunddi.h>
39*0Sstevel@tonic-gate #include <sys/sunndi.h>
40*0Sstevel@tonic-gate #include <sys/esunddi.h>
41*0Sstevel@tonic-gate #include <sys/ksynch.h>
42*0Sstevel@tonic-gate #include <sys/modctl.h>
43*0Sstevel@tonic-gate #include <sys/errno.h>
44*0Sstevel@tonic-gate #include <sys/fcode.h>
45*0Sstevel@tonic-gate 
#ifdef	DEBUG
/*
 * NOTE(review): the DEBUG and non-DEBUG initializers are identical;
 * presumably the DEBUG build was meant to default to a non-zero debug
 * level -- confirm intent before changing either value.
 */
int fcode_debug = 0;
#else
int fcode_debug = 0;
#endif

/* Framework-wide locks and state */
static kmutex_t fc_request_lock;	/* protects fc_request_head queue */
static kmutex_t fc_resource_lock;	/* protects per-handle resource lists */
static kmutex_t fc_hash_lock;		/* protects the phandle entry lists */
static kmutex_t fc_device_tree_lock;	/* protects the fc_device_tree copies */
static kmutex_t fc_phandle_lock;	/* init/destroyed but unused in this file */
static kcondvar_t fc_request_cv;	/* signalled on queue/completion events */
static struct fc_request *fc_request_head;	/* singly-linked request queue */
static int fc_initialized;		/* nonzero once fcode_init() has run */

static void fcode_timer(void *);

int fcode_timeout = 300;	/* seconds */

/* Must be set nonzero (e.g. via /etc/system) before _fini() can succeed */
int fcodem_unloadable;

extern int hz;			/* ticks/second; converts fcode_timeout to ticks */
68*0Sstevel@tonic-gate 
69*0Sstevel@tonic-gate /*
70*0Sstevel@tonic-gate  * Initialize the fcode interpreter framework ... must be called
71*0Sstevel@tonic-gate  * prior to activating any of the fcode interpreter framework including
72*0Sstevel@tonic-gate  * the driver.
73*0Sstevel@tonic-gate  */
74*0Sstevel@tonic-gate static void
75*0Sstevel@tonic-gate fcode_init(void)
76*0Sstevel@tonic-gate {
77*0Sstevel@tonic-gate 	if (fc_initialized)
78*0Sstevel@tonic-gate 		return;
79*0Sstevel@tonic-gate 
80*0Sstevel@tonic-gate 	mutex_init(&fc_request_lock, NULL, MUTEX_DRIVER, NULL);
81*0Sstevel@tonic-gate 	mutex_init(&fc_resource_lock, NULL, MUTEX_DRIVER, NULL);
82*0Sstevel@tonic-gate 	mutex_init(&fc_hash_lock, NULL, MUTEX_DRIVER, NULL);
83*0Sstevel@tonic-gate 	mutex_init(&fc_device_tree_lock, NULL, MUTEX_DRIVER, NULL);
84*0Sstevel@tonic-gate 	mutex_init(&fc_phandle_lock, NULL, MUTEX_DRIVER, NULL);
85*0Sstevel@tonic-gate 	cv_init(&fc_request_cv, NULL, CV_DRIVER, NULL);
86*0Sstevel@tonic-gate 	++fc_initialized;
87*0Sstevel@tonic-gate }
88*0Sstevel@tonic-gate 
89*0Sstevel@tonic-gate static void
90*0Sstevel@tonic-gate fcode_fini(void)
91*0Sstevel@tonic-gate {
92*0Sstevel@tonic-gate 	mutex_destroy(&fc_request_lock);
93*0Sstevel@tonic-gate 	mutex_destroy(&fc_resource_lock);
94*0Sstevel@tonic-gate 	mutex_destroy(&fc_hash_lock);
95*0Sstevel@tonic-gate 	cv_destroy(&fc_request_cv);
96*0Sstevel@tonic-gate 	fc_initialized = 0;
97*0Sstevel@tonic-gate }
98*0Sstevel@tonic-gate 
/*
 * Module linkage information for the kernel.
 */
/* Miscellaneous (non-driver) module: no device ops of its own */
static struct modlmisc modlmisc = {
	&mod_miscops, "FCode framework 1.13"
};

/* One linkage element, terminated by NULL */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
109*0Sstevel@tonic-gate 
110*0Sstevel@tonic-gate int
111*0Sstevel@tonic-gate _init(void)
112*0Sstevel@tonic-gate {
113*0Sstevel@tonic-gate 	int error;
114*0Sstevel@tonic-gate 
115*0Sstevel@tonic-gate 	fcode_init();
116*0Sstevel@tonic-gate 	if ((error = mod_install(&modlinkage)) != 0)
117*0Sstevel@tonic-gate 		fcode_fini();
118*0Sstevel@tonic-gate 	return (error);
119*0Sstevel@tonic-gate }
120*0Sstevel@tonic-gate 
121*0Sstevel@tonic-gate int
122*0Sstevel@tonic-gate _fini(void)
123*0Sstevel@tonic-gate {
124*0Sstevel@tonic-gate 	int error = EBUSY;
125*0Sstevel@tonic-gate 
126*0Sstevel@tonic-gate 	if (fcodem_unloadable)
127*0Sstevel@tonic-gate 		if ((error = mod_remove(&modlinkage)) == 0)
128*0Sstevel@tonic-gate 			fcode_fini();
129*0Sstevel@tonic-gate 
130*0Sstevel@tonic-gate 	return (error);
131*0Sstevel@tonic-gate }
132*0Sstevel@tonic-gate 
/*
 * _info(9E): report module information from the linkage structure.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
138*0Sstevel@tonic-gate 
/*
 * Framework function to invoke the interpreter. Wait and return when the
 * interpreter is done. See fcode.h for details.
 *
 * Queues an fc_request for the userland interpreter, arms a watchdog
 * timer, then sleeps until the request reaches FC_R_DONE (set either by
 * fc_finish_request() or by fcode_timer() on timeout).  Returns the
 * request's error field (FC_SUCCESS, FC_TIMEOUT, ...).
 */
int
fcode_interpreter(dev_info_t *ap, fc_ops_t *ops, fco_handle_t handle)
{
	struct fc_request *fp, *qp;
	int error;

	ASSERT(fc_initialized);
	ASSERT(ap);
	ASSERT(ops);
	ASSERT(handle);

	/*
	 * Create a request structure
	 */
	fp = kmem_zalloc(sizeof (struct fc_request), KM_SLEEP);

	fp->next = NULL;
	fp->busy = FC_R_INIT;
	fp->error = FC_SUCCESS;
	fp->ap_dip = ap;
	fp->ap_ops = ops;
	fp->handle = handle;

	/*
	 * Add the request to the end of the request list.
	 */
	mutex_enter(&fc_request_lock);

	if (fc_request_head == NULL)
		fc_request_head = fp;
	else {
		/* Walk to the tail of the singly-linked queue */
		for (qp = fc_request_head; qp->next != NULL; qp = qp->next)
			/* empty */;
		qp->next = fp;
	}
	mutex_exit(&fc_request_lock);

	/*
	 * log a message (ie: i_ddi_log_event) indicating that a request
	 * has been queued to start the userland fcode interpreter.
	 * This call is the glue to the eventd and automates the process.
	 */

	/*
	 * Signal the driver if it's waiting for a request to be queued.
	 */
	cv_broadcast(&fc_request_cv);

	/*
	 * Wait for the request to be serviced.  The watchdog is armed
	 * under the lock; cv_wait() re-checks busy on every wakeup since
	 * the broadcast wakes all waiters, not just this one.
	 */
	mutex_enter(&fc_request_lock);
	fp->timeout = timeout(fcode_timer, fp, hz * fcode_timeout);
	while (fp->busy != FC_R_DONE)
		cv_wait(&fc_request_cv, &fc_request_lock);

	/* Cancel the watchdog unless it already fired and cleared itself */
	if (fp->timeout) {
		(void) untimeout(fp->timeout);
		fp->timeout = NULL;
	}

	/*
	 * Remove the request from the queue (while still holding the lock)
	 */
	if (fc_request_head == fp)
		fc_request_head = fp->next;
	else {
		for (qp = fc_request_head; qp->next != fp; qp = qp->next)
			/* empty */;
		qp->next = fp->next;
	}
	mutex_exit(&fc_request_lock);

	FC_DEBUG1(2, CE_CONT, "fcode_interpreter: request finished, fp %p\n",
	    fp);

	/*
	 * Free the request structure and return any errors.
	 */
	error = fp->error;
	kmem_free(fp, sizeof (struct fc_request));
	return (error);
}
226*0Sstevel@tonic-gate 
/*
 * Timeout requests that don't get picked up by the interpreter.  This
 * would happen if the daemon is not running.  If the timer goes off
 * and its state is not FC_R_INIT, then the interpreter has picked up the
 * request.
 */
static void
fcode_timer(void *arg)
{
	struct fc_request *fp = arg;

	mutex_enter(&fc_request_lock);
	/* Clear the id so the waiter knows not to call untimeout() */
	fp->timeout = 0;
	if (fp->busy == FC_R_INIT) {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter did not pick up request\n");
		/* Fail the request ourselves and wake the waiting thread */
		fp->busy = FC_R_DONE;
		fp->error = FC_TIMEOUT;
		mutex_exit(&fc_request_lock);
		cv_broadcast(&fc_request_cv);
		return;
	} else {
		/* Daemon has the request in hand; let it run to completion */
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter is executing request\n");
	}
	mutex_exit(&fc_request_lock);
}
254*0Sstevel@tonic-gate 
255*0Sstevel@tonic-gate /*
256*0Sstevel@tonic-gate  * This is the function the driver calls to wait for and get
257*0Sstevel@tonic-gate  * a request.  The call should be interruptable since it's done
258*0Sstevel@tonic-gate  * at read(2) time, so allow for signals to interrupt us.
259*0Sstevel@tonic-gate  *
260*0Sstevel@tonic-gate  * Return NULL if the wait was interrupted, else return a pointer
261*0Sstevel@tonic-gate  * to the fc_request structure (marked as busy).
262*0Sstevel@tonic-gate  *
263*0Sstevel@tonic-gate  * Note that we have to check for a request first, before waiting,
264*0Sstevel@tonic-gate  * in case the request is already queued. In this case, the signal
265*0Sstevel@tonic-gate  * may have already been delivered.
266*0Sstevel@tonic-gate  */
267*0Sstevel@tonic-gate struct fc_request *
268*0Sstevel@tonic-gate fc_get_request(void)
269*0Sstevel@tonic-gate {
270*0Sstevel@tonic-gate 	struct fc_request *fp;
271*0Sstevel@tonic-gate 
272*0Sstevel@tonic-gate 	ASSERT(fc_initialized);
273*0Sstevel@tonic-gate 
274*0Sstevel@tonic-gate 	mutex_enter(&fc_request_lock);
275*0Sstevel@tonic-gate 
276*0Sstevel@tonic-gate 	/*CONSTANTCONDITION*/
277*0Sstevel@tonic-gate 	while (1) {
278*0Sstevel@tonic-gate 		for (fp = fc_request_head; fp != NULL; fp = fp->next) {
279*0Sstevel@tonic-gate 			if (fp->busy == FC_R_INIT) {
280*0Sstevel@tonic-gate 				fp->busy = FC_R_BUSY;
281*0Sstevel@tonic-gate 				mutex_exit(&fc_request_lock);
282*0Sstevel@tonic-gate 				return (fp);
283*0Sstevel@tonic-gate 			}
284*0Sstevel@tonic-gate 		}
285*0Sstevel@tonic-gate 		if (cv_wait_sig(&fc_request_cv, &fc_request_lock) == 0) {
286*0Sstevel@tonic-gate 			mutex_exit(&fc_request_lock);
287*0Sstevel@tonic-gate 			return (NULL);
288*0Sstevel@tonic-gate 		}
289*0Sstevel@tonic-gate 	}
290*0Sstevel@tonic-gate 	/*NOTREACHED*/
291*0Sstevel@tonic-gate }
292*0Sstevel@tonic-gate 
/*
 * This is the function the driver calls when it's finished with
 * a request.  Mark the request as done and signal the thread that
 * enqueued the request.
 *
 * The broadcast is issued after dropping the lock; the waiter in
 * fcode_interpreter() re-checks fp->busy under the lock, so no
 * wakeup can be lost.
 */
void
fc_finish_request(struct fc_request *fp)
{
	ASSERT(fc_initialized);
	ASSERT(fp);
	ASSERT(fp->busy == FC_R_BUSY);

	mutex_enter(&fc_request_lock);
	fp->busy = FC_R_DONE;
	mutex_exit(&fc_request_lock);

	cv_broadcast(&fc_request_cv);
}
311*0Sstevel@tonic-gate 
312*0Sstevel@tonic-gate /*
313*0Sstevel@tonic-gate  * Generic resource list management subroutines
314*0Sstevel@tonic-gate  */
315*0Sstevel@tonic-gate void
316*0Sstevel@tonic-gate fc_add_resource(fco_handle_t rp, struct fc_resource *ip)
317*0Sstevel@tonic-gate {
318*0Sstevel@tonic-gate 	ASSERT(rp);
319*0Sstevel@tonic-gate 	ASSERT(ip);
320*0Sstevel@tonic-gate 
321*0Sstevel@tonic-gate 	mutex_enter(&fc_resource_lock);
322*0Sstevel@tonic-gate 	ip->next = NULL;
323*0Sstevel@tonic-gate 	if (rp->head != NULL)
324*0Sstevel@tonic-gate 		ip->next = rp->head;
325*0Sstevel@tonic-gate 	rp->head = ip;
326*0Sstevel@tonic-gate 	mutex_exit(&fc_resource_lock);
327*0Sstevel@tonic-gate }
328*0Sstevel@tonic-gate 
329*0Sstevel@tonic-gate void
330*0Sstevel@tonic-gate fc_rem_resource(fco_handle_t rp, struct fc_resource *ip)
331*0Sstevel@tonic-gate {
332*0Sstevel@tonic-gate 	struct fc_resource *fp;
333*0Sstevel@tonic-gate 
334*0Sstevel@tonic-gate 	ASSERT(rp);
335*0Sstevel@tonic-gate 	ASSERT(ip);
336*0Sstevel@tonic-gate 
337*0Sstevel@tonic-gate 	if (rp->head == NULL)  {
338*0Sstevel@tonic-gate 		cmn_err(CE_CONT, "fc_rem_resource: NULL list head!\n");
339*0Sstevel@tonic-gate 		return;
340*0Sstevel@tonic-gate 	}
341*0Sstevel@tonic-gate 
342*0Sstevel@tonic-gate 	mutex_enter(&fc_resource_lock);
343*0Sstevel@tonic-gate 	if (rp->head == ip) {
344*0Sstevel@tonic-gate 		rp->head = ip->next;
345*0Sstevel@tonic-gate 		mutex_exit(&fc_resource_lock);
346*0Sstevel@tonic-gate 		return;
347*0Sstevel@tonic-gate 	}
348*0Sstevel@tonic-gate 
349*0Sstevel@tonic-gate 	for (fp = rp->head; fp && (fp->next != ip); fp = fp->next)
350*0Sstevel@tonic-gate 		/* empty */;
351*0Sstevel@tonic-gate 
352*0Sstevel@tonic-gate 	if (fp == NULL)  {
353*0Sstevel@tonic-gate 		mutex_exit(&fc_resource_lock);
354*0Sstevel@tonic-gate 		cmn_err(CE_CONT, "fc_rem_resource: Item not on list!\n");
355*0Sstevel@tonic-gate 		return;
356*0Sstevel@tonic-gate 	}
357*0Sstevel@tonic-gate 
358*0Sstevel@tonic-gate 	fp->next = ip->next;
359*0Sstevel@tonic-gate 	mutex_exit(&fc_resource_lock);
360*0Sstevel@tonic-gate }
361*0Sstevel@tonic-gate 
/*
 * Lock the resource list on behalf of a caller that needs to traverse
 * it atomically.  The handle argument is unused: all handles share the
 * single global fc_resource_lock.
 */
/*ARGSUSED*/
void
fc_lock_resource_list(fco_handle_t rp)
{
	mutex_enter(&fc_resource_lock);
}
368*0Sstevel@tonic-gate 
/*
 * Release the lock taken by fc_lock_resource_list().  The handle
 * argument is unused (see fc_lock_resource_list()).
 */
/*ARGSUSED*/
void
fc_unlock_resource_list(fco_handle_t rp)
{
	mutex_exit(&fc_resource_lock);
}
375*0Sstevel@tonic-gate 
376*0Sstevel@tonic-gate /*
377*0Sstevel@tonic-gate  * Common helper ops and subroutines
378*0Sstevel@tonic-gate  */
379*0Sstevel@tonic-gate /*ARGSUSED*/
380*0Sstevel@tonic-gate int
381*0Sstevel@tonic-gate fc_syntax_error(fc_ci_t *cp, char *msg)
382*0Sstevel@tonic-gate {
383*0Sstevel@tonic-gate 	cp->error = fc_int2cell(-1);
384*0Sstevel@tonic-gate 	cp->nresults = fc_int2cell(0);
385*0Sstevel@tonic-gate 	return (0);
386*0Sstevel@tonic-gate }
387*0Sstevel@tonic-gate 
388*0Sstevel@tonic-gate /*ARGSUSED*/
389*0Sstevel@tonic-gate int
390*0Sstevel@tonic-gate fc_priv_error(fc_ci_t *cp, char *msg)
391*0Sstevel@tonic-gate {
392*0Sstevel@tonic-gate 	cp->priv_error = fc_int2cell(-1);
393*0Sstevel@tonic-gate 	cp->error = fc_int2cell(0);
394*0Sstevel@tonic-gate 	cp->nresults = fc_int2cell(0);
395*0Sstevel@tonic-gate 	return (0);
396*0Sstevel@tonic-gate }
397*0Sstevel@tonic-gate 
398*0Sstevel@tonic-gate /*ARGSUSED*/
399*0Sstevel@tonic-gate int
400*0Sstevel@tonic-gate fc_success_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
401*0Sstevel@tonic-gate {
402*0Sstevel@tonic-gate 	cp->priv_error = cp->error = fc_int2cell(0);
403*0Sstevel@tonic-gate 	return (0);
404*0Sstevel@tonic-gate }
405*0Sstevel@tonic-gate 
406*0Sstevel@tonic-gate /*
407*0Sstevel@tonic-gate  * fc_fail_op: This 'handles' a request by specifically failing it,
408*0Sstevel@tonic-gate  * as opposed to not handling it and returning '-1' to indicate
409*0Sstevel@tonic-gate  * 'service unknown' and allowing somebody else in the chain to
410*0Sstevel@tonic-gate  * handle it.
411*0Sstevel@tonic-gate  */
412*0Sstevel@tonic-gate /*ARGSUSED*/
413*0Sstevel@tonic-gate int
414*0Sstevel@tonic-gate fc_fail_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
415*0Sstevel@tonic-gate {
416*0Sstevel@tonic-gate 	cmn_err(CE_CONT, "fcode ops: fail service name <%s>\n",
417*0Sstevel@tonic-gate 	    (char *)fc_cell2ptr(cp->svc_name));
418*0Sstevel@tonic-gate 
419*0Sstevel@tonic-gate 	cp->nresults = fc_int2cell(0);
420*0Sstevel@tonic-gate 	cp->error = fc_int2cell(-1);
421*0Sstevel@tonic-gate 	return (0);
422*0Sstevel@tonic-gate }
423*0Sstevel@tonic-gate 
424*0Sstevel@tonic-gate /*
425*0Sstevel@tonic-gate  * Functions to manage the set of handles we give to the interpreter.
426*0Sstevel@tonic-gate  * The handles are opaque and internally represent dev_info_t pointers.
427*0Sstevel@tonic-gate  */
428*0Sstevel@tonic-gate struct fc_phandle_entry **
429*0Sstevel@tonic-gate fc_handle_to_phandle_head(fco_handle_t rp)
430*0Sstevel@tonic-gate {
431*0Sstevel@tonic-gate 	while (rp->next_handle)
432*0Sstevel@tonic-gate 		rp = rp->next_handle;
433*0Sstevel@tonic-gate 
434*0Sstevel@tonic-gate 	return (&rp->ptable);
435*0Sstevel@tonic-gate }
436*0Sstevel@tonic-gate 
/*
 * Intentionally a no-op: phandle entries are allocated on demand by
 * fc_dip_to_phandle()/fc_add_dip_to_phandle().  This hook exists so
 * callers have a symmetric counterpart to fc_phandle_table_free().
 */
/*ARGSUSED*/
void
fc_phandle_table_alloc(struct fc_phandle_entry **head)
{
}
442*0Sstevel@tonic-gate 
443*0Sstevel@tonic-gate void
444*0Sstevel@tonic-gate fc_phandle_table_free(struct fc_phandle_entry **head)
445*0Sstevel@tonic-gate {
446*0Sstevel@tonic-gate 	struct fc_phandle_entry *ip, *np;
447*0Sstevel@tonic-gate 
448*0Sstevel@tonic-gate 	/*
449*0Sstevel@tonic-gate 	 * Free each entry in the table.
450*0Sstevel@tonic-gate 	 */
451*0Sstevel@tonic-gate 	for (ip = *head; ip; ip = np) {
452*0Sstevel@tonic-gate 		np = ip->next;
453*0Sstevel@tonic-gate 		kmem_free(ip, sizeof (struct fc_phandle_entry));
454*0Sstevel@tonic-gate 	}
455*0Sstevel@tonic-gate 	*head = NULL;
456*0Sstevel@tonic-gate }
457*0Sstevel@tonic-gate 
458*0Sstevel@tonic-gate dev_info_t *
459*0Sstevel@tonic-gate fc_phandle_to_dip(struct fc_phandle_entry **head, fc_phandle_t handle)
460*0Sstevel@tonic-gate {
461*0Sstevel@tonic-gate 	struct fc_phandle_entry *ip;
462*0Sstevel@tonic-gate 
463*0Sstevel@tonic-gate 	mutex_enter(&fc_hash_lock);
464*0Sstevel@tonic-gate 
465*0Sstevel@tonic-gate 	for (ip = *head; ip; ip = ip->next)
466*0Sstevel@tonic-gate 		if (ip->h == handle)
467*0Sstevel@tonic-gate 			break;
468*0Sstevel@tonic-gate 
469*0Sstevel@tonic-gate 	mutex_exit(&fc_hash_lock);
470*0Sstevel@tonic-gate 
471*0Sstevel@tonic-gate 	return (ip ? ip->dip : NULL);
472*0Sstevel@tonic-gate }
473*0Sstevel@tonic-gate 
474*0Sstevel@tonic-gate fc_phandle_t
475*0Sstevel@tonic-gate fc_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip)
476*0Sstevel@tonic-gate {
477*0Sstevel@tonic-gate 	struct fc_phandle_entry *hp, *np;
478*0Sstevel@tonic-gate 	fc_phandle_t h;
479*0Sstevel@tonic-gate 
480*0Sstevel@tonic-gate 	ASSERT(dip);
481*0Sstevel@tonic-gate 	h = (fc_phandle_t)ddi_get_nodeid(dip);
482*0Sstevel@tonic-gate 
483*0Sstevel@tonic-gate 	/*
484*0Sstevel@tonic-gate 	 * Just in case, allocate a new entry ...
485*0Sstevel@tonic-gate 	 */
486*0Sstevel@tonic-gate 	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);
487*0Sstevel@tonic-gate 
488*0Sstevel@tonic-gate 	mutex_enter(&fc_hash_lock);
489*0Sstevel@tonic-gate 
490*0Sstevel@tonic-gate 	/*
491*0Sstevel@tonic-gate 	 * If we already have this dip in the table, just return the handle
492*0Sstevel@tonic-gate 	 */
493*0Sstevel@tonic-gate 	for (hp = *head; hp; hp = hp->next) {
494*0Sstevel@tonic-gate 		if (hp->dip == dip) {
495*0Sstevel@tonic-gate 			mutex_exit(&fc_hash_lock);
496*0Sstevel@tonic-gate 			kmem_free(np, sizeof (struct fc_phandle_entry));
497*0Sstevel@tonic-gate 			return (h);
498*0Sstevel@tonic-gate 		}
499*0Sstevel@tonic-gate 	}
500*0Sstevel@tonic-gate 
501*0Sstevel@tonic-gate 	/*
502*0Sstevel@tonic-gate 	 * Insert this entry to the list of known entries
503*0Sstevel@tonic-gate 	 */
504*0Sstevel@tonic-gate 	np->next = *head;
505*0Sstevel@tonic-gate 	np->dip = dip;
506*0Sstevel@tonic-gate 	np->h = h;
507*0Sstevel@tonic-gate 	*head = np;
508*0Sstevel@tonic-gate 	mutex_exit(&fc_hash_lock);
509*0Sstevel@tonic-gate 	return (h);
510*0Sstevel@tonic-gate }
511*0Sstevel@tonic-gate 
512*0Sstevel@tonic-gate /*
513*0Sstevel@tonic-gate  * We won't need this function once the ddi is modified to handle
514*0Sstevel@tonic-gate  * unique non-prom nodeids.  For now, this allows us to add a given
515*0Sstevel@tonic-gate  * nodeid to the device tree without dereferencing the value in the
516*0Sstevel@tonic-gate  * devinfo node, so we have a parallel mechanism.
517*0Sstevel@tonic-gate  */
518*0Sstevel@tonic-gate void
519*0Sstevel@tonic-gate fc_add_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip,
520*0Sstevel@tonic-gate     fc_phandle_t h)
521*0Sstevel@tonic-gate {
522*0Sstevel@tonic-gate 	struct fc_phandle_entry *hp, *np;
523*0Sstevel@tonic-gate 
524*0Sstevel@tonic-gate 	ASSERT(dip);
525*0Sstevel@tonic-gate 
526*0Sstevel@tonic-gate 	/*
527*0Sstevel@tonic-gate 	 * Just in case, allocate a new entry ...
528*0Sstevel@tonic-gate 	 */
529*0Sstevel@tonic-gate 	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);
530*0Sstevel@tonic-gate 
531*0Sstevel@tonic-gate 	mutex_enter(&fc_hash_lock);
532*0Sstevel@tonic-gate 
533*0Sstevel@tonic-gate 	/*
534*0Sstevel@tonic-gate 	 * If we already have this dip in the table, just return the handle
535*0Sstevel@tonic-gate 	 */
536*0Sstevel@tonic-gate 	for (hp = *head; hp; hp = hp->next) {
537*0Sstevel@tonic-gate 		if (hp->dip == dip) {
538*0Sstevel@tonic-gate 			mutex_exit(&fc_hash_lock);
539*0Sstevel@tonic-gate 			kmem_free(np, sizeof (struct fc_phandle_entry));
540*0Sstevel@tonic-gate 			return;
541*0Sstevel@tonic-gate 		}
542*0Sstevel@tonic-gate 	}
543*0Sstevel@tonic-gate 
544*0Sstevel@tonic-gate 	/*
545*0Sstevel@tonic-gate 	 * Insert this entry to the list of known entries
546*0Sstevel@tonic-gate 	 */
547*0Sstevel@tonic-gate 	np->next = *head;
548*0Sstevel@tonic-gate 	np->dip = dip;
549*0Sstevel@tonic-gate 	np->h = h;
550*0Sstevel@tonic-gate 	*head = np;
551*0Sstevel@tonic-gate 	mutex_exit(&fc_hash_lock);
552*0Sstevel@tonic-gate }
553*0Sstevel@tonic-gate 
554*0Sstevel@tonic-gate /*
555*0Sstevel@tonic-gate  * Functions to manage our copy of our subtree.
556*0Sstevel@tonic-gate  *
557*0Sstevel@tonic-gate  * The head of the device tree is always stored in the last 'handle'
558*0Sstevel@tonic-gate  * in the handle chain.
559*0Sstevel@tonic-gate  */
560*0Sstevel@tonic-gate struct fc_device_tree **
561*0Sstevel@tonic-gate fc_handle_to_dtree_head(fco_handle_t rp)
562*0Sstevel@tonic-gate {
563*0Sstevel@tonic-gate 	while (rp->next_handle)
564*0Sstevel@tonic-gate 		rp = rp->next_handle;
565*0Sstevel@tonic-gate 
566*0Sstevel@tonic-gate 	return (&rp->dtree);
567*0Sstevel@tonic-gate }
568*0Sstevel@tonic-gate 
569*0Sstevel@tonic-gate struct fc_device_tree *
570*0Sstevel@tonic-gate fc_handle_to_dtree(fco_handle_t rp)
571*0Sstevel@tonic-gate {
572*0Sstevel@tonic-gate 	struct fc_device_tree **head = fc_handle_to_dtree_head(rp);
573*0Sstevel@tonic-gate 
574*0Sstevel@tonic-gate 	return (*head);
575*0Sstevel@tonic-gate }
576*0Sstevel@tonic-gate 
577*0Sstevel@tonic-gate /*
578*0Sstevel@tonic-gate  * The root of the subtree is the attachment point ...
579*0Sstevel@tonic-gate  * Thus, there is never an empty device tree.
580*0Sstevel@tonic-gate  */
581*0Sstevel@tonic-gate void
582*0Sstevel@tonic-gate fc_create_device_tree(dev_info_t *ap, struct fc_device_tree **head)
583*0Sstevel@tonic-gate {
584*0Sstevel@tonic-gate 	struct fc_device_tree *dp;
585*0Sstevel@tonic-gate 
586*0Sstevel@tonic-gate 	dp = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
587*0Sstevel@tonic-gate 	dp->dip = ap;
588*0Sstevel@tonic-gate 	*head = dp;
589*0Sstevel@tonic-gate }
590*0Sstevel@tonic-gate 
#ifdef	notdef
/*
 * NOTE: this whole block is compiled out (#ifdef notdef); the live
 * implementation of fc_remove_device_tree() follows below.
 *
 * Fix: the fc_remove_subtree(dp->peer) call was missing its trailing
 * semicolon, so this code would not have compiled if re-enabled.
 */
static void
fc_remove_subtree(struct fc_device_tree *dp)
{
	struct fc_device_tree *np;

	if (dp->child) {
		fc_remove_subtree(dp->child);
		dp->child = NULL;
	}

	/*
	 * Remove each peer node, working our way backwards from the
	 * last peer node to the first peer node.
	 */
	if (dp->peer != NULL) {
		for (np = dp->peer; np->peer; np = dp->peer) {
			for (/* empty */; np->peer; np = np->peer)
				/* empty */;
			fc_remove_subtree(np->peer);
			np->peer = NULL;
		}
		fc_remove_subtree(dp->peer);
		dp->peer = NULL;
	}

	ASSERT((dp->child == NULL) && (dp->peer == NULL));
	kmem_free(dp, sizeof (struct fc_device_tree));
}

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	ASSERT(head && (*head != NULL));

	fc_remove_subtree(*head);
	*head = NULL;
}
#endif	/* notdef */
630*0Sstevel@tonic-gate 
631*0Sstevel@tonic-gate void
632*0Sstevel@tonic-gate fc_remove_device_tree(struct fc_device_tree **head)
633*0Sstevel@tonic-gate {
634*0Sstevel@tonic-gate 	struct fc_device_tree *dp;
635*0Sstevel@tonic-gate 
636*0Sstevel@tonic-gate 	ASSERT(head && (*head != NULL));
637*0Sstevel@tonic-gate 
638*0Sstevel@tonic-gate 	dp = *head;
639*0Sstevel@tonic-gate 
640*0Sstevel@tonic-gate 	if (dp->child)
641*0Sstevel@tonic-gate 		fc_remove_device_tree(&dp->child);
642*0Sstevel@tonic-gate 
643*0Sstevel@tonic-gate 	if (dp->peer)
644*0Sstevel@tonic-gate 		fc_remove_device_tree(&dp->peer);
645*0Sstevel@tonic-gate 
646*0Sstevel@tonic-gate 	ASSERT((dp->child == NULL) && (dp->peer == NULL));
647*0Sstevel@tonic-gate 
648*0Sstevel@tonic-gate 	kmem_free(dp, sizeof (struct fc_device_tree));
649*0Sstevel@tonic-gate 	*head = NULL;
650*0Sstevel@tonic-gate }
651*0Sstevel@tonic-gate 
652*0Sstevel@tonic-gate struct fc_device_tree *
653*0Sstevel@tonic-gate fc_find_node(dev_info_t *dip, struct fc_device_tree *hp)
654*0Sstevel@tonic-gate {
655*0Sstevel@tonic-gate 	struct fc_device_tree *p;
656*0Sstevel@tonic-gate 
657*0Sstevel@tonic-gate 	while (hp) {
658*0Sstevel@tonic-gate 		if (hp->dip == dip)
659*0Sstevel@tonic-gate 			return (hp);
660*0Sstevel@tonic-gate 
661*0Sstevel@tonic-gate 		if (hp->child)
662*0Sstevel@tonic-gate 			if ((p = fc_find_node(dip, hp->child)) != NULL)
663*0Sstevel@tonic-gate 				return (p);
664*0Sstevel@tonic-gate 
665*0Sstevel@tonic-gate 		hp = hp->peer;
666*0Sstevel@tonic-gate 	}
667*0Sstevel@tonic-gate 	return (NULL);
668*0Sstevel@tonic-gate }
669*0Sstevel@tonic-gate 
670*0Sstevel@tonic-gate void
671*0Sstevel@tonic-gate fc_add_child(dev_info_t *child, dev_info_t *parent, struct fc_device_tree *hp)
672*0Sstevel@tonic-gate {
673*0Sstevel@tonic-gate 	struct fc_device_tree *p, *q;
674*0Sstevel@tonic-gate 
675*0Sstevel@tonic-gate 	q = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
676*0Sstevel@tonic-gate 	q->dip = child;
677*0Sstevel@tonic-gate 
678*0Sstevel@tonic-gate 	mutex_enter(&fc_device_tree_lock);
679*0Sstevel@tonic-gate 
680*0Sstevel@tonic-gate #ifdef	DEBUG
681*0Sstevel@tonic-gate 	/* XXX: Revisit ASSERT vs PANIC */
682*0Sstevel@tonic-gate 	p = fc_find_node(child, hp);
683*0Sstevel@tonic-gate 	ASSERT(p == NULL);
684*0Sstevel@tonic-gate #endif
685*0Sstevel@tonic-gate 
686*0Sstevel@tonic-gate 	p = fc_find_node(parent, hp);
687*0Sstevel@tonic-gate 	ASSERT(p != NULL);
688*0Sstevel@tonic-gate 
689*0Sstevel@tonic-gate 	q->peer = p->child;
690*0Sstevel@tonic-gate 	p->child = q;
691*0Sstevel@tonic-gate 
692*0Sstevel@tonic-gate 	mutex_exit(&fc_device_tree_lock);
693*0Sstevel@tonic-gate }
694*0Sstevel@tonic-gate 
695*0Sstevel@tonic-gate void
696*0Sstevel@tonic-gate fc_remove_child(dev_info_t *child, struct fc_device_tree *head)
697*0Sstevel@tonic-gate {
698*0Sstevel@tonic-gate 	struct fc_device_tree *p, *c, *n;
699*0Sstevel@tonic-gate 	dev_info_t *parent = ddi_get_parent(child);
700*0Sstevel@tonic-gate 
701*0Sstevel@tonic-gate 	mutex_enter(&fc_device_tree_lock);
702*0Sstevel@tonic-gate 
703*0Sstevel@tonic-gate 	p = fc_find_node(parent, head);
704*0Sstevel@tonic-gate 	ASSERT(p != NULL);
705*0Sstevel@tonic-gate 
706*0Sstevel@tonic-gate 	/*
707*0Sstevel@tonic-gate 	 * Find the child within the parent's subtree ...
708*0Sstevel@tonic-gate 	 */
709*0Sstevel@tonic-gate 	c = fc_find_node(child, p);
710*0Sstevel@tonic-gate 	ASSERT(c != NULL);
711*0Sstevel@tonic-gate 	ASSERT(c->child == NULL);
712*0Sstevel@tonic-gate 
713*0Sstevel@tonic-gate 	/*
714*0Sstevel@tonic-gate 	 * If it's the first child, remove it, otherwise
715*0Sstevel@tonic-gate 	 * remove it from the child's peer list.
716*0Sstevel@tonic-gate 	 */
717*0Sstevel@tonic-gate 	if (p->child == c) {
718*0Sstevel@tonic-gate 		p->child = c->peer;
719*0Sstevel@tonic-gate 	} else {
720*0Sstevel@tonic-gate 		int found = 0;
721*0Sstevel@tonic-gate 		for (n = p->child; n->peer; n = n->peer) {
722*0Sstevel@tonic-gate 			if (n->peer == c) {
723*0Sstevel@tonic-gate 				n->peer = c->peer;
724*0Sstevel@tonic-gate 				found = 1;
725*0Sstevel@tonic-gate 				break;
726*0Sstevel@tonic-gate 			}
727*0Sstevel@tonic-gate 		}
728*0Sstevel@tonic-gate 		if (!found)
729*0Sstevel@tonic-gate 			cmn_err(CE_PANIC, "fc_remove_child: not found\n");
730*0Sstevel@tonic-gate 	}
731*0Sstevel@tonic-gate 	mutex_exit(&fc_device_tree_lock);
732*0Sstevel@tonic-gate 
733*0Sstevel@tonic-gate 	kmem_free(c, sizeof (struct fc_device_tree));
734*0Sstevel@tonic-gate }
735*0Sstevel@tonic-gate 
736*0Sstevel@tonic-gate dev_info_t *
737*0Sstevel@tonic-gate fc_child_node(dev_info_t *parent, struct fc_device_tree *hp)
738*0Sstevel@tonic-gate {
739*0Sstevel@tonic-gate 	struct fc_device_tree *p;
740*0Sstevel@tonic-gate 	dev_info_t *dip = NULL;
741*0Sstevel@tonic-gate 
742*0Sstevel@tonic-gate 	mutex_enter(&fc_device_tree_lock);
743*0Sstevel@tonic-gate 	p = fc_find_node(parent, hp);
744*0Sstevel@tonic-gate 	if (p && p->child)
745*0Sstevel@tonic-gate 		dip = p->child->dip;
746*0Sstevel@tonic-gate 	mutex_exit(&fc_device_tree_lock);
747*0Sstevel@tonic-gate 
748*0Sstevel@tonic-gate 	return (dip);
749*0Sstevel@tonic-gate }
750*0Sstevel@tonic-gate 
751*0Sstevel@tonic-gate dev_info_t *
752*0Sstevel@tonic-gate fc_peer_node(dev_info_t *devi, struct fc_device_tree *hp)
753*0Sstevel@tonic-gate {
754*0Sstevel@tonic-gate 	struct fc_device_tree *p;
755*0Sstevel@tonic-gate 	dev_info_t *dip = NULL;
756*0Sstevel@tonic-gate 
757*0Sstevel@tonic-gate 	mutex_enter(&fc_device_tree_lock);
758*0Sstevel@tonic-gate 	p = fc_find_node(devi, hp);
759*0Sstevel@tonic-gate 	if (p && p->peer)
760*0Sstevel@tonic-gate 		dip = p->peer->dip;
761*0Sstevel@tonic-gate 	mutex_exit(&fc_device_tree_lock);
762*0Sstevel@tonic-gate 
763*0Sstevel@tonic-gate 	return (dip);
764*0Sstevel@tonic-gate }
765