xref: /onnv-gate/usr/src/uts/common/xen/os/xvdi.c (revision 5084:7d838c5c0eed)
1*5084Sjohnlev /*
2*5084Sjohnlev  * CDDL HEADER START
3*5084Sjohnlev  *
4*5084Sjohnlev  * The contents of this file are subject to the terms of the
5*5084Sjohnlev  * Common Development and Distribution License (the "License").
6*5084Sjohnlev  * You may not use this file except in compliance with the License.
7*5084Sjohnlev  *
8*5084Sjohnlev  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*5084Sjohnlev  * or http://www.opensolaris.org/os/licensing.
10*5084Sjohnlev  * See the License for the specific language governing permissions
11*5084Sjohnlev  * and limitations under the License.
12*5084Sjohnlev  *
13*5084Sjohnlev  * When distributing Covered Code, include this CDDL HEADER in each
14*5084Sjohnlev  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*5084Sjohnlev  * If applicable, add the following below this CDDL HEADER, with the
16*5084Sjohnlev  * fields enclosed by brackets "[]" replaced with your own identifying
17*5084Sjohnlev  * information: Portions Copyright [yyyy] [name of copyright owner]
18*5084Sjohnlev  *
19*5084Sjohnlev  * CDDL HEADER END
20*5084Sjohnlev  */
21*5084Sjohnlev 
22*5084Sjohnlev /*
23*5084Sjohnlev  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24*5084Sjohnlev  * Use is subject to license terms.
25*5084Sjohnlev  */
26*5084Sjohnlev 
27*5084Sjohnlev #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*5084Sjohnlev 
29*5084Sjohnlev /*
30*5084Sjohnlev  * Xen virtual device driver interfaces
31*5084Sjohnlev  */
32*5084Sjohnlev 
33*5084Sjohnlev /*
34*5084Sjohnlev  * todo:
35*5084Sjohnlev  * + name space clean up:
36*5084Sjohnlev  *	xvdi_* - public xen interfaces, for use by all leaf drivers
37*5084Sjohnlev  *	xd_* - public xen data structures
38*5084Sjohnlev  *	i_xvdi_* - implementation private functions
39*5084Sjohnlev  *	xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
40*5084Sjohnlev  * + add mdb dcmds to dump ring status
41*5084Sjohnlev  * + implement xvdi_xxx to wrap xenbus_xxx read/write function
42*5084Sjohnlev  * + convert (xendev_ring_t *) into xvdi_ring_handle_t
43*5084Sjohnlev  */
44*5084Sjohnlev #include <sys/conf.h>
45*5084Sjohnlev #include <sys/param.h>
46*5084Sjohnlev #include <sys/hypervisor.h>
47*5084Sjohnlev #include <sys/xen_mmu.h>
48*5084Sjohnlev #include <sys/kmem.h>
49*5084Sjohnlev #include <vm/seg_kmem.h>
50*5084Sjohnlev #include <sys/debug.h>
51*5084Sjohnlev #include <sys/modctl.h>
52*5084Sjohnlev #include <sys/autoconf.h>
53*5084Sjohnlev #include <sys/ddi_impldefs.h>
54*5084Sjohnlev #include <sys/ddi_subrdefs.h>
55*5084Sjohnlev #include <sys/ddi.h>
56*5084Sjohnlev #include <sys/sunddi.h>
57*5084Sjohnlev #include <sys/sunndi.h>
58*5084Sjohnlev #include <sys/sunldi.h>
59*5084Sjohnlev #include <sys/fs/dv_node.h>
60*5084Sjohnlev #include <sys/evtchn_impl.h>
61*5084Sjohnlev #include <sys/gnttab.h>
62*5084Sjohnlev #include <sys/avintr.h>
63*5084Sjohnlev #include <sys/psm.h>
64*5084Sjohnlev #include <sys/spl.h>
65*5084Sjohnlev #include <sys/promif.h>
66*5084Sjohnlev #include <sys/list.h>
67*5084Sjohnlev #include <sys/bootconf.h>
68*5084Sjohnlev #include <sys/bootsvcs.h>
69*5084Sjohnlev #include <sys/bootinfo.h>
70*5084Sjohnlev #include <sys/note.h>
71*5084Sjohnlev #include <sys/xen_mmu.h>
72*5084Sjohnlev #include <xen/sys/xenbus_impl.h>
73*5084Sjohnlev #include <xen/sys/xendev.h>
74*5084Sjohnlev #include <vm/hat_i86.h>
75*5084Sjohnlev #include <sys/scsi/generic/inquiry.h>
76*5084Sjohnlev #include <util/sscanf.h>
77*5084Sjohnlev #include <xen/public/io/xs_wire.h>
78*5084Sjohnlev 
79*5084Sjohnlev 
80*5084Sjohnlev static void xvdi_ring_init_sring(xendev_ring_t *);
81*5084Sjohnlev static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
82*5084Sjohnlev static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
83*5084Sjohnlev static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
84*5084Sjohnlev 
85*5084Sjohnlev static int i_xvdi_add_watches(dev_info_t *);
86*5084Sjohnlev static void i_xvdi_rem_watches(dev_info_t *);
87*5084Sjohnlev 
88*5084Sjohnlev static int i_xvdi_add_watch_oestate(dev_info_t *);
89*5084Sjohnlev static void i_xvdi_rem_watch_oestate(dev_info_t *);
90*5084Sjohnlev static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
91*5084Sjohnlev static void i_xvdi_oestate_handler(void *);
92*5084Sjohnlev 
93*5084Sjohnlev static int i_xvdi_add_watch_hpstate(dev_info_t *);
94*5084Sjohnlev static void i_xvdi_rem_watch_hpstate(dev_info_t *);
95*5084Sjohnlev static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
96*5084Sjohnlev     unsigned int);
97*5084Sjohnlev static void i_xvdi_hpstate_handler(void *);
98*5084Sjohnlev 
99*5084Sjohnlev static int i_xvdi_add_watch_bepath(dev_info_t *);
100*5084Sjohnlev static void i_xvdi_rem_watch_bepath(dev_info_t *);
101*5084Sjohnlev static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
102*5084Sjohnlev     unsigned in);
103*5084Sjohnlev 
104*5084Sjohnlev static void xendev_offline_device(void *);
105*5084Sjohnlev 
106*5084Sjohnlev static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
107*5084Sjohnlev     unsigned int);
108*5084Sjohnlev static void i_xvdi_probe_path_handler(void *);
109*5084Sjohnlev 
/*
 * Per-device-class configuration: maps a xendev device class to its
 * xenstore paths and Solaris driver node names.  One entry per class
 * in the xdci[] table below.
 */
typedef struct xd_cfg {
	xendev_devclass_t devclass;	/* class this entry describes */
	char *xsdev;		/* xenstore device type (xd_xsdev.devicetype) */
	char *xs_path_fe;	/* xenstore frontend path prefix */
	char *xs_path_be;	/* xenstore backend path prefix */
	char *node_fe;		/* frontend driver node name */
	char *node_be;		/* backend driver node name */
	char *device_type;	/* "device_type" property value, or NULL */
	int xd_ipl;		/* interrupt priority level (IPL_*) */
	int flags;		/* XD_DOM_* applicability mask */
} i_xd_cfg_t;

#define	XD_DOM_ZERO	0x01	/* dom0 only. */
#define	XD_DOM_GUEST	0x02	/* Guest domains (i.e. non-dom0). */
#define	XD_DOM_IO	0x04	/* IO domains. */

#define	XD_DOM_ALL	(XD_DOM_ZERO | XD_DOM_GUEST)
127*5084Sjohnlev 
/*
 * Table of supported device classes.  Searched linearly by
 * i_xvdi_devclass2cfg(); entries with NULL xenstore paths are
 * devices that have no xenstore presence.
 */
static i_xd_cfg_t xdci[] = {
	{ XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
	    "console", IPL_CONS, XD_DOM_ALL, },

	{ XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
	    "network", IPL_VIF, XD_DOM_ALL, },

	{ XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
	    "block", IPL_VBD, XD_DOM_ALL, },

	{ XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
	    NULL, 0, XD_DOM_ZERO, },

	{ XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
	    NULL, 0, XD_DOM_ZERO, },
};
/* Number of entries in xdci[]. */
#define	NXDC	(sizeof (xdci) / sizeof (xdci[0]))
154*5084Sjohnlev 
/* Device enumeration: frontend, backend, and the shared worker. */
static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
158*5084Sjohnlev 
/*
 * Xen device channel device access and DMA attributes
 *
 * Access attributes for the shared ring pages set up in
 * xvdi_map_ring() / xvdi_alloc_ring(): native byte order,
 * strictly ordered accesses.
 */
static ddi_device_acc_attr_t xendev_dc_accattr = {
	DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
};
165*5084Sjohnlev 
/*
 * DMA attributes for ring allocation: one physically contiguous,
 * page-aligned segment (sgllen 1, alignment MMU_PAGESIZE), so a ring
 * always occupies a single page that can be granted to the peer.
 */
static ddi_dma_attr_t xendev_dc_dmaattr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	MMU_PAGESIZE,		/* alignment in bytes */
	0x7ff,			/* bitmap of burst sizes */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};
180*5084Sjohnlev 
181*5084Sjohnlev static dev_info_t *xendev_dip = NULL;
182*5084Sjohnlev 
183*5084Sjohnlev #define	XVDI_DBG_STATE	0x01
184*5084Sjohnlev #define	XVDI_DBG_PROBE	0x02
185*5084Sjohnlev 
186*5084Sjohnlev #ifdef DEBUG
187*5084Sjohnlev static int i_xvdi_debug = 0;
188*5084Sjohnlev 
189*5084Sjohnlev #define	XVDI_DPRINTF(flag, format, ...)			\
190*5084Sjohnlev {							\
191*5084Sjohnlev 	if (i_xvdi_debug & (flag))			\
192*5084Sjohnlev 		prom_printf((format), __VA_ARGS__);	\
193*5084Sjohnlev }
194*5084Sjohnlev #else
195*5084Sjohnlev #define	XVDI_DPRINTF(flag, format, ...)
196*5084Sjohnlev #endif /* DEBUG */
197*5084Sjohnlev 
198*5084Sjohnlev static i_xd_cfg_t *
199*5084Sjohnlev i_xvdi_devclass2cfg(xendev_devclass_t devclass)
200*5084Sjohnlev {
201*5084Sjohnlev 	i_xd_cfg_t *xdcp;
202*5084Sjohnlev 	int i;
203*5084Sjohnlev 
204*5084Sjohnlev 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
205*5084Sjohnlev 		if (xdcp->devclass == devclass)
206*5084Sjohnlev 			return (xdcp);
207*5084Sjohnlev 
208*5084Sjohnlev 	return (NULL);
209*5084Sjohnlev }
210*5084Sjohnlev 
211*5084Sjohnlev int
212*5084Sjohnlev xvdi_init_dev(dev_info_t *dip)
213*5084Sjohnlev {
214*5084Sjohnlev 	xendev_devclass_t devcls;
215*5084Sjohnlev 	int vdevnum;
216*5084Sjohnlev 	domid_t domid;
217*5084Sjohnlev 	struct xendev_ppd *pdp;
218*5084Sjohnlev 	i_xd_cfg_t *xdcp;
219*5084Sjohnlev 	boolean_t backend;
220*5084Sjohnlev 	char xsnamebuf[TYPICALMAXPATHLEN];
221*5084Sjohnlev 	char *xsname;
222*5084Sjohnlev 
223*5084Sjohnlev 	devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
224*5084Sjohnlev 	    DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
225*5084Sjohnlev 	vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
226*5084Sjohnlev 	    DDI_PROP_DONTPASS, "vdev", -1);
227*5084Sjohnlev 	domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
228*5084Sjohnlev 	    DDI_PROP_DONTPASS, "domain", DOMID_SELF);
229*5084Sjohnlev 
230*5084Sjohnlev 	backend = (domid != DOMID_SELF);
231*5084Sjohnlev 	xdcp = i_xvdi_devclass2cfg(devcls);
232*5084Sjohnlev 	if (xdcp->device_type != NULL)
233*5084Sjohnlev 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
234*5084Sjohnlev 		    "device_type", xdcp->device_type);
235*5084Sjohnlev 
236*5084Sjohnlev 	pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
237*5084Sjohnlev 	pdp->xd_domain = domid;
238*5084Sjohnlev 	pdp->xd_vdevnum = vdevnum;
239*5084Sjohnlev 	pdp->xd_devclass = devcls;
240*5084Sjohnlev 	pdp->xd_evtchn = INVALID_EVTCHN;
241*5084Sjohnlev 	mutex_init(&pdp->xd_lk, NULL, MUTEX_DRIVER, NULL);
242*5084Sjohnlev 	ddi_set_parent_data(dip, pdp);
243*5084Sjohnlev 
244*5084Sjohnlev 	/*
245*5084Sjohnlev 	 * devices that do not need to interact with xenstore
246*5084Sjohnlev 	 */
247*5084Sjohnlev 	if (vdevnum == -1) {
248*5084Sjohnlev 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
249*5084Sjohnlev 		    "unit-address", "0");
250*5084Sjohnlev 		if (devcls == XEN_CONSOLE)
251*5084Sjohnlev 			(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
252*5084Sjohnlev 			    "pm-hardware-state", "needs-suspend-resume");
253*5084Sjohnlev 		return (DDI_SUCCESS);
254*5084Sjohnlev 	}
255*5084Sjohnlev 
256*5084Sjohnlev 	/*
257*5084Sjohnlev 	 * PV devices that need to probe xenstore
258*5084Sjohnlev 	 */
259*5084Sjohnlev 
260*5084Sjohnlev 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
261*5084Sjohnlev 	    "pm-hardware-state", "needs-suspend-resume");
262*5084Sjohnlev 
263*5084Sjohnlev 	xsname = xsnamebuf;
264*5084Sjohnlev 	if (!backend)
265*5084Sjohnlev 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
266*5084Sjohnlev 		    "%s/%d", xdcp->xs_path_fe, vdevnum);
267*5084Sjohnlev 	else
268*5084Sjohnlev 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
269*5084Sjohnlev 		    "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
270*5084Sjohnlev 
271*5084Sjohnlev 	pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
272*5084Sjohnlev 	pdp->xd_xsdev.devicetype = xdcp->xsdev;
273*5084Sjohnlev 	pdp->xd_xsdev.frontend = (backend ? 0 : 1);
274*5084Sjohnlev 	pdp->xd_xsdev.data = dip;
275*5084Sjohnlev 	pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
276*5084Sjohnlev 	if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
277*5084Sjohnlev 		cmn_err(CE_WARN, "xvdi_init_dev: "
278*5084Sjohnlev 		    "cannot add watches for %s", xsname);
279*5084Sjohnlev 		xvdi_uninit_dev(dip);
280*5084Sjohnlev 		return (DDI_FAILURE);
281*5084Sjohnlev 	}
282*5084Sjohnlev 
283*5084Sjohnlev 	/*
284*5084Sjohnlev 	 * frontend device will use "unit-addr" as
285*5084Sjohnlev 	 * the bus address, which will be set here
286*5084Sjohnlev 	 */
287*5084Sjohnlev 	if (!backend) {
288*5084Sjohnlev 		void *prop_str;
289*5084Sjohnlev 		unsigned int prop_len, addr;
290*5084Sjohnlev 
291*5084Sjohnlev 		switch (devcls) {
292*5084Sjohnlev 		case XEN_VNET:
293*5084Sjohnlev 			if (xenbus_read(XBT_NULL, xsname, "mac", &prop_str,
294*5084Sjohnlev 			    &prop_len) == 0) {
295*5084Sjohnlev 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
296*5084Sjohnlev 				    dip, "mac", prop_str);
297*5084Sjohnlev 				kmem_free(prop_str, prop_len);
298*5084Sjohnlev 			}
299*5084Sjohnlev 			prop_str = NULL;
300*5084Sjohnlev 			if (xenbus_scanf(XBT_NULL, xsname, "handle", "%u",
301*5084Sjohnlev 			    &addr) == 0) {
302*5084Sjohnlev 				char unitaddr[9]; /* hold 32-bit hex */
303*5084Sjohnlev 
304*5084Sjohnlev 				(void) snprintf(unitaddr, 9, "%x", addr);
305*5084Sjohnlev 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
306*5084Sjohnlev 				    dip, "unit-address", unitaddr);
307*5084Sjohnlev 			}
308*5084Sjohnlev 			break;
309*5084Sjohnlev 		case XEN_VBLK:
310*5084Sjohnlev 			if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend,
311*5084Sjohnlev 			    "dev", &prop_str, &prop_len) == 0) {
312*5084Sjohnlev 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
313*5084Sjohnlev 				    dip, "unit-address", prop_str);
314*5084Sjohnlev 				kmem_free(prop_str, prop_len);
315*5084Sjohnlev 			}
316*5084Sjohnlev 			break;
317*5084Sjohnlev 		default:
318*5084Sjohnlev 			break;
319*5084Sjohnlev 		}
320*5084Sjohnlev 	}
321*5084Sjohnlev 
322*5084Sjohnlev 	return (DDI_SUCCESS);
323*5084Sjohnlev }
324*5084Sjohnlev 
325*5084Sjohnlev void
326*5084Sjohnlev xvdi_uninit_dev(dev_info_t *dip)
327*5084Sjohnlev {
328*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
329*5084Sjohnlev 
330*5084Sjohnlev 	if (pdp != NULL) {
331*5084Sjohnlev 		/* Remove any registered callbacks. */
332*5084Sjohnlev 		xvdi_remove_event_handler(dip, NULL);
333*5084Sjohnlev 
334*5084Sjohnlev 		/* Remove any registered watches. */
335*5084Sjohnlev 		i_xvdi_rem_watches(dip);
336*5084Sjohnlev 
337*5084Sjohnlev 		if (pdp->xd_xsdev.nodename != NULL)
338*5084Sjohnlev 			kmem_free((char *)(pdp->xd_xsdev.nodename),
339*5084Sjohnlev 			    strlen(pdp->xd_xsdev.nodename) + 1);
340*5084Sjohnlev 
341*5084Sjohnlev 		ddi_set_parent_data(dip, NULL);
342*5084Sjohnlev 
343*5084Sjohnlev 		mutex_destroy(&pdp->xd_lk);
344*5084Sjohnlev 		kmem_free(pdp, sizeof (*pdp));
345*5084Sjohnlev 	}
346*5084Sjohnlev }
347*5084Sjohnlev 
348*5084Sjohnlev /*
349*5084Sjohnlev  * Bind the event channel for this device instance.
350*5084Sjohnlev  * Currently we only support one evtchn per device instance.
351*5084Sjohnlev  */
352*5084Sjohnlev int
353*5084Sjohnlev xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
354*5084Sjohnlev {
355*5084Sjohnlev 	struct xendev_ppd *pdp;
356*5084Sjohnlev 	domid_t oeid;
357*5084Sjohnlev 	int r;
358*5084Sjohnlev 
359*5084Sjohnlev 	pdp = ddi_get_parent_data(dip);
360*5084Sjohnlev 	ASSERT(pdp != NULL);
361*5084Sjohnlev 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
362*5084Sjohnlev 
363*5084Sjohnlev 	mutex_enter(&pdp->xd_lk);
364*5084Sjohnlev 	if (pdp->xd_devclass == XEN_CONSOLE) {
365*5084Sjohnlev 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
366*5084Sjohnlev 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
367*5084Sjohnlev 		} else {
368*5084Sjohnlev 			pdp->xd_evtchn = INVALID_EVTCHN;
369*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
370*5084Sjohnlev 			return (DDI_SUCCESS);
371*5084Sjohnlev 		}
372*5084Sjohnlev 	} else {
373*5084Sjohnlev 		oeid = pdp->xd_xsdev.otherend_id;
374*5084Sjohnlev 		if (oeid == (domid_t)-1) {
375*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
376*5084Sjohnlev 			return (DDI_FAILURE);
377*5084Sjohnlev 		}
378*5084Sjohnlev 
379*5084Sjohnlev 		if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
380*5084Sjohnlev 			xvdi_dev_error(dip, r, "bind event channel");
381*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
382*5084Sjohnlev 			return (DDI_FAILURE);
383*5084Sjohnlev 		}
384*5084Sjohnlev 	}
385*5084Sjohnlev 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
386*5084Sjohnlev 	mutex_exit(&pdp->xd_lk);
387*5084Sjohnlev 
388*5084Sjohnlev 	return (DDI_SUCCESS);
389*5084Sjohnlev }
390*5084Sjohnlev 
391*5084Sjohnlev /*
392*5084Sjohnlev  * Allocate an event channel for this device instance.
393*5084Sjohnlev  * Currently we only support one evtchn per device instance.
394*5084Sjohnlev  */
395*5084Sjohnlev int
396*5084Sjohnlev xvdi_alloc_evtchn(dev_info_t *dip)
397*5084Sjohnlev {
398*5084Sjohnlev 	struct xendev_ppd *pdp;
399*5084Sjohnlev 	domid_t oeid;
400*5084Sjohnlev 	int rv;
401*5084Sjohnlev 
402*5084Sjohnlev 	pdp = ddi_get_parent_data(dip);
403*5084Sjohnlev 	ASSERT(pdp != NULL);
404*5084Sjohnlev 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
405*5084Sjohnlev 
406*5084Sjohnlev 	mutex_enter(&pdp->xd_lk);
407*5084Sjohnlev 	if (pdp->xd_devclass == XEN_CONSOLE) {
408*5084Sjohnlev 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
409*5084Sjohnlev 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
410*5084Sjohnlev 		} else {
411*5084Sjohnlev 			pdp->xd_evtchn = INVALID_EVTCHN;
412*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
413*5084Sjohnlev 			return (DDI_SUCCESS);
414*5084Sjohnlev 		}
415*5084Sjohnlev 	} else {
416*5084Sjohnlev 		oeid = pdp->xd_xsdev.otherend_id;
417*5084Sjohnlev 		if (oeid == (domid_t)-1) {
418*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
419*5084Sjohnlev 			return (DDI_FAILURE);
420*5084Sjohnlev 		}
421*5084Sjohnlev 
422*5084Sjohnlev 		if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
423*5084Sjohnlev 			xvdi_dev_error(dip, rv, "bind event channel");
424*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
425*5084Sjohnlev 			return (DDI_FAILURE);
426*5084Sjohnlev 		}
427*5084Sjohnlev 	}
428*5084Sjohnlev 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
429*5084Sjohnlev 	mutex_exit(&pdp->xd_lk);
430*5084Sjohnlev 
431*5084Sjohnlev 	return (DDI_SUCCESS);
432*5084Sjohnlev }
433*5084Sjohnlev 
434*5084Sjohnlev /*
435*5084Sjohnlev  * Unbind the event channel for this device instance.
436*5084Sjohnlev  * Currently we only support one evtchn per device instance.
437*5084Sjohnlev  */
438*5084Sjohnlev void
439*5084Sjohnlev xvdi_free_evtchn(dev_info_t *dip)
440*5084Sjohnlev {
441*5084Sjohnlev 	struct xendev_ppd *pdp;
442*5084Sjohnlev 
443*5084Sjohnlev 	pdp = ddi_get_parent_data(dip);
444*5084Sjohnlev 	ASSERT(pdp != NULL);
445*5084Sjohnlev 
446*5084Sjohnlev 	mutex_enter(&pdp->xd_lk);
447*5084Sjohnlev 	if (pdp->xd_evtchn != INVALID_EVTCHN) {
448*5084Sjohnlev 		ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
449*5084Sjohnlev 		pdp->xd_evtchn = INVALID_EVTCHN;
450*5084Sjohnlev 		pdp->xd_ispec.intrspec_vec = 0;
451*5084Sjohnlev 	}
452*5084Sjohnlev 	mutex_exit(&pdp->xd_lk);
453*5084Sjohnlev }
454*5084Sjohnlev 
455*5084Sjohnlev /*
456*5084Sjohnlev  * Map an inter-domain communication ring for a virtual device.
457*5084Sjohnlev  * This is used by backend drivers.
458*5084Sjohnlev  */
459*5084Sjohnlev int
460*5084Sjohnlev xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
461*5084Sjohnlev     grant_ref_t gref, xendev_ring_t **ringpp)
462*5084Sjohnlev {
463*5084Sjohnlev 	domid_t oeid;
464*5084Sjohnlev 	gnttab_map_grant_ref_t mapop;
465*5084Sjohnlev 	gnttab_unmap_grant_ref_t unmapop;
466*5084Sjohnlev 	caddr_t ringva;
467*5084Sjohnlev 	ddi_acc_hdl_t *ap;
468*5084Sjohnlev 	ddi_acc_impl_t *iap;
469*5084Sjohnlev 	xendev_ring_t *ring;
470*5084Sjohnlev 	int err;
471*5084Sjohnlev 	char errstr[] = "mapping in ring buffer";
472*5084Sjohnlev 
473*5084Sjohnlev 	ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
474*5084Sjohnlev 	oeid = xvdi_get_oeid(dip);
475*5084Sjohnlev 
476*5084Sjohnlev 	/* alloc va in backend dom for ring buffer */
477*5084Sjohnlev 	ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
478*5084Sjohnlev 	    0, 0, 0, 0, VM_SLEEP);
479*5084Sjohnlev 
480*5084Sjohnlev 	/* map in ring page */
481*5084Sjohnlev 	hat_prepare_mapping(kas.a_hat, ringva);
482*5084Sjohnlev 	mapop.host_addr = (uint64_t)(uintptr_t)ringva;
483*5084Sjohnlev 	mapop.flags = GNTMAP_host_map;
484*5084Sjohnlev 	mapop.ref = gref;
485*5084Sjohnlev 	mapop.dom = oeid;
486*5084Sjohnlev 	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &mapop, 1);
487*5084Sjohnlev 	if (err) {
488*5084Sjohnlev 		xvdi_fatal_error(dip, err, errstr);
489*5084Sjohnlev 		goto errout1;
490*5084Sjohnlev 	}
491*5084Sjohnlev 
492*5084Sjohnlev 	if (mapop.status != 0) {
493*5084Sjohnlev 		xvdi_fatal_error(dip, err, errstr);
494*5084Sjohnlev 		goto errout2;
495*5084Sjohnlev 	}
496*5084Sjohnlev 	ring->xr_vaddr = ringva;
497*5084Sjohnlev 	ring->xr_grant_hdl = mapop.handle;
498*5084Sjohnlev 	ring->xr_gref = gref;
499*5084Sjohnlev 
500*5084Sjohnlev 	/*
501*5084Sjohnlev 	 * init an acc handle and associate it w/ this ring
502*5084Sjohnlev 	 * this is only for backend drivers. we get the memory by calling
503*5084Sjohnlev 	 * vmem_xalloc(), instead of calling any ddi function, so we have
504*5084Sjohnlev 	 * to init an acc handle by ourselves
505*5084Sjohnlev 	 */
506*5084Sjohnlev 	ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
507*5084Sjohnlev 	ap = impl_acc_hdl_get(ring->xr_acc_hdl);
508*5084Sjohnlev 	ap->ah_vers = VERS_ACCHDL;
509*5084Sjohnlev 	ap->ah_dip = dip;
510*5084Sjohnlev 	ap->ah_xfermodes = DDI_DMA_CONSISTENT;
511*5084Sjohnlev 	ap->ah_acc = xendev_dc_accattr;
512*5084Sjohnlev 	iap = (ddi_acc_impl_t *)ap->ah_platform_private;
513*5084Sjohnlev 	iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
514*5084Sjohnlev 	impl_acc_hdl_init(ap);
515*5084Sjohnlev 	ap->ah_offset = 0;
516*5084Sjohnlev 	ap->ah_len = (off_t)PAGESIZE;
517*5084Sjohnlev 	ap->ah_addr = ring->xr_vaddr;
518*5084Sjohnlev 
519*5084Sjohnlev 	/* init backend ring */
520*5084Sjohnlev 	xvdi_ring_init_back_ring(ring, nentry, entrysize);
521*5084Sjohnlev 
522*5084Sjohnlev 	*ringpp = ring;
523*5084Sjohnlev 
524*5084Sjohnlev 	return (DDI_SUCCESS);
525*5084Sjohnlev 
526*5084Sjohnlev errout2:
527*5084Sjohnlev 	/* unmap ring page */
528*5084Sjohnlev 	unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
529*5084Sjohnlev 	unmapop.handle = ring->xr_grant_hdl;
530*5084Sjohnlev 	unmapop.dev_bus_addr = NULL;
531*5084Sjohnlev 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
532*5084Sjohnlev 	hat_release_mapping(kas.a_hat, ringva);
533*5084Sjohnlev errout1:
534*5084Sjohnlev 	vmem_xfree(heap_arena, ringva, PAGESIZE);
535*5084Sjohnlev 	kmem_free(ring, sizeof (xendev_ring_t));
536*5084Sjohnlev 	return (DDI_FAILURE);
537*5084Sjohnlev }
538*5084Sjohnlev 
539*5084Sjohnlev /*
540*5084Sjohnlev  * Unmap a ring for a virtual device.
541*5084Sjohnlev  * This is used by backend drivers.
542*5084Sjohnlev  */
543*5084Sjohnlev void
544*5084Sjohnlev xvdi_unmap_ring(xendev_ring_t *ring)
545*5084Sjohnlev {
546*5084Sjohnlev 	gnttab_unmap_grant_ref_t unmapop;
547*5084Sjohnlev 
548*5084Sjohnlev 	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
549*5084Sjohnlev 
550*5084Sjohnlev 	impl_acc_hdl_free(ring->xr_acc_hdl);
551*5084Sjohnlev 	unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
552*5084Sjohnlev 	unmapop.handle = ring->xr_grant_hdl;
553*5084Sjohnlev 	unmapop.dev_bus_addr = NULL;
554*5084Sjohnlev 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
555*5084Sjohnlev 	hat_release_mapping(kas.a_hat, ring->xr_vaddr);
556*5084Sjohnlev 	vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
557*5084Sjohnlev 	kmem_free(ring, sizeof (xendev_ring_t));
558*5084Sjohnlev }
559*5084Sjohnlev 
560*5084Sjohnlev /*
561*5084Sjohnlev  * Re-initialise an inter-domain communications ring for the backend domain.
562*5084Sjohnlev  * ring will be re-initialized after re-grant succeed
563*5084Sjohnlev  * ring will be freed if fails to re-grant access to backend domain
564*5084Sjohnlev  * so, don't keep useful data in the ring
565*5084Sjohnlev  * used only in frontend driver
566*5084Sjohnlev  */
567*5084Sjohnlev static void
568*5084Sjohnlev xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
569*5084Sjohnlev {
570*5084Sjohnlev 	paddr_t rpaddr;
571*5084Sjohnlev 	maddr_t rmaddr;
572*5084Sjohnlev 
573*5084Sjohnlev 	ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
574*5084Sjohnlev 	rpaddr = ringp->xr_paddr;
575*5084Sjohnlev 
576*5084Sjohnlev 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
577*5084Sjohnlev 	gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
578*5084Sjohnlev 	    rmaddr >> PAGESHIFT, 0);
579*5084Sjohnlev 	*gref = ringp->xr_gref;
580*5084Sjohnlev 
581*5084Sjohnlev 	/* init frontend ring */
582*5084Sjohnlev 	xvdi_ring_init_sring(ringp);
583*5084Sjohnlev 	xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
584*5084Sjohnlev 	    ringp->xr_entry_size);
585*5084Sjohnlev }
586*5084Sjohnlev 
587*5084Sjohnlev /*
588*5084Sjohnlev  * allocate Xen inter-domain communications ring for Xen virtual devices
589*5084Sjohnlev  * used only in frontend driver
590*5084Sjohnlev  * if *ringpp is not NULL, we'll simply re-init it
591*5084Sjohnlev  */
592*5084Sjohnlev int
593*5084Sjohnlev xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
594*5084Sjohnlev     grant_ref_t *gref, xendev_ring_t **ringpp)
595*5084Sjohnlev {
596*5084Sjohnlev 	size_t len;
597*5084Sjohnlev 	xendev_ring_t *ring;
598*5084Sjohnlev 	ddi_dma_cookie_t dma_cookie;
599*5084Sjohnlev 	uint_t ncookies;
600*5084Sjohnlev 	grant_ref_t ring_gref;
601*5084Sjohnlev 	domid_t oeid;
602*5084Sjohnlev 	maddr_t rmaddr;
603*5084Sjohnlev 
604*5084Sjohnlev 	if (*ringpp) {
605*5084Sjohnlev 		xvdi_reinit_ring(dip, gref, *ringpp);
606*5084Sjohnlev 		return (DDI_SUCCESS);
607*5084Sjohnlev 	}
608*5084Sjohnlev 
609*5084Sjohnlev 	*ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
610*5084Sjohnlev 	oeid = xvdi_get_oeid(dip);
611*5084Sjohnlev 
612*5084Sjohnlev 	/*
613*5084Sjohnlev 	 * Allocate page for this ring buffer
614*5084Sjohnlev 	 */
615*5084Sjohnlev 	if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
616*5084Sjohnlev 	    0, &ring->xr_dma_hdl) != DDI_SUCCESS)
617*5084Sjohnlev 		goto err;
618*5084Sjohnlev 
619*5084Sjohnlev 	if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
620*5084Sjohnlev 	    &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
621*5084Sjohnlev 	    &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
622*5084Sjohnlev 		ddi_dma_free_handle(&ring->xr_dma_hdl);
623*5084Sjohnlev 		goto err;
624*5084Sjohnlev 	}
625*5084Sjohnlev 
626*5084Sjohnlev 	if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
627*5084Sjohnlev 	    ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
628*5084Sjohnlev 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
629*5084Sjohnlev 		ddi_dma_mem_free(&ring->xr_acc_hdl);
630*5084Sjohnlev 		ring->xr_vaddr = NULL;
631*5084Sjohnlev 		ddi_dma_free_handle(&ring->xr_dma_hdl);
632*5084Sjohnlev 		goto err;
633*5084Sjohnlev 	}
634*5084Sjohnlev 	ASSERT(ncookies == 1);
635*5084Sjohnlev 	ring->xr_paddr = dma_cookie.dmac_laddress;
636*5084Sjohnlev 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
637*5084Sjohnlev 	    pa_to_ma(ring->xr_paddr);
638*5084Sjohnlev 
639*5084Sjohnlev 	if ((ring_gref = gnttab_grant_foreign_access(oeid,
640*5084Sjohnlev 	    rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
641*5084Sjohnlev 		(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
642*5084Sjohnlev 		ddi_dma_mem_free(&ring->xr_acc_hdl);
643*5084Sjohnlev 		ring->xr_vaddr = NULL;
644*5084Sjohnlev 		ddi_dma_free_handle(&ring->xr_dma_hdl);
645*5084Sjohnlev 		goto err;
646*5084Sjohnlev 	}
647*5084Sjohnlev 	*gref = ring->xr_gref = ring_gref;
648*5084Sjohnlev 
649*5084Sjohnlev 	/* init frontend ring */
650*5084Sjohnlev 	xvdi_ring_init_sring(ring);
651*5084Sjohnlev 	xvdi_ring_init_front_ring(ring, nentry, entrysize);
652*5084Sjohnlev 
653*5084Sjohnlev 	return (DDI_SUCCESS);
654*5084Sjohnlev 
655*5084Sjohnlev err:
656*5084Sjohnlev 	kmem_free(ring, sizeof (xendev_ring_t));
657*5084Sjohnlev 	return (DDI_FAILURE);
658*5084Sjohnlev }
659*5084Sjohnlev 
/*
 * Release ring buffers allocated for Xen devices
 * used for frontend driver
 *
 * Counterpart of xvdi_alloc_ring(): revokes the peer's grant, then
 * tears down the DMA binding, memory and handle in reverse order of
 * setup, and frees the ring structure itself.
 */
void
xvdi_free_ring(xendev_ring_t *ring)
{
	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));

	/* Revoke the grant before releasing the page under it. */
	(void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
	(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
	ddi_dma_mem_free(&ring->xr_acc_hdl);
	ddi_dma_free_handle(&ring->xr_dma_hdl);
	kmem_free(ring, sizeof (xendev_ring_t));
}
675*5084Sjohnlev 
676*5084Sjohnlev dev_info_t *
677*5084Sjohnlev xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
678*5084Sjohnlev     domid_t dom, int vdev)
679*5084Sjohnlev {
680*5084Sjohnlev 	dev_info_t *dip;
681*5084Sjohnlev 	boolean_t backend;
682*5084Sjohnlev 	i_xd_cfg_t *xdcp;
683*5084Sjohnlev 	char xsnamebuf[TYPICALMAXPATHLEN];
684*5084Sjohnlev 	char *type, *node = NULL, *xsname = NULL;
685*5084Sjohnlev 	unsigned int tlen;
686*5084Sjohnlev 
687*5084Sjohnlev 	ASSERT(DEVI_BUSY_OWNED(parent));
688*5084Sjohnlev 
689*5084Sjohnlev 	backend = (dom != DOMID_SELF);
690*5084Sjohnlev 	xdcp = i_xvdi_devclass2cfg(devclass);
691*5084Sjohnlev 	ASSERT(xdcp != NULL);
692*5084Sjohnlev 
693*5084Sjohnlev 	if (vdev != -1) {
694*5084Sjohnlev 		if (!backend) {
695*5084Sjohnlev 			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
696*5084Sjohnlev 			    "%s/%d", xdcp->xs_path_fe, vdev);
697*5084Sjohnlev 			xsname = xsnamebuf;
698*5084Sjohnlev 			node = xdcp->node_fe;
699*5084Sjohnlev 		} else {
700*5084Sjohnlev 			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
701*5084Sjohnlev 			    "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
702*5084Sjohnlev 			xsname = xsnamebuf;
703*5084Sjohnlev 			node = xdcp->node_be;
704*5084Sjohnlev 		}
705*5084Sjohnlev 	} else {
706*5084Sjohnlev 		node = xdcp->node_fe;
707*5084Sjohnlev 	}
708*5084Sjohnlev 
709*5084Sjohnlev 	/* Must have a driver to use. */
710*5084Sjohnlev 	if (node == NULL)
711*5084Sjohnlev 		return (NULL);
712*5084Sjohnlev 
713*5084Sjohnlev 	/*
714*5084Sjohnlev 	 * We need to check the state of this device before we go
715*5084Sjohnlev 	 * further, otherwise we'll end up with a dead loop if
716*5084Sjohnlev 	 * anything goes wrong.
717*5084Sjohnlev 	 */
718*5084Sjohnlev 	if ((xsname != NULL) &&
719*5084Sjohnlev 	    (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
720*5084Sjohnlev 		return (NULL);
721*5084Sjohnlev 
722*5084Sjohnlev 	ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);
723*5084Sjohnlev 
724*5084Sjohnlev 	/*
725*5084Sjohnlev 	 * Driver binding uses the compatible property _before_ the
726*5084Sjohnlev 	 * node name, so we set the node name to the 'model' of the
727*5084Sjohnlev 	 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
728*5084Sjohnlev 	 * encode both the model and the type in a compatible property
729*5084Sjohnlev 	 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac').  This allows a
730*5084Sjohnlev 	 * driver binding based on the <model,type> pair _before_ a
731*5084Sjohnlev 	 * binding based on the node name.
732*5084Sjohnlev 	 */
733*5084Sjohnlev 	if ((xsname != NULL) &&
734*5084Sjohnlev 	    (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
735*5084Sjohnlev 	    == 0)) {
736*5084Sjohnlev 		size_t clen;
737*5084Sjohnlev 		char *c[1];
738*5084Sjohnlev 
739*5084Sjohnlev 		clen = strlen(node) + strlen(type) + 2;
740*5084Sjohnlev 		c[0] = kmem_alloc(clen, KM_SLEEP);
741*5084Sjohnlev 		(void) snprintf(c[0], clen, "%s,%s", node, type);
742*5084Sjohnlev 
743*5084Sjohnlev 		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
744*5084Sjohnlev 		    dip, "compatible", (char **)c, 1);
745*5084Sjohnlev 
746*5084Sjohnlev 		kmem_free(c[0], clen);
747*5084Sjohnlev 		kmem_free(type, tlen);
748*5084Sjohnlev 	}
749*5084Sjohnlev 
750*5084Sjohnlev 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
751*5084Sjohnlev 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
752*5084Sjohnlev 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);
753*5084Sjohnlev 
754*5084Sjohnlev 	if (i_ddi_devi_attached(parent))
755*5084Sjohnlev 		/*
756*5084Sjohnlev 		 * Cleanup happens in xendev_removechild when the
757*5084Sjohnlev 		 * other end closes or a driver fails to attach.
758*5084Sjohnlev 		 */
759*5084Sjohnlev 		(void) ndi_devi_online(dip, 0);
760*5084Sjohnlev 	else
761*5084Sjohnlev 		(void) ndi_devi_bind_driver(dip, 0);
762*5084Sjohnlev 
763*5084Sjohnlev 	return (dip);
764*5084Sjohnlev }
765*5084Sjohnlev 
766*5084Sjohnlev /*
767*5084Sjohnlev  * xendev_enum_class()
768*5084Sjohnlev  */
769*5084Sjohnlev void
770*5084Sjohnlev xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
771*5084Sjohnlev {
772*5084Sjohnlev 	i_xd_cfg_t *xdcp;
773*5084Sjohnlev 
774*5084Sjohnlev 	xdcp = i_xvdi_devclass2cfg(devclass);
775*5084Sjohnlev 	ASSERT(xdcp != NULL);
776*5084Sjohnlev 
777*5084Sjohnlev 	if (xdcp->xsdev == NULL) {
778*5084Sjohnlev 		int circ;
779*5084Sjohnlev 
780*5084Sjohnlev 		/*
781*5084Sjohnlev 		 * Don't need to probe this kind of device from the
782*5084Sjohnlev 		 * store, just create one if it doesn't exist.
783*5084Sjohnlev 		 */
784*5084Sjohnlev 
785*5084Sjohnlev 		ndi_devi_enter(parent, &circ);
786*5084Sjohnlev 		if (xvdi_find_dev(parent, devclass, DOMID_SELF, -1)
787*5084Sjohnlev 		    == NULL)
788*5084Sjohnlev 			(void) xvdi_create_dev(parent, devclass,
789*5084Sjohnlev 			    DOMID_SELF, -1);
790*5084Sjohnlev 		ndi_devi_exit(parent, circ);
791*5084Sjohnlev 	} else {
792*5084Sjohnlev 		/*
793*5084Sjohnlev 		 * Probe this kind of device from the store, both
794*5084Sjohnlev 		 * frontend and backend.
795*5084Sjohnlev 		 */
796*5084Sjohnlev 
797*5084Sjohnlev 		i_xvdi_enum_fe(parent, xdcp);
798*5084Sjohnlev 		i_xvdi_enum_be(parent, xdcp);
799*5084Sjohnlev 	}
800*5084Sjohnlev }
801*5084Sjohnlev 
802*5084Sjohnlev /*
803*5084Sjohnlev  * xendev_enum_all()
804*5084Sjohnlev  */
805*5084Sjohnlev void
806*5084Sjohnlev xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
807*5084Sjohnlev {
808*5084Sjohnlev 	int i;
809*5084Sjohnlev 	i_xd_cfg_t *xdcp;
810*5084Sjohnlev 	boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
811*5084Sjohnlev 	boolean_t domU = !dom0;
812*5084Sjohnlev 
813*5084Sjohnlev 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
814*5084Sjohnlev 
815*5084Sjohnlev 		if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
816*5084Sjohnlev 			continue;
817*5084Sjohnlev 
818*5084Sjohnlev 		if (domU && !(xdcp->flags & XD_DOM_GUEST))
819*5084Sjohnlev 			continue;
820*5084Sjohnlev 
821*5084Sjohnlev 		/*
822*5084Sjohnlev 		 * Dom0 relies on watchpoints to create non-soft
823*5084Sjohnlev 		 * devices - don't attempt to iterate over the store.
824*5084Sjohnlev 		 */
825*5084Sjohnlev 		if (dom0 && (xdcp->xsdev != NULL))
826*5084Sjohnlev 			continue;
827*5084Sjohnlev 
828*5084Sjohnlev 		/*
829*5084Sjohnlev 		 * If the store is not yet available, don't attempt to
830*5084Sjohnlev 		 * iterate.
831*5084Sjohnlev 		 */
832*5084Sjohnlev 		if (store_unavailable && (xdcp->xsdev != NULL))
833*5084Sjohnlev 			continue;
834*5084Sjohnlev 
835*5084Sjohnlev 		xendev_enum_class(parent, xdcp->devclass);
836*5084Sjohnlev 	}
837*5084Sjohnlev }
838*5084Sjohnlev 
839*5084Sjohnlev xendev_devclass_t
840*5084Sjohnlev xendev_nodename_to_devclass(char *nodename)
841*5084Sjohnlev {
842*5084Sjohnlev 	int i;
843*5084Sjohnlev 	i_xd_cfg_t *xdcp;
844*5084Sjohnlev 
845*5084Sjohnlev 	/*
846*5084Sjohnlev 	 * This relies on the convention that variants of a base
847*5084Sjohnlev 	 * driver share the same prefix and that there are no drivers
848*5084Sjohnlev 	 * which share a common prefix with the name of any other base
849*5084Sjohnlev 	 * drivers.
850*5084Sjohnlev 	 *
851*5084Sjohnlev 	 * So for a base driver 'xnb' (which is the name listed in
852*5084Sjohnlev 	 * xdci) the variants all begin with the string 'xnb' (in fact
853*5084Sjohnlev 	 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
854*5084Sjohnlev 	 * base drivers which have the prefix 'xnb'.
855*5084Sjohnlev 	 */
856*5084Sjohnlev 	ASSERT(nodename != NULL);
857*5084Sjohnlev 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
858*5084Sjohnlev 		if (((xdcp->node_fe != NULL) &&
859*5084Sjohnlev 		    (strncmp(nodename, xdcp->node_fe,
860*5084Sjohnlev 		    strlen(xdcp->node_fe)) == 0)) ||
861*5084Sjohnlev 		    ((xdcp->node_be != NULL) &&
862*5084Sjohnlev 		    (strncmp(nodename, xdcp->node_be,
863*5084Sjohnlev 		    strlen(xdcp->node_be)) == 0)))
864*5084Sjohnlev 
865*5084Sjohnlev 			return (xdcp->devclass);
866*5084Sjohnlev 	}
867*5084Sjohnlev 	return (XEN_INVAL);
868*5084Sjohnlev }
869*5084Sjohnlev 
870*5084Sjohnlev int
871*5084Sjohnlev xendev_devclass_ipl(xendev_devclass_t devclass)
872*5084Sjohnlev {
873*5084Sjohnlev 	i_xd_cfg_t *xdcp;
874*5084Sjohnlev 
875*5084Sjohnlev 	xdcp = i_xvdi_devclass2cfg(devclass);
876*5084Sjohnlev 	ASSERT(xdcp != NULL);
877*5084Sjohnlev 
878*5084Sjohnlev 	return (xdcp->xd_ipl);
879*5084Sjohnlev }
880*5084Sjohnlev 
/*
 * Determine if a devinfo instance exists of a particular device
 * class, domain and xenstore virtual device number.  Returns the
 * matching child of 'parent', or NULL if none exists.
 */
dev_info_t *
xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;

	ASSERT(DEVI_BUSY_OWNED(parent));

	switch (devclass) {
	case XEN_CONSOLE:
	case XEN_XENBUS:
	case XEN_DOMCAPS:
	case XEN_BALLOON:
	case XEN_EVTCHN:
	case XEN_PRIVCMD:
		/* Console and soft devices have no vdev. */
		vdev = -1;
		break;
	default:
		break;
	}

	/*
	 * Walk the children of 'parent' looking for a node whose
	 * <domain, vdev, devclass> triple matches the arguments.
	 */
	for (dip = ddi_get_child(parent); dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {
		int *vdevnump, *domidp, *devclsp, vdevnum;
		uint_t ndomid, nvdevnum, ndevcls;
		xendev_devclass_t devcls;
		domid_t domid;
		struct xendev_ppd *pdp = ddi_get_parent_data(dip);

		if (pdp == NULL) {
			/*
			 * No parent-private data yet; fall back to
			 * the "domain"/"vdev"/"devclass" properties
			 * that were set when the node was created.
			 * A node missing any of these properties is
			 * skipped.
			 */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndomid == 1);
			domid = (domid_t)*domidp;
			ddi_prop_free(domidp);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(nvdevnum == 1);
			vdevnum = *vdevnump;
			ddi_prop_free(vdevnump);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "devclass", &devclsp,
			    &ndevcls) != DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndevcls == 1);
			devcls = (xendev_devclass_t)*devclsp;
			ddi_prop_free(devclsp);
		} else {
			/* Parent-private data is authoritative. */
			domid = pdp->xd_domain;
			vdevnum = pdp->xd_vdevnum;
			devcls = pdp->xd_devclass;
		}

		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
			return (dip);
	}
	return (NULL);
}
950*5084Sjohnlev 
951*5084Sjohnlev int
952*5084Sjohnlev xvdi_get_evtchn(dev_info_t *xdip)
953*5084Sjohnlev {
954*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
955*5084Sjohnlev 
956*5084Sjohnlev 	ASSERT(pdp != NULL);
957*5084Sjohnlev 	return (pdp->xd_evtchn);
958*5084Sjohnlev }
959*5084Sjohnlev 
960*5084Sjohnlev int
961*5084Sjohnlev xvdi_get_vdevnum(dev_info_t *xdip)
962*5084Sjohnlev {
963*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
964*5084Sjohnlev 
965*5084Sjohnlev 	ASSERT(pdp != NULL);
966*5084Sjohnlev 	return (pdp->xd_vdevnum);
967*5084Sjohnlev }
968*5084Sjohnlev 
969*5084Sjohnlev char *
970*5084Sjohnlev xvdi_get_xsname(dev_info_t *xdip)
971*5084Sjohnlev {
972*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
973*5084Sjohnlev 
974*5084Sjohnlev 	ASSERT(pdp != NULL);
975*5084Sjohnlev 	return ((char *)(pdp->xd_xsdev.nodename));
976*5084Sjohnlev }
977*5084Sjohnlev 
978*5084Sjohnlev char *
979*5084Sjohnlev xvdi_get_oename(dev_info_t *xdip)
980*5084Sjohnlev {
981*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
982*5084Sjohnlev 
983*5084Sjohnlev 	ASSERT(pdp != NULL);
984*5084Sjohnlev 	if (pdp->xd_devclass == XEN_CONSOLE)
985*5084Sjohnlev 		return (NULL);
986*5084Sjohnlev 	return ((char *)(pdp->xd_xsdev.otherend));
987*5084Sjohnlev }
988*5084Sjohnlev 
989*5084Sjohnlev struct xenbus_device *
990*5084Sjohnlev xvdi_get_xsd(dev_info_t *xdip)
991*5084Sjohnlev {
992*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
993*5084Sjohnlev 
994*5084Sjohnlev 	ASSERT(pdp != NULL);
995*5084Sjohnlev 	return (&pdp->xd_xsdev);
996*5084Sjohnlev }
997*5084Sjohnlev 
998*5084Sjohnlev domid_t
999*5084Sjohnlev xvdi_get_oeid(dev_info_t *xdip)
1000*5084Sjohnlev {
1001*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1002*5084Sjohnlev 
1003*5084Sjohnlev 	ASSERT(pdp != NULL);
1004*5084Sjohnlev 	if (pdp->xd_devclass == XEN_CONSOLE)
1005*5084Sjohnlev 		return ((domid_t)-1);
1006*5084Sjohnlev 	return ((domid_t)(pdp->xd_xsdev.otherend_id));
1007*5084Sjohnlev }
1008*5084Sjohnlev 
1009*5084Sjohnlev void
1010*5084Sjohnlev xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1011*5084Sjohnlev {
1012*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1013*5084Sjohnlev 
1014*5084Sjohnlev 	ASSERT(pdp != NULL);
1015*5084Sjohnlev 	xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1016*5084Sjohnlev }
1017*5084Sjohnlev 
1018*5084Sjohnlev void
1019*5084Sjohnlev xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1020*5084Sjohnlev {
1021*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1022*5084Sjohnlev 
1023*5084Sjohnlev 	ASSERT(pdp != NULL);
1024*5084Sjohnlev 	xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1025*5084Sjohnlev }
1026*5084Sjohnlev 
/*
 * Taskq handler: process a change in the other end's XenbusState
 * for the device 'arg'.  If the driver has registered an
 * XS_OE_STATE event handler (xd_oe_ehid != NULL), forward the new
 * state to it; otherwise apply a default close-down policy.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	dev_info_t *dip = arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	ddi_eventcookie_t evc;

	mutex_enter(&pdp->xd_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/*
			 * Drop xd_lk across the post so the driver's
			 * handler is not called with it held.
			 */
			mutex_exit(&pdp->xd_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.  The offline is dispatched to the
	 * parent's taskq, presumably so the node is not torn down
	 * from this handler's own context -- TODO confirm.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1070*5084Sjohnlev 
/*
 * Taskq handler: deliver a hotplug status change to the backend
 * driver.  Reads the watched "hotplug-status" xenstore node and
 * posts an XS_HP_STATE event carrying the parsed state.  Silently
 * does nothing if no event cookie exists or the read fails.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		/* Only "connected" is recognized; all else is opaque. */
		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* Drop the lock before calling back into the driver. */
		mutex_exit(&pdp->xd_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_lk);
}
1098*5084Sjohnlev 
1099*5084Sjohnlev void
1100*5084Sjohnlev xvdi_notify_oe(dev_info_t *dip)
1101*5084Sjohnlev {
1102*5084Sjohnlev 	struct xendev_ppd *pdp;
1103*5084Sjohnlev 
1104*5084Sjohnlev 	pdp = ddi_get_parent_data(dip);
1105*5084Sjohnlev 	ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1106*5084Sjohnlev 	ec_notify_via_evtchn(pdp->xd_evtchn);
1107*5084Sjohnlev }
1108*5084Sjohnlev 
1109*5084Sjohnlev static void
1110*5084Sjohnlev i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1111*5084Sjohnlev {
1112*5084Sjohnlev 	dev_info_t *dip = (dev_info_t *)w->dev;
1113*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1114*5084Sjohnlev 	char *be = NULL;
1115*5084Sjohnlev 	unsigned int bel;
1116*5084Sjohnlev 
1117*5084Sjohnlev 	ASSERT(len > XS_WATCH_PATH);
1118*5084Sjohnlev 	ASSERT(vec[XS_WATCH_PATH] != NULL);
1119*5084Sjohnlev 
1120*5084Sjohnlev 	/*
1121*5084Sjohnlev 	 * If the backend is not the same as that we already stored,
1122*5084Sjohnlev 	 * re-set our watch for its' state.
1123*5084Sjohnlev 	 */
1124*5084Sjohnlev 	if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1125*5084Sjohnlev 	    == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1126*5084Sjohnlev 		(void) i_xvdi_add_watch_oestate(dip);
1127*5084Sjohnlev 
1128*5084Sjohnlev 	if (be != NULL) {
1129*5084Sjohnlev 		ASSERT(bel > 0);
1130*5084Sjohnlev 		kmem_free(be, bel);
1131*5084Sjohnlev 	}
1132*5084Sjohnlev }
1133*5084Sjohnlev 
/*
 * Begin watching the XenbusState of the other end of 'dip',
 * creating the taskq on which state-change events will later be
 * delivered.  Caller must hold pdp->xd_lk.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	/*
	 * talk_to_otherend() presumably registers the otherend watch;
	 * on failure, tear down the state set up above.
	 */
	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1174*5084Sjohnlev 
/*
 * Undo i_xvdi_add_watch_oestate(): unregister the otherend watch,
 * destroy the event taskq and reset the otherend bookkeeping.
 * Caller must hold pdp->xd_lk; the lock is dropped around
 * unregister_xenbus_watch() and ddi_taskq_destroy(), presumably
 * because both may block on in-flight callbacks that need xd_lk --
 * TODO confirm.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up: free the watch path and otherend path strings */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1213*5084Sjohnlev 
/*
 * Begin watching the "hotplug-status" xenstore node of this backend
 * device, creating the taskq used to deliver the resulting events.
 * Caller must hold pdp->xd_lk.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* Build "<nodename>/hotplug-status" as the watch path. */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			/* Frees the path and taskq set up above. */
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1264*5084Sjohnlev 
/*
 * Undo i_xvdi_add_watch_hpstate(): unregister the hotplug-status
 * watch, destroy its taskq and free the watched path.  Caller must
 * hold pdp->xd_lk; the lock is dropped around the potentially
 * blocking unregister/destroy calls.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1297*5084Sjohnlev 
/*
 * Establish the full set of xenstore watches for 'dip': the
 * otherend state watch, plus either the backend-path watch
 * (frontends) or the hotplug-status watch (backends).  Acquires and
 * releases pdp->xd_lk.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watches(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_lk);

	if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
		mutex_exit(&pdp->xd_lk);
		return (DDI_FAILURE);
	}

	if (pdp->xd_xsdev.frontend == 1) {
		/*
		 * Frontend devices must watch for the backend path
		 * changing.
		 */
		if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
			goto unwatch_and_fail;
	} else {
		/*
		 * Backend devices must watch for hotplug events.
		 */
		if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
			goto unwatch_and_fail;
	}

	mutex_exit(&pdp->xd_lk);

	return (DDI_SUCCESS);

unwatch_and_fail:
	/* Roll back the otherend watch established above. */
	i_xvdi_rem_watch_oestate(dip);
	mutex_exit(&pdp->xd_lk);

	return (DDI_FAILURE);
}
1337*5084Sjohnlev 
1338*5084Sjohnlev static void
1339*5084Sjohnlev i_xvdi_rem_watches(dev_info_t *dip)
1340*5084Sjohnlev {
1341*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1342*5084Sjohnlev 
1343*5084Sjohnlev 	ASSERT(pdp != NULL);
1344*5084Sjohnlev 
1345*5084Sjohnlev 	mutex_enter(&pdp->xd_lk);
1346*5084Sjohnlev 
1347*5084Sjohnlev 	i_xvdi_rem_watch_oestate(dip);
1348*5084Sjohnlev 
1349*5084Sjohnlev 	if (pdp->xd_xsdev.frontend == 1)
1350*5084Sjohnlev 		i_xvdi_rem_watch_bepath(dip);
1351*5084Sjohnlev 	else
1352*5084Sjohnlev 		i_xvdi_rem_watch_hpstate(dip);
1353*5084Sjohnlev 
1354*5084Sjohnlev 	mutex_exit(&pdp->xd_lk);
1355*5084Sjohnlev }
1356*5084Sjohnlev 
/*
 * Watch the "backend" node under this frontend device's xenstore
 * path so that a change of backend is noticed (the watch fires
 * i_xvdi_bepath_cb()).  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);

	/*
	 * Frontend devices need to watch for the backend path changing.
	 */
	if (pdp->xd_bepath_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* Build "<nodename>/backend" as the watch path. */
		len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/backend",
		    pdp->xd_xsdev.nodename);

		pdp->xd_bepath_watch.node = path;
		pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
		pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
		if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
			/* Registration failed; release the path. */
			kmem_free(path, len);
			pdp->xd_bepath_watch.node = NULL;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1391*5084Sjohnlev 
/*
 * Undo i_xvdi_add_watch_bepath(): unregister the backend-path watch
 * and free the watched path.  Caller must hold pdp->xd_lk, which is
 * dropped around the potentially blocking unregister.
 */
static void
i_xvdi_rem_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);
	ASSERT(mutex_owned(&pdp->xd_lk));

	if (pdp->xd_bepath_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_bepath_watch);
		mutex_enter(&pdp->xd_lk);

		kmem_free((void *)(pdp->xd_bepath_watch.node),
		    strlen(pdp->xd_bepath_watch.node) + 1);
		pdp->xd_bepath_watch.node = NULL;
	}
}
1411*5084Sjohnlev 
1412*5084Sjohnlev int
1413*5084Sjohnlev xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1414*5084Sjohnlev     XenbusState newState)
1415*5084Sjohnlev {
1416*5084Sjohnlev 	int rv;
1417*5084Sjohnlev 	struct xendev_ppd *pdp;
1418*5084Sjohnlev 
1419*5084Sjohnlev 	pdp = ddi_get_parent_data(dip);
1420*5084Sjohnlev 	ASSERT(pdp != NULL);
1421*5084Sjohnlev 
1422*5084Sjohnlev 	XVDI_DPRINTF(XVDI_DBG_STATE,
1423*5084Sjohnlev 	    "xvdi_switch_state: dip 0x%p moves to %d",
1424*5084Sjohnlev 	    (void *)dip, newState);
1425*5084Sjohnlev 
1426*5084Sjohnlev 	rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1427*5084Sjohnlev 	if (rv > 0)
1428*5084Sjohnlev 		cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1429*5084Sjohnlev 
1430*5084Sjohnlev 	return (rv);
1431*5084Sjohnlev }
1432*5084Sjohnlev 
/*
 * Notify hotplug script running in userland
 *
 * Logs an EC_xendev "add" or "remove" sysevent describing this
 * device (domain, vdev, devclass, device name, and whether it is
 * the frontend or backend).  Returns 0 on success, the failing
 * nvlist/sysevent error otherwise, or DDI_FAILURE for an
 * unrecognized 'hpc'.
 */
int
xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
{
	struct xendev_ppd *pdp;
	nvlist_t *attr_list = NULL;
	i_xd_cfg_t *xdcp;
	sysevent_id_t eid;
	int err;
	char devname[256]; /* XXPV dme: ? */

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);

	xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
	ASSERT(xdcp != NULL);

	/* e.g. "xdb0" -- driver name plus instance number. */
	(void) snprintf(devname, sizeof (devname) - 1, "%s%d",
	    ddi_driver_name(dip),  ddi_get_instance(dip));

	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
	if (err != DDI_SUCCESS)
		goto failure;

	err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "device", devname);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "fob",
	    ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
	if (err != DDI_SUCCESS)
		goto failure;

	switch (hpc) {
	case XEN_HP_ADD:
		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
		    "add", attr_list, &eid, DDI_NOSLEEP);
		break;
	case XEN_HP_REMOVE:
		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
		    "remove", attr_list, &eid, DDI_NOSLEEP);
		break;
	default:
		err = DDI_FAILURE;
		goto failure;
	}

	/*
	 * The success path falls through to here deliberately: this
	 * label only frees the attribute list, which is required on
	 * every path.
	 */
failure:
	if (attr_list != NULL)
		nvlist_free(attr_list);

	return (err);
}
1496*5084Sjohnlev 
1497*5084Sjohnlev /* ARGSUSED */
1498*5084Sjohnlev static void
1499*5084Sjohnlev i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
1500*5084Sjohnlev     unsigned int len)
1501*5084Sjohnlev {
1502*5084Sjohnlev 	char *path;
1503*5084Sjohnlev 
1504*5084Sjohnlev 	if (xendev_dip == NULL)
1505*5084Sjohnlev 		xendev_dip = ddi_find_devinfo("xpvd", -1, 0);
1506*5084Sjohnlev 
1507*5084Sjohnlev 	path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);
1508*5084Sjohnlev 
1509*5084Sjohnlev 	(void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
1510*5084Sjohnlev 	    i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
1511*5084Sjohnlev }
1512*5084Sjohnlev 
1513*5084Sjohnlev static void
1514*5084Sjohnlev i_xvdi_watch_device(char *path)
1515*5084Sjohnlev {
1516*5084Sjohnlev 	struct xenbus_watch *w;
1517*5084Sjohnlev 
1518*5084Sjohnlev 	ASSERT(path != NULL);
1519*5084Sjohnlev 
1520*5084Sjohnlev 	w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1521*5084Sjohnlev 	w->node = path;
1522*5084Sjohnlev 	w->callback = &i_xvdi_probe_path_cb;
1523*5084Sjohnlev 	w->dev = NULL;
1524*5084Sjohnlev 
1525*5084Sjohnlev 	if (register_xenbus_watch(w) != 0) {
1526*5084Sjohnlev 		cmn_err(CE_WARN, "i_xvdi_watch_device: "
1527*5084Sjohnlev 		    "cannot set watch on %s", path);
1528*5084Sjohnlev 		kmem_free(w, sizeof (*w));
1529*5084Sjohnlev 		return;
1530*5084Sjohnlev 	}
1531*5084Sjohnlev }
1532*5084Sjohnlev 
1533*5084Sjohnlev void
1534*5084Sjohnlev xvdi_watch_devices(int newstate)
1535*5084Sjohnlev {
1536*5084Sjohnlev 	int devclass;
1537*5084Sjohnlev 
1538*5084Sjohnlev 	/*
1539*5084Sjohnlev 	 * Watch for devices being created in the store.
1540*5084Sjohnlev 	 */
1541*5084Sjohnlev 	if (newstate == XENSTORE_DOWN)
1542*5084Sjohnlev 		return;
1543*5084Sjohnlev 	for (devclass = 0; devclass < NXDC; devclass++) {
1544*5084Sjohnlev 		if (xdci[devclass].xs_path_fe != NULL)
1545*5084Sjohnlev 			i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1546*5084Sjohnlev 		if (xdci[devclass].xs_path_be != NULL)
1547*5084Sjohnlev 			i_xvdi_watch_device(xdci[devclass].xs_path_be);
1548*5084Sjohnlev 	}
1549*5084Sjohnlev }
1550*5084Sjohnlev 
1551*5084Sjohnlev /*
1552*5084Sjohnlev  * Iterate over the store looking for backend devices to create.
1553*5084Sjohnlev  */
1554*5084Sjohnlev static void
1555*5084Sjohnlev i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1556*5084Sjohnlev {
1557*5084Sjohnlev 	char **domains;
1558*5084Sjohnlev 	unsigned int ndomains;
1559*5084Sjohnlev 	int ldomains, i;
1560*5084Sjohnlev 
1561*5084Sjohnlev 	if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1562*5084Sjohnlev 	    &ndomains)) == NULL)
1563*5084Sjohnlev 		return;
1564*5084Sjohnlev 
1565*5084Sjohnlev 	for (i = 0, ldomains = 0; i < ndomains; i++) {
1566*5084Sjohnlev 		ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1567*5084Sjohnlev 
1568*5084Sjohnlev 		i_xvdi_enum_worker(parent, xdcp, domains[i]);
1569*5084Sjohnlev 	}
1570*5084Sjohnlev 	kmem_free(domains, ldomains);
1571*5084Sjohnlev }
1572*5084Sjohnlev 
/*
 * Iterate over the store looking for frontend devices to create.
 */
static void
i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
{
	/* NULL domain selects this class's frontend path and DOMID_SELF */
	i_xvdi_enum_worker(parent, xdcp, NULL);
}
1581*5084Sjohnlev 
1582*5084Sjohnlev static void
1583*5084Sjohnlev i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
1584*5084Sjohnlev     char *domain)
1585*5084Sjohnlev {
1586*5084Sjohnlev 	char *path, *domain_path, *ep;
1587*5084Sjohnlev 	char **devices;
1588*5084Sjohnlev 	unsigned int ndevices;
1589*5084Sjohnlev 	int ldevices, j, circ;
1590*5084Sjohnlev 	domid_t dom;
1591*5084Sjohnlev 
1592*5084Sjohnlev 	if (domain == NULL) {
1593*5084Sjohnlev 		dom = DOMID_SELF;
1594*5084Sjohnlev 		path = xdcp->xs_path_fe;
1595*5084Sjohnlev 		domain_path = "";
1596*5084Sjohnlev 	} else {
1597*5084Sjohnlev 		(void) ddi_strtol(domain, &ep, 0, (long *)&dom);
1598*5084Sjohnlev 		path = xdcp->xs_path_be;
1599*5084Sjohnlev 		domain_path = domain;
1600*5084Sjohnlev 	}
1601*5084Sjohnlev 
1602*5084Sjohnlev 	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
1603*5084Sjohnlev 	    &ndevices)) == NULL)
1604*5084Sjohnlev 		return;
1605*5084Sjohnlev 
1606*5084Sjohnlev 	for (j = 0, ldevices = 0; j < ndevices; j++) {
1607*5084Sjohnlev 		int vdev;
1608*5084Sjohnlev 
1609*5084Sjohnlev 		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
1610*5084Sjohnlev 		(void) ddi_strtol(devices[j], &ep, 0, (long *)&vdev);
1611*5084Sjohnlev 
1612*5084Sjohnlev 		ndi_devi_enter(parent, &circ);
1613*5084Sjohnlev 
1614*5084Sjohnlev 		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev)
1615*5084Sjohnlev 		    == NULL)
1616*5084Sjohnlev 			(void) xvdi_create_dev(parent, xdcp->devclass,
1617*5084Sjohnlev 			    dom, vdev);
1618*5084Sjohnlev 
1619*5084Sjohnlev 		ndi_devi_exit(parent, circ);
1620*5084Sjohnlev 	}
1621*5084Sjohnlev 	kmem_free(devices, ldevices);
1622*5084Sjohnlev }
1623*5084Sjohnlev 
/*
 * Leaf drivers should call this in their detach() routine during suspend.
 */
void
xvdi_suspend(dev_info_t *dip)
{
	/* drop this device's xenstore watches so no callbacks fire */
	i_xvdi_rem_watches(dip);
}
1632*5084Sjohnlev 
/*
 * Leaf drivers should call this in their attach() routine during resume.
 */
int
xvdi_resume(dev_info_t *dip)
{
	/*
	 * Re-establish the xenstore watches removed by xvdi_suspend();
	 * propagates i_xvdi_add_watches()'s DDI return value.
	 */
	return (i_xvdi_add_watches(dip));
}
1641*5084Sjohnlev 
1642*5084Sjohnlev /*
1643*5084Sjohnlev  * Add event handler for the leaf driver
1644*5084Sjohnlev  * to handle event triggered by the change in xenstore
1645*5084Sjohnlev  */
1646*5084Sjohnlev int
1647*5084Sjohnlev xvdi_add_event_handler(dev_info_t *dip, char *name,
1648*5084Sjohnlev     void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *))
1649*5084Sjohnlev {
1650*5084Sjohnlev 	ddi_eventcookie_t ecv;
1651*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1652*5084Sjohnlev 	ddi_callback_id_t *cbid;
1653*5084Sjohnlev 
1654*5084Sjohnlev 	ASSERT(pdp != NULL);
1655*5084Sjohnlev 
1656*5084Sjohnlev 	mutex_enter(&pdp->xd_lk);
1657*5084Sjohnlev 
1658*5084Sjohnlev 	if (strcmp(name, XS_OE_STATE) == 0) {
1659*5084Sjohnlev 		ASSERT(pdp->xd_xsdev.otherend != NULL);
1660*5084Sjohnlev 
1661*5084Sjohnlev 		cbid = &pdp->xd_oe_ehid;
1662*5084Sjohnlev 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1663*5084Sjohnlev 		if (pdp->xd_xsdev.frontend == 1) {
1664*5084Sjohnlev 			mutex_exit(&pdp->xd_lk);
1665*5084Sjohnlev 			return (DDI_FAILURE);
1666*5084Sjohnlev 		}
1667*5084Sjohnlev 
1668*5084Sjohnlev 		ASSERT(pdp->xd_hp_watch.node != NULL);
1669*5084Sjohnlev 
1670*5084Sjohnlev 		cbid = &pdp->xd_hp_ehid;
1671*5084Sjohnlev 	} else {
1672*5084Sjohnlev 		/* Unsupported watch. */
1673*5084Sjohnlev 		mutex_exit(&pdp->xd_lk);
1674*5084Sjohnlev 		return (DDI_FAILURE);
1675*5084Sjohnlev 	}
1676*5084Sjohnlev 
1677*5084Sjohnlev 	/*
1678*5084Sjohnlev 	 * No event handler provided, take default action to handle
1679*5084Sjohnlev 	 * event.
1680*5084Sjohnlev 	 */
1681*5084Sjohnlev 	if (evthandler == NULL) {
1682*5084Sjohnlev 		mutex_exit(&pdp->xd_lk);
1683*5084Sjohnlev 		return (DDI_SUCCESS);
1684*5084Sjohnlev 	}
1685*5084Sjohnlev 
1686*5084Sjohnlev 	ASSERT(*cbid == NULL);
1687*5084Sjohnlev 
1688*5084Sjohnlev 	if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
1689*5084Sjohnlev 		cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
1690*5084Sjohnlev 		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
1691*5084Sjohnlev 		mutex_exit(&pdp->xd_lk);
1692*5084Sjohnlev 		return (DDI_FAILURE);
1693*5084Sjohnlev 	}
1694*5084Sjohnlev 	if (ddi_add_event_handler(dip, ecv, evthandler, NULL, cbid)
1695*5084Sjohnlev 	    != DDI_SUCCESS) {
1696*5084Sjohnlev 		cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
1697*5084Sjohnlev 		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
1698*5084Sjohnlev 		*cbid = NULL;
1699*5084Sjohnlev 		mutex_exit(&pdp->xd_lk);
1700*5084Sjohnlev 		return (DDI_FAILURE);
1701*5084Sjohnlev 	}
1702*5084Sjohnlev 
1703*5084Sjohnlev 	mutex_exit(&pdp->xd_lk);
1704*5084Sjohnlev 
1705*5084Sjohnlev 	return (DDI_SUCCESS);
1706*5084Sjohnlev }
1707*5084Sjohnlev 
1708*5084Sjohnlev /*
1709*5084Sjohnlev  * Remove event handler for the leaf driver and unwatch xenstore
1710*5084Sjohnlev  * so, driver will not be notified when xenstore entry changed later
1711*5084Sjohnlev  */
1712*5084Sjohnlev void
1713*5084Sjohnlev xvdi_remove_event_handler(dev_info_t *dip, char *name)
1714*5084Sjohnlev {
1715*5084Sjohnlev 	struct xendev_ppd *pdp;
1716*5084Sjohnlev 	boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
1717*5084Sjohnlev 	ddi_callback_id_t oeid = NULL, hpid = NULL;
1718*5084Sjohnlev 
1719*5084Sjohnlev 	pdp = ddi_get_parent_data(dip);
1720*5084Sjohnlev 	ASSERT(pdp != NULL);
1721*5084Sjohnlev 
1722*5084Sjohnlev 	if (name == NULL) {
1723*5084Sjohnlev 		rem_oe = B_TRUE;
1724*5084Sjohnlev 		rem_hp = B_TRUE;
1725*5084Sjohnlev 	} else if (strcmp(name, XS_OE_STATE) == 0) {
1726*5084Sjohnlev 		rem_oe = B_TRUE;
1727*5084Sjohnlev 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1728*5084Sjohnlev 		rem_hp = B_TRUE;
1729*5084Sjohnlev 	} else {
1730*5084Sjohnlev 		cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
1731*5084Sjohnlev 		return;
1732*5084Sjohnlev 	}
1733*5084Sjohnlev 
1734*5084Sjohnlev 	mutex_enter(&pdp->xd_lk);
1735*5084Sjohnlev 
1736*5084Sjohnlev 	if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
1737*5084Sjohnlev 		oeid = pdp->xd_oe_ehid;
1738*5084Sjohnlev 		pdp->xd_oe_ehid = NULL;
1739*5084Sjohnlev 	}
1740*5084Sjohnlev 
1741*5084Sjohnlev 	if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
1742*5084Sjohnlev 		hpid = pdp->xd_hp_ehid;
1743*5084Sjohnlev 		pdp->xd_hp_ehid = NULL;
1744*5084Sjohnlev 	}
1745*5084Sjohnlev 
1746*5084Sjohnlev 	mutex_exit(&pdp->xd_lk);
1747*5084Sjohnlev 
1748*5084Sjohnlev 	if (oeid != NULL)
1749*5084Sjohnlev 		(void) ddi_remove_event_handler(oeid);
1750*5084Sjohnlev 	if (hpid != NULL)
1751*5084Sjohnlev 		(void) ddi_remove_event_handler(hpid);
1752*5084Sjohnlev }
1753*5084Sjohnlev 
1754*5084Sjohnlev 
1755*5084Sjohnlev /*
1756*5084Sjohnlev  * common ring interfaces
1757*5084Sjohnlev  */
1758*5084Sjohnlev 
#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * Number of slots in the shared ring.  Fixed to expand the macro
 * parameter `_ringp' -- the previous definition referenced the
 * caller's variable `ringp' and only worked because every call site
 * happened to pass an argument with that exact name.
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx)		\
	(FRONT_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx)		\
	(BACK_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
1768*5084Sjohnlev 
1769*5084Sjohnlev unsigned int
1770*5084Sjohnlev xvdi_ring_avail_slots(xendev_ring_t *ringp)
1771*5084Sjohnlev {
1772*5084Sjohnlev 	comif_ring_fe_t *frp;
1773*5084Sjohnlev 	comif_ring_be_t *brp;
1774*5084Sjohnlev 
1775*5084Sjohnlev 	if (ringp->xr_frontend) {
1776*5084Sjohnlev 		frp = FRONT_RING(ringp);
1777*5084Sjohnlev 		return (GET_RING_SIZE(ringp) -
1778*5084Sjohnlev 		    (frp->req_prod_pvt - frp->rsp_cons));
1779*5084Sjohnlev 	} else {
1780*5084Sjohnlev 		brp = BACK_RING(ringp);
1781*5084Sjohnlev 		return (GET_RING_SIZE(ringp) -
1782*5084Sjohnlev 		    (brp->rsp_prod_pvt - brp->req_cons));
1783*5084Sjohnlev 	}
1784*5084Sjohnlev }
1785*5084Sjohnlev 
1786*5084Sjohnlev int
1787*5084Sjohnlev xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
1788*5084Sjohnlev {
1789*5084Sjohnlev 	comif_ring_be_t *brp;
1790*5084Sjohnlev 
1791*5084Sjohnlev 	ASSERT(!ringp->xr_frontend);
1792*5084Sjohnlev 	brp = BACK_RING(ringp);
1793*5084Sjohnlev 	return ((brp->req_cons !=
1794*5084Sjohnlev 	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
1795*5084Sjohnlev 	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
1796*5084Sjohnlev }
1797*5084Sjohnlev 
1798*5084Sjohnlev int
1799*5084Sjohnlev xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
1800*5084Sjohnlev {
1801*5084Sjohnlev 	comif_ring_fe_t *frp;
1802*5084Sjohnlev 
1803*5084Sjohnlev 	ASSERT(ringp->xr_frontend);
1804*5084Sjohnlev 	frp = FRONT_RING(ringp);
1805*5084Sjohnlev 	return (frp->req_prod_pvt !=
1806*5084Sjohnlev 	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
1807*5084Sjohnlev }
1808*5084Sjohnlev 
1809*5084Sjohnlev int
1810*5084Sjohnlev xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
1811*5084Sjohnlev {
1812*5084Sjohnlev 	comif_ring_fe_t *frp;
1813*5084Sjohnlev 
1814*5084Sjohnlev 	ASSERT(ringp->xr_frontend);
1815*5084Sjohnlev 	frp = FRONT_RING(ringp);
1816*5084Sjohnlev 	return (frp->rsp_cons !=
1817*5084Sjohnlev 	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
1818*5084Sjohnlev }
1819*5084Sjohnlev 
/* NOTE: req_event will be increased as needed */
/*
 * Return a pointer into the ring for the caller's next entry:
 *  - frontend: the next free request slot, or NULL when the ring is
 *    full (advances the private request producer on success);
 *  - backend: the next unconsumed request, or NULL when none is
 *    pending (advances the request consumer on success).
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/*
			 * Nothing pending: arm req_event so the peer
			 * notifies us at the next request, then re-check
			 * after the barrier in case one arrived while we
			 * were arming (closes the missed-wakeup window).
			 */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
1852*5084Sjohnlev 
/*
 * Publish the frontend's private request producer to the shared ring.
 * Returns nonzero when the backend must be notified (i.e. the peer's
 * req_event threshold falls within the newly published range).
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* make req_prod visible before sampling the peer's req_event */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	/* notify iff req_event lies in the half-open interval (old, new] */
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
1871*5084Sjohnlev 
/* NOTE: rsp_event will be increased as needed */
/*
 * Return a pointer into the ring for the caller's next entry:
 *  - backend: the next response slot, advancing the private response
 *    producer (no full check -- presumably bounded by requests
 *    previously consumed);
 *  - frontend: the next unconsumed response, or NULL when none is
 *    pending (advances the response consumer on success).
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/*
			 * Nothing pending: arm rsp_event so the peer
			 * notifies us at the next response, then re-check
			 * after the barrier in case one arrived while we
			 * were arming (closes the missed-wakeup window).
			 */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
1901*5084Sjohnlev 
/*
 * Publish the backend's private response producer to the shared ring.
 * Returns nonzero when the frontend must be notified (i.e. the peer's
 * rsp_event threshold falls within the newly published range).
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* make rsp_prod visible before sampling the peer's rsp_event */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	/* notify iff rsp_event lies in the half-open interval (old, new] */
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
1920*5084Sjohnlev 
1921*5084Sjohnlev static void
1922*5084Sjohnlev xvdi_ring_init_sring(xendev_ring_t *ringp)
1923*5084Sjohnlev {
1924*5084Sjohnlev 	ddi_acc_handle_t acchdl;
1925*5084Sjohnlev 	comif_sring_t *xsrp;
1926*5084Sjohnlev 	int i;
1927*5084Sjohnlev 
1928*5084Sjohnlev 	xsrp = (comif_sring_t *)ringp->xr_vaddr;
1929*5084Sjohnlev 	acchdl = ringp->xr_acc_hdl;
1930*5084Sjohnlev 
1931*5084Sjohnlev 	/* shared ring initialization */
1932*5084Sjohnlev 	ddi_put32(acchdl, &xsrp->req_prod, 0);
1933*5084Sjohnlev 	ddi_put32(acchdl, &xsrp->rsp_prod, 0);
1934*5084Sjohnlev 	ddi_put32(acchdl, &xsrp->req_event, 1);
1935*5084Sjohnlev 	ddi_put32(acchdl, &xsrp->rsp_event, 1);
1936*5084Sjohnlev 	for (i = 0; i < sizeof (xsrp->pad); i++)
1937*5084Sjohnlev 		ddi_put8(acchdl, xsrp->pad + i, 0);
1938*5084Sjohnlev }
1939*5084Sjohnlev 
1940*5084Sjohnlev static void
1941*5084Sjohnlev xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1942*5084Sjohnlev {
1943*5084Sjohnlev 	comif_ring_fe_t *xfrp;
1944*5084Sjohnlev 
1945*5084Sjohnlev 	xfrp = &ringp->xr_sring.fr;
1946*5084Sjohnlev 	xfrp->req_prod_pvt = 0;
1947*5084Sjohnlev 	xfrp->rsp_cons = 0;
1948*5084Sjohnlev 	xfrp->nr_ents = nentry;
1949*5084Sjohnlev 	xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1950*5084Sjohnlev 
1951*5084Sjohnlev 	ringp->xr_frontend = 1;
1952*5084Sjohnlev 	ringp->xr_entry_size = entrysize;
1953*5084Sjohnlev }
1954*5084Sjohnlev 
1955*5084Sjohnlev static void
1956*5084Sjohnlev xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1957*5084Sjohnlev {
1958*5084Sjohnlev 	comif_ring_be_t *xbrp;
1959*5084Sjohnlev 
1960*5084Sjohnlev 	xbrp = &ringp->xr_sring.br;
1961*5084Sjohnlev 	xbrp->rsp_prod_pvt = 0;
1962*5084Sjohnlev 	xbrp->req_cons = 0;
1963*5084Sjohnlev 	xbrp->nr_ents = nentry;
1964*5084Sjohnlev 	xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1965*5084Sjohnlev 
1966*5084Sjohnlev 	ringp->xr_frontend = 0;
1967*5084Sjohnlev 	ringp->xr_entry_size = entrysize;
1968*5084Sjohnlev }
1969*5084Sjohnlev 
1970*5084Sjohnlev static void
1971*5084Sjohnlev xendev_offline_device(void *arg)
1972*5084Sjohnlev {
1973*5084Sjohnlev 	dev_info_t *dip = (dev_info_t *)arg;
1974*5084Sjohnlev 	char devname[MAXNAMELEN] = {0};
1975*5084Sjohnlev 
1976*5084Sjohnlev 	/*
1977*5084Sjohnlev 	 * This is currently the only chance to delete a devinfo node, which
1978*5084Sjohnlev 	 * is _not_ always successful.
1979*5084Sjohnlev 	 */
1980*5084Sjohnlev 	(void) ddi_deviname(dip, devname);
1981*5084Sjohnlev 	(void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
1982*5084Sjohnlev 	(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
1983*5084Sjohnlev }
1984*5084Sjohnlev 
1985*5084Sjohnlev static void
1986*5084Sjohnlev i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
1987*5084Sjohnlev {
1988*5084Sjohnlev 	dev_info_t *dip = (dev_info_t *)dev->data;
1989*5084Sjohnlev 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1990*5084Sjohnlev 
1991*5084Sjohnlev 	/*
1992*5084Sjohnlev 	 * Don't trigger two consecutive ndi_devi_offline on the same
1993*5084Sjohnlev 	 * dip.
1994*5084Sjohnlev 	 */
1995*5084Sjohnlev 	if ((oestate == XenbusStateClosed) &&
1996*5084Sjohnlev 	    (dev->otherend_state == XenbusStateClosed))
1997*5084Sjohnlev 		return;
1998*5084Sjohnlev 
1999*5084Sjohnlev 	dev->otherend_state = oestate;
2000*5084Sjohnlev 	(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
2001*5084Sjohnlev 	    i_xvdi_oestate_handler, (void *)dip, DDI_SLEEP);
2002*5084Sjohnlev }
2003*5084Sjohnlev 
/*ARGSUSED*/
/*
 * xenstore watch callback for a device's hotplug state; defers the
 * work to the device's hotplug taskq so the xenbus watch thread is
 * not blocked.
 */
static void
i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	dev_info_t *dip = (dev_info_t *)w->dev;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
}
2015*5084Sjohnlev 
2016*5084Sjohnlev static void
2017*5084Sjohnlev i_xvdi_probe_path_handler(void *arg)
2018*5084Sjohnlev {
2019*5084Sjohnlev 	dev_info_t *parent;
2020*5084Sjohnlev 	char *path = arg, *p = NULL;
2021*5084Sjohnlev 	int i, vdev, circ;
2022*5084Sjohnlev 	i_xd_cfg_t *xdcp;
2023*5084Sjohnlev 	boolean_t frontend;
2024*5084Sjohnlev 	domid_t dom;
2025*5084Sjohnlev 
2026*5084Sjohnlev 	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {
2027*5084Sjohnlev 
2028*5084Sjohnlev 		if ((xdcp->xs_path_fe != NULL) &&
2029*5084Sjohnlev 		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
2030*5084Sjohnlev 		    == 0)) {
2031*5084Sjohnlev 
2032*5084Sjohnlev 			frontend = B_TRUE;
2033*5084Sjohnlev 			p = path + strlen(xdcp->xs_path_fe);
2034*5084Sjohnlev 			break;
2035*5084Sjohnlev 		}
2036*5084Sjohnlev 
2037*5084Sjohnlev 		if ((xdcp->xs_path_be != NULL) &&
2038*5084Sjohnlev 		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
2039*5084Sjohnlev 		    == 0)) {
2040*5084Sjohnlev 
2041*5084Sjohnlev 			frontend = B_FALSE;
2042*5084Sjohnlev 			p = path + strlen(xdcp->xs_path_be);
2043*5084Sjohnlev 			break;
2044*5084Sjohnlev 		}
2045*5084Sjohnlev 
2046*5084Sjohnlev 	}
2047*5084Sjohnlev 
2048*5084Sjohnlev 	if (p == NULL) {
2049*5084Sjohnlev 		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
2050*5084Sjohnlev 		    "unexpected path prefix in %s", path);
2051*5084Sjohnlev 		goto done;
2052*5084Sjohnlev 	}
2053*5084Sjohnlev 
2054*5084Sjohnlev 	if (frontend) {
2055*5084Sjohnlev 		dom = DOMID_SELF;
2056*5084Sjohnlev 		if (sscanf(p, "/%d/", &vdev) != 1) {
2057*5084Sjohnlev 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2058*5084Sjohnlev 			    "i_xvdi_probe_path_handler: "
2059*5084Sjohnlev 			    "cannot parse frontend path %s",
2060*5084Sjohnlev 			    path);
2061*5084Sjohnlev 			goto done;
2062*5084Sjohnlev 		}
2063*5084Sjohnlev 	} else {
2064*5084Sjohnlev 		if (sscanf(p, "/%d/%d/", &dom, &vdev) != 2) {
2065*5084Sjohnlev 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2066*5084Sjohnlev 			    "i_xvdi_probe_path_handler: "
2067*5084Sjohnlev 			    "cannot parse backend path %s",
2068*5084Sjohnlev 			    path);
2069*5084Sjohnlev 			goto done;
2070*5084Sjohnlev 		}
2071*5084Sjohnlev 	}
2072*5084Sjohnlev 
2073*5084Sjohnlev 	parent = xendev_dip;
2074*5084Sjohnlev 	ASSERT(parent != NULL);
2075*5084Sjohnlev 
2076*5084Sjohnlev 	ndi_devi_enter(parent, &circ);
2077*5084Sjohnlev 
2078*5084Sjohnlev 	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
2079*5084Sjohnlev 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2080*5084Sjohnlev 		    "i_xvdi_probe_path_handler: create for %s", path);
2081*5084Sjohnlev 		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
2082*5084Sjohnlev 	} else {
2083*5084Sjohnlev 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2084*5084Sjohnlev 		    "i_xvdi_probe_path_handler: %s already exists", path);
2085*5084Sjohnlev 	}
2086*5084Sjohnlev 
2087*5084Sjohnlev 	ndi_devi_exit(parent, circ);
2088*5084Sjohnlev 
2089*5084Sjohnlev done:
2090*5084Sjohnlev 	kmem_free(path, strlen(path) + 1);
2091*5084Sjohnlev }
2092