xref: /onnv-gate/usr/src/uts/common/xen/io/xdb.c (revision 10175:dd9708d1f561)
15084Sjohnlev /*
25084Sjohnlev  * CDDL HEADER START
35084Sjohnlev  *
45084Sjohnlev  * The contents of this file are subject to the terms of the
55084Sjohnlev  * Common Development and Distribution License (the "License").
65084Sjohnlev  * You may not use this file except in compliance with the License.
75084Sjohnlev  *
85084Sjohnlev  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
95084Sjohnlev  * or http://www.opensolaris.org/os/licensing.
105084Sjohnlev  * See the License for the specific language governing permissions
115084Sjohnlev  * and limitations under the License.
125084Sjohnlev  *
135084Sjohnlev  * When distributing Covered Code, include this CDDL HEADER in each
145084Sjohnlev  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
155084Sjohnlev  * If applicable, add the following below this CDDL HEADER, with the
165084Sjohnlev  * fields enclosed by brackets "[]" replaced with your own identifying
175084Sjohnlev  * information: Portions Copyright [yyyy] [name of copyright owner]
185084Sjohnlev  *
195084Sjohnlev  * CDDL HEADER END
205084Sjohnlev  */
215084Sjohnlev 
225084Sjohnlev /*
238863SEdward.Pilatowicz@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
245084Sjohnlev  * Use is subject to license terms.
255084Sjohnlev  */
265084Sjohnlev 
275084Sjohnlev /*
285084Sjohnlev  * Note: This is the backend part of the split PV disk driver. It is not a
295084Sjohnlev  * nexus driver, nor a leaf (block/char/stream) driver, and it currently does
305084Sjohnlev  * not create any minor node; so although it runs in the backend domain, it is
315084Sjohnlev  * not used directly from within dom0. It simply takes block I/O requests
325084Sjohnlev  * issued by the frontend from a shared page (the blkif ring buffer defined by
335084Sjohnlev  * Xen) between the backend and frontend domains, generates a buf, and pushes
345084Sjohnlev  * it down to the underlying disk target driver via the LDI interface. When
355084Sjohnlev  * the buf is done, this driver generates a response and puts it into the ring
365084Sjohnlev  * buffer to inform the frontend of the status of the I/O request it issued.
375084Sjohnlev  * When a new virtual device entry is added in xenstore, a watch event is sent
385084Sjohnlev  * from Xen to the xvdi framework, which, in turn, creates the devinfo node
395084Sjohnlev  * and tries to attach this driver (see xvdi_create_dev). When the frontend
405084Sjohnlev  * peer changes its state to XenbusStateClosed, an event is also sent from Xen
415084Sjohnlev  * to the xvdi framework, which detaches and removes the devinfo node (see
425084Sjohnlev  * i_xvdi_oestate_handler). I/O requests taken from the ring buffer and events
435084Sjohnlev  * coming from xenstore cannot be trusted; we verify them in xdb_get_buf()
445084Sjohnlev  * and xdb_check_state_transition().
455084Sjohnlev  *
465084Sjohnlev  * Virtual device configuration is read from and written to xenstore via the
475084Sjohnlev  * xenbus_* interfaces. The driver also uses xvdi_* to interact with the
485084Sjohnlev  * hypervisor; there is an ongoing effort to make xvdi_* cover all xenbus_*.
495084Sjohnlev  */
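
/*
 * Illustrative sketch only (not part of the driver): the request flow
 * described above, expressed in terms of this file's own routines.  Locking,
 * error handling and the taskq hand-off are omitted; see xdb_intr() and
 * xdb_biodone() for the real code.
 *
 *	xdb_intr()				event channel interrupt
 *	    while (xdb_get_request(vdp, &req)) {
 *		xreq = xdb_get_req(vdp);	allocate a shadow request
 *		bp = xdb_get_buf(vdp, &req, xreq); map grant refs, build a buf
 *		queue bp;			taskq calls ldi_strategy()
 *	    }
 *	xdb_biodone(bp)				underlying driver is done
 *	    unmap grant pages, then xdb_push_response() + xvdi_notify_oe()
 */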
505084Sjohnlev 
515741Smrj #include <sys/types.h>
525741Smrj #include <sys/conf.h>
535741Smrj #include <sys/ddi.h>
545741Smrj #include <sys/dditypes.h>
555741Smrj #include <sys/sunddi.h>
565741Smrj #include <sys/list.h>
575741Smrj #include <sys/dkio.h>
585741Smrj #include <sys/cmlb.h>
595741Smrj #include <sys/vtoc.h>
605741Smrj #include <sys/modctl.h>
615741Smrj #include <sys/bootconf.h>
625741Smrj #include <sys/promif.h>
635741Smrj #include <sys/sysmacros.h>
645741Smrj #include <public/io/xenbus.h>
658863SEdward.Pilatowicz@Sun.COM #include <public/io/xs_wire.h>
665741Smrj #include <xen/sys/xenbus_impl.h>
675741Smrj #include <xen/sys/xendev.h>
685741Smrj #include <sys/gnttab.h>
695741Smrj #include <sys/scsi/generic/inquiry.h>
705741Smrj #include <vm/seg_kmem.h>
715741Smrj #include <vm/hat_i86.h>
725741Smrj #include <sys/gnttab.h>
735084Sjohnlev #include <sys/lofi.h>
745741Smrj #include <io/xdf.h>
756144Srab #include <xen/io/blkif_impl.h>
765741Smrj #include <io/xdb.h>
775084Sjohnlev 
785084Sjohnlev static xdb_t *xdb_statep;
795084Sjohnlev static int xdb_debug = 0;
805084Sjohnlev 
818863SEdward.Pilatowicz@Sun.COM static void xdb_close(dev_info_t *);
826144Srab static int xdb_push_response(xdb_t *, uint64_t, uint8_t, uint16_t);
836144Srab static int xdb_get_request(xdb_t *, blkif_request_t *);
846144Srab static void blkif_get_x86_32_req(blkif_request_t *, blkif_x86_32_request_t *);
856144Srab static void blkif_get_x86_64_req(blkif_request_t *, blkif_x86_64_request_t *);
868863SEdward.Pilatowicz@Sun.COM static int xdb_biodone(buf_t *);
878863SEdward.Pilatowicz@Sun.COM 
886144Srab 
895084Sjohnlev #ifdef DEBUG
905084Sjohnlev /*
915084Sjohnlev  * debug aid functions
925084Sjohnlev  */
935084Sjohnlev 
945084Sjohnlev static void
955084Sjohnlev logva(xdb_t *vdp, uint64_t va)
965084Sjohnlev {
975084Sjohnlev 	uint64_t *page_addrs;
985084Sjohnlev 	int i;
995084Sjohnlev 
1005084Sjohnlev 	page_addrs = vdp->page_addrs;
1016144Srab 	for (i = 0; i < XDB_MAX_IO_PAGES(vdp); i++) {
1025084Sjohnlev 		if (page_addrs[i] == va)
1035084Sjohnlev 			debug_enter("VA remapping found!");
1045084Sjohnlev 	}
1055084Sjohnlev 
1066144Srab 	for (i = 0; i < XDB_MAX_IO_PAGES(vdp); i++) {
1075084Sjohnlev 		if (page_addrs[i] == 0) {
1085084Sjohnlev 			page_addrs[i] = va;
1095084Sjohnlev 			break;
1105084Sjohnlev 		}
1115084Sjohnlev 	}
1126144Srab 	ASSERT(i < XDB_MAX_IO_PAGES(vdp));
1135084Sjohnlev }
1145084Sjohnlev 
1155084Sjohnlev static void
1165084Sjohnlev unlogva(xdb_t *vdp, uint64_t va)
1175084Sjohnlev {
1185084Sjohnlev 	uint64_t *page_addrs;
1195084Sjohnlev 	int i;
1205084Sjohnlev 
1215084Sjohnlev 	page_addrs = vdp->page_addrs;
1226144Srab 	for (i = 0; i < XDB_MAX_IO_PAGES(vdp); i++) {
1235084Sjohnlev 		if (page_addrs[i] == va) {
1245084Sjohnlev 			page_addrs[i] = 0;
1255084Sjohnlev 			break;
1265084Sjohnlev 		}
1275084Sjohnlev 	}
1286144Srab 	ASSERT(i < XDB_MAX_IO_PAGES(vdp));
1295084Sjohnlev }
1305084Sjohnlev 
1315084Sjohnlev static void
1325084Sjohnlev xdb_dump_request_oe(blkif_request_t *req)
1335084Sjohnlev {
1345084Sjohnlev 	int i;
1355084Sjohnlev 
1365084Sjohnlev 	/*
1375084Sjohnlev 	 * Exploit the public interface definitions for BLKIF_OP_READ
1385084Sjohnlev 	 * etc..
1395084Sjohnlev 	 */
1405084Sjohnlev 	char *op_name[] = { "read", "write", "barrier", "flush" };
1415084Sjohnlev 
1425084Sjohnlev 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "op=%s", op_name[req->operation]));
1435084Sjohnlev 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "num of segments=%d",
1445084Sjohnlev 	    req->nr_segments));
1455084Sjohnlev 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "handle=%d", req->handle));
1465084Sjohnlev 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "id=%llu",
1475084Sjohnlev 	    (unsigned long long)req->id));
1485084Sjohnlev 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "start sector=%llu",
1495084Sjohnlev 	    (unsigned long long)req->sector_number));
1505084Sjohnlev 	for (i = 0; i < req->nr_segments; i++) {
1515084Sjohnlev 		XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "gref=%d, first sec=%d,"
1525084Sjohnlev 		    "last sec=%d", req->seg[i].gref, req->seg[i].first_sect,
1535084Sjohnlev 		    req->seg[i].last_sect));
1545084Sjohnlev 	}
1555084Sjohnlev }
1565084Sjohnlev #endif /* DEBUG */
1575084Sjohnlev 
1585084Sjohnlev /*
1595084Sjohnlev  * Statistics.
1605084Sjohnlev  */
1615084Sjohnlev static char *xdb_stats[] = {
1625084Sjohnlev 	"rd_reqs",
1635084Sjohnlev 	"wr_reqs",
1645084Sjohnlev 	"br_reqs",
1655084Sjohnlev 	"fl_reqs",
1665084Sjohnlev 	"oo_reqs"
1675084Sjohnlev };
1685084Sjohnlev 
1695084Sjohnlev static int
1705084Sjohnlev xdb_kstat_update(kstat_t *ksp, int flag)
1715084Sjohnlev {
1725084Sjohnlev 	xdb_t *vdp;
1735084Sjohnlev 	kstat_named_t *knp;
1745084Sjohnlev 
1755084Sjohnlev 	if (flag != KSTAT_READ)
1765084Sjohnlev 		return (EACCES);
1775084Sjohnlev 
1785084Sjohnlev 	vdp = ksp->ks_private;
1795084Sjohnlev 	knp = ksp->ks_data;
1805084Sjohnlev 
1815084Sjohnlev 	/*
1825084Sjohnlev 	 * Assignment order should match that of the names in
1835084Sjohnlev 	 * xdb_stats.
1845084Sjohnlev 	 */
1855084Sjohnlev 	(knp++)->value.ui64 = vdp->xs_stat_req_reads;
1865084Sjohnlev 	(knp++)->value.ui64 = vdp->xs_stat_req_writes;
1875084Sjohnlev 	(knp++)->value.ui64 = vdp->xs_stat_req_barriers;
1885084Sjohnlev 	(knp++)->value.ui64 = vdp->xs_stat_req_flushes;
1895084Sjohnlev 	(knp++)->value.ui64 = 0; /* oo_req */
1905084Sjohnlev 
1915084Sjohnlev 	return (0);
1925084Sjohnlev }
1935084Sjohnlev 
1945084Sjohnlev static boolean_t
1955084Sjohnlev xdb_kstat_init(xdb_t *vdp)
1965084Sjohnlev {
1975084Sjohnlev 	int nstat = sizeof (xdb_stats) / sizeof (xdb_stats[0]);
1985084Sjohnlev 	char **cp = xdb_stats;
1995084Sjohnlev 	kstat_named_t *knp;
2005084Sjohnlev 
2015084Sjohnlev 	if ((vdp->xs_kstats = kstat_create("xdb",
2025084Sjohnlev 	    ddi_get_instance(vdp->xs_dip),
2035084Sjohnlev 	    "req_statistics", "block", KSTAT_TYPE_NAMED,
2045084Sjohnlev 	    nstat, 0)) == NULL)
2055084Sjohnlev 		return (B_FALSE);
2065084Sjohnlev 
2075084Sjohnlev 	vdp->xs_kstats->ks_private = vdp;
2085084Sjohnlev 	vdp->xs_kstats->ks_update = xdb_kstat_update;
2095084Sjohnlev 
2105084Sjohnlev 	knp = vdp->xs_kstats->ks_data;
2115084Sjohnlev 	while (nstat > 0) {
2125084Sjohnlev 		kstat_named_init(knp, *cp, KSTAT_DATA_UINT64);
2135084Sjohnlev 		knp++;
2145084Sjohnlev 		cp++;
2155084Sjohnlev 		nstat--;
2165084Sjohnlev 	}
2175084Sjohnlev 
2185084Sjohnlev 	kstat_install(vdp->xs_kstats);
2195084Sjohnlev 
2205084Sjohnlev 	return (B_TRUE);
2215084Sjohnlev }
2225084Sjohnlev 
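/*
 * These request counters are visible from the control domain through the
 * normal kstat interfaces.  A hypothetical way to inspect them (output
 * format abbreviated):
 *
 *	# kstat -m xdb -n req_statistics
 *	module: xdb    instance: 0    name: req_statistics    class: block
 *	    rd_reqs    wr_reqs    br_reqs    fl_reqs    oo_reqs
 */
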
2238863SEdward.Pilatowicz@Sun.COM static char *
2248863SEdward.Pilatowicz@Sun.COM i_pathname(dev_info_t *dip)
2258863SEdward.Pilatowicz@Sun.COM {
2268863SEdward.Pilatowicz@Sun.COM 	char *path, *rv;
2278863SEdward.Pilatowicz@Sun.COM 
2288863SEdward.Pilatowicz@Sun.COM 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2298863SEdward.Pilatowicz@Sun.COM 	(void) ddi_pathname(dip, path);
2308863SEdward.Pilatowicz@Sun.COM 	rv = strdup(path);
2318863SEdward.Pilatowicz@Sun.COM 	kmem_free(path, MAXPATHLEN);
2328863SEdward.Pilatowicz@Sun.COM 
2338863SEdward.Pilatowicz@Sun.COM 	return (rv);
2348863SEdward.Pilatowicz@Sun.COM }
2355084Sjohnlev 
2365084Sjohnlev static buf_t *
2375084Sjohnlev xdb_get_buf(xdb_t *vdp, blkif_request_t *req, xdb_request_t *xreq)
2385084Sjohnlev {
2395084Sjohnlev 	buf_t *bp;
2405084Sjohnlev 	uint8_t segs, curseg;
2415084Sjohnlev 	int sectors;
2425084Sjohnlev 	int i, err;
2435084Sjohnlev 	gnttab_map_grant_ref_t mapops[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2445084Sjohnlev 	ddi_acc_handle_t acchdl;
2455084Sjohnlev 
2465084Sjohnlev 	acchdl = vdp->xs_ring_hdl;
2475084Sjohnlev 	bp = XDB_XREQ2BP(xreq);
2485084Sjohnlev 	curseg = xreq->xr_curseg;
2495084Sjohnlev 	/* init a new xdb request */
2505084Sjohnlev 	if (req != NULL) {
2515084Sjohnlev 		ASSERT(MUTEX_HELD(&vdp->xs_iomutex));
2525084Sjohnlev 		boolean_t pagemapok = B_TRUE;
2535084Sjohnlev 		uint8_t op = ddi_get8(acchdl, &req->operation);
2545084Sjohnlev 
2555084Sjohnlev 		xreq->xr_vdp = vdp;
2565084Sjohnlev 		xreq->xr_op = op;
2575084Sjohnlev 		xreq->xr_id = ddi_get64(acchdl, &req->id);
2585084Sjohnlev 		segs = xreq->xr_buf_pages = ddi_get8(acchdl, &req->nr_segments);
2595084Sjohnlev 		if (segs == 0) {
2605084Sjohnlev 			if (op != BLKIF_OP_FLUSH_DISKCACHE)
2615084Sjohnlev 				cmn_err(CE_WARN, "!non-BLKIF_OP_FLUSH_DISKCACHE"
2625084Sjohnlev 				    " is seen from domain %d with zero "
2635084Sjohnlev 				    "length data buffer!", vdp->xs_peer);
2645084Sjohnlev 			bioinit(bp);
2655084Sjohnlev 			bp->b_bcount = 0;
2665084Sjohnlev 			bp->b_lblkno = 0;
2675084Sjohnlev 			bp->b_un.b_addr = NULL;
2685084Sjohnlev 			return (bp);
2695084Sjohnlev 		} else if (op == BLKIF_OP_FLUSH_DISKCACHE) {
2705084Sjohnlev 			cmn_err(CE_WARN, "!BLKIF_OP_FLUSH_DISKCACHE"
2715084Sjohnlev 			    " is seen from domain %d with non-zero "
2725084Sjohnlev 			    "length data buffer!", vdp->xs_peer);
2735084Sjohnlev 		}
2745084Sjohnlev 
2755084Sjohnlev 		/*
2765084Sjohnlev 		 * segs should be no bigger than BLKIF_MAX_SEGMENTS_PER_REQUEST
2775084Sjohnlev 		 * according to Xen's definition of the blkif interface, so we
2785084Sjohnlev 		 * sanity check it here
2795084Sjohnlev 		 */
2805084Sjohnlev 		if (segs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
2815084Sjohnlev 			segs = xreq->xr_buf_pages =
2825084Sjohnlev 			    BLKIF_MAX_SEGMENTS_PER_REQUEST;
2835084Sjohnlev 
2845084Sjohnlev 		for (i = 0; i < segs; i++) {
2855084Sjohnlev 			uint8_t fs, ls;
2865084Sjohnlev 
2875084Sjohnlev 			mapops[i].host_addr =
2885084Sjohnlev 			    (uint64_t)(uintptr_t)XDB_IOPAGE_VA(
2895084Sjohnlev 			    vdp->xs_iopage_va, xreq->xr_idx, i);
2905084Sjohnlev 			mapops[i].dom = vdp->xs_peer;
2915084Sjohnlev 			mapops[i].ref = ddi_get32(acchdl, &req->seg[i].gref);
2925084Sjohnlev 			mapops[i].flags = GNTMAP_host_map;
2935084Sjohnlev 			if (op != BLKIF_OP_READ)
2945084Sjohnlev 				mapops[i].flags |= GNTMAP_readonly;
2955084Sjohnlev 
2965084Sjohnlev 			fs = ddi_get8(acchdl, &req->seg[i].first_sect);
2975084Sjohnlev 			ls = ddi_get8(acchdl, &req->seg[i].last_sect);
2985084Sjohnlev 
2995084Sjohnlev 			/*
3005084Sjohnlev 			 * first_sect should be no bigger than last_sect and
3015084Sjohnlev 			 * both of them should be no bigger than
3029917SMark.Johnson@Sun.COM 			 * XB_LAST_SECTOR_IN_SEG according to Xen's definition
3035084Sjohnlev 			 * of the blkif interface, so sanity check again
3045084Sjohnlev 			 */
3059917SMark.Johnson@Sun.COM 			if (fs > XB_LAST_SECTOR_IN_SEG)
3069917SMark.Johnson@Sun.COM 				fs = XB_LAST_SECTOR_IN_SEG;
3079917SMark.Johnson@Sun.COM 			if (ls > XB_LAST_SECTOR_IN_SEG)
3089917SMark.Johnson@Sun.COM 				ls = XB_LAST_SECTOR_IN_SEG;
3095084Sjohnlev 			if (fs > ls)
3105084Sjohnlev 				fs = ls;
3115084Sjohnlev 
3125084Sjohnlev 			xreq->xr_segs[i].fs = fs;
3135084Sjohnlev 			xreq->xr_segs[i].ls = ls;
3145084Sjohnlev 		}
3155084Sjohnlev 
3165084Sjohnlev 		/* map in io pages */
3177756SMark.Johnson@Sun.COM 		err = xen_map_gref(GNTTABOP_map_grant_ref, mapops, i, B_FALSE);
3185084Sjohnlev 		if (err != 0)
3195084Sjohnlev 			return (NULL);
3205084Sjohnlev 		for (i = 0; i < segs; i++) {
3215084Sjohnlev 			/*
3225084Sjohnlev 			 * Although HYPERVISOR_grant_table_op() returned no
3235084Sjohnlev 			 * error, the mapping of each individual page can still
3245084Sjohnlev 			 * fail. So we have to check here and handle the error
3255084Sjohnlev 			 * if needed
3265084Sjohnlev 			 */
3275084Sjohnlev 			if (mapops[i].status != GNTST_okay) {
3285084Sjohnlev 				int j;
3295084Sjohnlev 				for (j = 0; j < i; j++) {
3305084Sjohnlev #ifdef DEBUG
3315084Sjohnlev 					unlogva(vdp, mapops[j].host_addr);
3325084Sjohnlev #endif
3335084Sjohnlev 					xen_release_pfn(
3345084Sjohnlev 					    xreq->xr_plist[j].p_pagenum);
3355084Sjohnlev 				}
3365084Sjohnlev 				pagemapok = B_FALSE;
3375084Sjohnlev 				break;
3385084Sjohnlev 			}
3395084Sjohnlev 			/* record page mapping handle for unmapping later */
3405084Sjohnlev 			xreq->xr_page_hdls[i] = mapops[i].handle;
3415084Sjohnlev #ifdef DEBUG
3425084Sjohnlev 			logva(vdp, mapops[i].host_addr);
3435084Sjohnlev #endif
3445084Sjohnlev 			/*
3455084Sjohnlev 			 * Pass the MFNs down using the shadow list (xr_pplist)
3465084Sjohnlev 			 *
3475084Sjohnlev 			 * This is pretty ugly since we have implicit knowledge
3485084Sjohnlev 			 * of how the rootnex binds buffers.
3495084Sjohnlev 			 * The GNTTABOP_map_grant_ref op makes us do some ugly
3505084Sjohnlev 			 * stuff since we're not allowed to touch these PTEs
3515084Sjohnlev 			 * from the VM.
3525084Sjohnlev 			 *
3535084Sjohnlev 			 * Obviously, these aren't real page_t's. The rootnex
3545084Sjohnlev 			 * only needs p_pagenum.
3555084Sjohnlev 			 * Also, don't use btop() here or 32 bit PAE breaks.
3565084Sjohnlev 			 */
3575084Sjohnlev 			xreq->xr_pplist[i] = &xreq->xr_plist[i];
3585084Sjohnlev 			xreq->xr_plist[i].p_pagenum =
3595084Sjohnlev 			    xen_assign_pfn(mapops[i].dev_bus_addr >> PAGESHIFT);
3605084Sjohnlev 		}
3615084Sjohnlev 
3625084Sjohnlev 		/*
3635084Sjohnlev 		 * Not all pages were mapped in successfully. Unmap those that
3645084Sjohnlev 		 * were mapped in and return failure
3655084Sjohnlev 		 */
3665084Sjohnlev 		if (!pagemapok) {
3675084Sjohnlev 			gnttab_unmap_grant_ref_t unmapop;
3685084Sjohnlev 
3695084Sjohnlev 			for (i = 0; i < segs; i++) {
3705084Sjohnlev 				if (mapops[i].status != GNTST_okay)
3715084Sjohnlev 					continue;
3725084Sjohnlev 				unmapop.host_addr =
3735084Sjohnlev 				    (uint64_t)(uintptr_t)XDB_IOPAGE_VA(
3745084Sjohnlev 				    vdp->xs_iopage_va, xreq->xr_idx, i);
3755084Sjohnlev 				unmapop.dev_bus_addr = NULL;
3765084Sjohnlev 				unmapop.handle = mapops[i].handle;
3775084Sjohnlev 				(void) HYPERVISOR_grant_table_op(
3785084Sjohnlev 				    GNTTABOP_unmap_grant_ref, &unmapop, 1);
3795084Sjohnlev 			}
3805084Sjohnlev 
3815084Sjohnlev 			return (NULL);
3825084Sjohnlev 		}
3835084Sjohnlev 		bioinit(bp);
3845084Sjohnlev 		bp->b_lblkno = ddi_get64(acchdl, &req->sector_number);
3855084Sjohnlev 		bp->b_flags = B_BUSY | B_SHADOW | B_PHYS;
3865084Sjohnlev 		bp->b_flags |= (ddi_get8(acchdl, &req->operation) ==
3875084Sjohnlev 		    BLKIF_OP_READ) ? B_READ : (B_WRITE | B_ASYNC);
3885084Sjohnlev 	} else {
3895084Sjohnlev 		uint64_t blkst;
3905084Sjohnlev 		int isread;
3915084Sjohnlev 
3925084Sjohnlev 		/* reuse this buf */
3935084Sjohnlev 		blkst = bp->b_lblkno + bp->b_bcount / DEV_BSIZE;
3945084Sjohnlev 		isread = bp->b_flags & B_READ;
3955084Sjohnlev 		bioreset(bp);
3965084Sjohnlev 		bp->b_lblkno = blkst;
3975084Sjohnlev 		bp->b_flags = B_BUSY | B_SHADOW | B_PHYS;
3985084Sjohnlev 		bp->b_flags |= isread ? B_READ : (B_WRITE | B_ASYNC);
3995084Sjohnlev 		XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "reuse buf, xreq is %d!!",
4005084Sjohnlev 		    xreq->xr_idx));
4015084Sjohnlev 	}
4025084Sjohnlev 
4035084Sjohnlev 	/* form a buf */
4045084Sjohnlev 	bp->b_un.b_addr = XDB_IOPAGE_VA(vdp->xs_iopage_va, xreq->xr_idx,
4055084Sjohnlev 	    curseg) + xreq->xr_segs[curseg].fs * DEV_BSIZE;
4065084Sjohnlev 	bp->b_shadow = &xreq->xr_pplist[curseg];
4075084Sjohnlev 	bp->b_iodone = xdb_biodone;
4085084Sjohnlev 	sectors = 0;
4099917SMark.Johnson@Sun.COM 
4109917SMark.Johnson@Sun.COM 	/*
4119917SMark.Johnson@Sun.COM 	 * Run through the segments. There are XB_NUM_SECTORS_PER_SEG sectors
4129917SMark.Johnson@Sun.COM 	 * per segment. On some OSes (e.g. Linux), there may be empty gaps
4139917SMark.Johnson@Sun.COM 	 * between segments (i.e. the first segment may end on sector 6 and
4149917SMark.Johnson@Sun.COM 	 * the second segment may start on sector 4).
4159917SMark.Johnson@Sun.COM 	 *
4169917SMark.Johnson@Sun.COM 	 * If a segment's first sector is not set to 0, and this is not the
4179917SMark.Johnson@Sun.COM 	 * first segment in our buf, end this buf now.
4189917SMark.Johnson@Sun.COM 	 *
4199917SMark.Johnson@Sun.COM 	 * If a segment's last sector is not set to XB_LAST_SECTOR_IN_SEG, and
4209917SMark.Johnson@Sun.COM 	 * this is not the last segment in the request, add this segment into
4219917SMark.Johnson@Sun.COM 	 * the buf, then end this buf (updating the pointer to point to the
4229917SMark.Johnson@Sun.COM 	 * next segment next time around).
4239917SMark.Johnson@Sun.COM 	 */
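	/*
	 * Hypothetical example (assuming 512-byte sectors and 8 sectors per
	 * page, i.e. XB_LAST_SECTOR_IN_SEG == 7): segments {fs=0,ls=7},
	 * {fs=0,ls=3}, {fs=2,ls=7} produce one buf of 8 + 4 sectors covering
	 * the first two segments (the second ends early, so the buf ends
	 * there), then a second buf of 6 sectors for the third segment,
	 * whose non-zero first sector starts a new buf.
	 */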
4245084Sjohnlev 	for (i = curseg; i < xreq->xr_buf_pages; i++) {
4259917SMark.Johnson@Sun.COM 		if ((xreq->xr_segs[i].fs != 0) && (i != curseg)) {
4265084Sjohnlev 			break;
4275084Sjohnlev 		}
4285084Sjohnlev 		sectors += (xreq->xr_segs[i].ls - xreq->xr_segs[i].fs + 1);
4299917SMark.Johnson@Sun.COM 		if ((xreq->xr_segs[i].ls != XB_LAST_SECTOR_IN_SEG) &&
4309917SMark.Johnson@Sun.COM 		    (i != (xreq->xr_buf_pages - 1))) {
4319917SMark.Johnson@Sun.COM 			i++;
4329917SMark.Johnson@Sun.COM 			break;
4339917SMark.Johnson@Sun.COM 		}
4345084Sjohnlev 	}
4355084Sjohnlev 	xreq->xr_curseg = i;
4365084Sjohnlev 	bp->b_bcount = sectors * DEV_BSIZE;
4375084Sjohnlev 	bp->b_bufsize = bp->b_bcount;
4385084Sjohnlev 
4395084Sjohnlev 	return (bp);
4405084Sjohnlev }
4415084Sjohnlev 
4425084Sjohnlev static xdb_request_t *
4435084Sjohnlev xdb_get_req(xdb_t *vdp)
4445084Sjohnlev {
4455084Sjohnlev 	xdb_request_t *req;
4465084Sjohnlev 	int idx;
4475084Sjohnlev 
4485084Sjohnlev 	ASSERT(MUTEX_HELD(&vdp->xs_iomutex));
4495084Sjohnlev 	ASSERT(vdp->xs_free_req != -1);
4505084Sjohnlev 	req = &vdp->xs_req[vdp->xs_free_req];
4515084Sjohnlev 	vdp->xs_free_req = req->xr_next;
4525084Sjohnlev 	idx = req->xr_idx;
4535084Sjohnlev 	bzero(req, sizeof (xdb_request_t));
4545084Sjohnlev 	req->xr_idx = idx;
4555084Sjohnlev 	return (req);
4565084Sjohnlev }
4575084Sjohnlev 
4585084Sjohnlev static void
4595084Sjohnlev xdb_free_req(xdb_request_t *req)
4605084Sjohnlev {
4615084Sjohnlev 	xdb_t *vdp = req->xr_vdp;
4625084Sjohnlev 
4635084Sjohnlev 	ASSERT(MUTEX_HELD(&vdp->xs_iomutex));
4645084Sjohnlev 	req->xr_next = vdp->xs_free_req;
4655084Sjohnlev 	vdp->xs_free_req = req->xr_idx;
4665084Sjohnlev }
4675084Sjohnlev 
4685084Sjohnlev static void
4695084Sjohnlev xdb_response(xdb_t *vdp, blkif_request_t *req, boolean_t ok)
4705084Sjohnlev {
4715084Sjohnlev 	ddi_acc_handle_t acchdl = vdp->xs_ring_hdl;
4725084Sjohnlev 
4736144Srab 	if (xdb_push_response(vdp, ddi_get64(acchdl, &req->id),
4746144Srab 	    ddi_get8(acchdl, &req->operation), ok))
4755084Sjohnlev 		xvdi_notify_oe(vdp->xs_dip);
4765084Sjohnlev }
4775084Sjohnlev 
4785084Sjohnlev static void
4795084Sjohnlev xdb_init_ioreqs(xdb_t *vdp)
4805084Sjohnlev {
4815084Sjohnlev 	int i;
4825084Sjohnlev 
4836144Srab 	ASSERT(vdp->xs_nentry);
4846144Srab 
4856144Srab 	if (vdp->xs_req == NULL)
4866144Srab 		vdp->xs_req = kmem_alloc(vdp->xs_nentry *
4876144Srab 		    sizeof (xdb_request_t), KM_SLEEP);
4886144Srab #ifdef DEBUG
4896144Srab 	if (vdp->page_addrs == NULL)
4906144Srab 		vdp->page_addrs = kmem_zalloc(XDB_MAX_IO_PAGES(vdp) *
4916144Srab 		    sizeof (uint64_t), KM_SLEEP);
4926144Srab #endif
4936144Srab 	for (i = 0; i < vdp->xs_nentry; i++) {
4945084Sjohnlev 		vdp->xs_req[i].xr_idx = i;
4955084Sjohnlev 		vdp->xs_req[i].xr_next = i + 1;
4965084Sjohnlev 	}
4976144Srab 	vdp->xs_req[vdp->xs_nentry - 1].xr_next = -1;
4985084Sjohnlev 	vdp->xs_free_req = 0;
4995084Sjohnlev 
5005084Sjohnlev 	/* alloc va in host dom for io page mapping */
5015084Sjohnlev 	vdp->xs_iopage_va = vmem_xalloc(heap_arena,
5026144Srab 	    XDB_MAX_IO_PAGES(vdp) * PAGESIZE, PAGESIZE, 0, 0, 0, 0,
5035084Sjohnlev 	    VM_SLEEP);
5046144Srab 	for (i = 0; i < XDB_MAX_IO_PAGES(vdp); i++)
5055084Sjohnlev 		hat_prepare_mapping(kas.a_hat,
5067756SMark.Johnson@Sun.COM 		    vdp->xs_iopage_va + i * PAGESIZE, NULL);
5075084Sjohnlev }
5085084Sjohnlev 
5095084Sjohnlev static void
5105084Sjohnlev xdb_uninit_ioreqs(xdb_t *vdp)
5115084Sjohnlev {
5125084Sjohnlev 	int i;
5135084Sjohnlev 
5146144Srab 	for (i = 0; i < XDB_MAX_IO_PAGES(vdp); i++)
5155084Sjohnlev 		hat_release_mapping(kas.a_hat,
5165084Sjohnlev 		    vdp->xs_iopage_va + i * PAGESIZE);
5175084Sjohnlev 	vmem_xfree(heap_arena, vdp->xs_iopage_va,
5186144Srab 	    XDB_MAX_IO_PAGES(vdp) * PAGESIZE);
5196144Srab 	if (vdp->xs_req != NULL) {
5206144Srab 		kmem_free(vdp->xs_req, vdp->xs_nentry * sizeof (xdb_request_t));
5216144Srab 		vdp->xs_req = NULL;
5226144Srab 	}
5236144Srab #ifdef DEBUG
5246144Srab 	if (vdp->page_addrs != NULL) {
5256144Srab 		kmem_free(vdp->page_addrs, XDB_MAX_IO_PAGES(vdp) *
5266144Srab 		    sizeof (uint64_t));
5276144Srab 		vdp->page_addrs = NULL;
5286144Srab 	}
5296144Srab #endif
5305084Sjohnlev }
5315084Sjohnlev 
5325084Sjohnlev static uint_t
5335084Sjohnlev xdb_intr(caddr_t arg)
5345084Sjohnlev {
5358863SEdward.Pilatowicz@Sun.COM 	xdb_t		*vdp = (xdb_t *)arg;
5368863SEdward.Pilatowicz@Sun.COM 	dev_info_t	*dip = vdp->xs_dip;
5378863SEdward.Pilatowicz@Sun.COM 	blkif_request_t	req, *reqp = &req;
5388863SEdward.Pilatowicz@Sun.COM 	xdb_request_t	*xreq;
5398863SEdward.Pilatowicz@Sun.COM 	buf_t		*bp;
5408863SEdward.Pilatowicz@Sun.COM 	uint8_t		op;
5418863SEdward.Pilatowicz@Sun.COM 	int		ret = DDI_INTR_UNCLAIMED;
5425084Sjohnlev 
5435084Sjohnlev 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE,
5445084Sjohnlev 	    "xdb@%s: I/O request received from dom %d",
5455084Sjohnlev 	    ddi_get_name_addr(dip), vdp->xs_peer));
5465084Sjohnlev 
5475084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
5485084Sjohnlev 
5495084Sjohnlev 	/* shouldn't touch ring buffer if not in connected state */
5508863SEdward.Pilatowicz@Sun.COM 	if (!vdp->xs_if_connected) {
5515084Sjohnlev 		mutex_exit(&vdp->xs_iomutex);
5525084Sjohnlev 		return (DDI_INTR_UNCLAIMED);
5535084Sjohnlev 	}
5548863SEdward.Pilatowicz@Sun.COM 	ASSERT(vdp->xs_hp_connected && vdp->xs_fe_initialised);
5555084Sjohnlev 
5565084Sjohnlev 	/*
5575084Sjohnlev 	 * We'll loop until there are no more requests in the ring. We won't
5585084Sjohnlev 	 * be stuck in this loop forever since the size of the ring buffer is
5595084Sjohnlev 	 * limited, and the frontend will stop pushing requests into it when
5605084Sjohnlev 	 * the ring buffer is full
5615084Sjohnlev 	 */
5625084Sjohnlev 
5635084Sjohnlev 	/* req_event will be increased in xvdi_ring_get_request() */
5646144Srab 	while (xdb_get_request(vdp, reqp)) {
5655084Sjohnlev 		ret = DDI_INTR_CLAIMED;
5665084Sjohnlev 
5676144Srab 		op = ddi_get8(vdp->xs_ring_hdl, &reqp->operation);
5685084Sjohnlev 		if (op == BLKIF_OP_READ			||
5695084Sjohnlev 		    op == BLKIF_OP_WRITE		||
5705084Sjohnlev 		    op == BLKIF_OP_WRITE_BARRIER	||
5715084Sjohnlev 		    op == BLKIF_OP_FLUSH_DISKCACHE) {
5725084Sjohnlev #ifdef DEBUG
5736144Srab 			xdb_dump_request_oe(reqp);
5745084Sjohnlev #endif
5755084Sjohnlev 			xreq = xdb_get_req(vdp);
5765084Sjohnlev 			ASSERT(xreq);
5775084Sjohnlev 			switch (op) {
5785084Sjohnlev 			case BLKIF_OP_READ:
5795084Sjohnlev 				vdp->xs_stat_req_reads++;
5805084Sjohnlev 				break;
5815084Sjohnlev 			case BLKIF_OP_WRITE_BARRIER:
5825084Sjohnlev 				vdp->xs_stat_req_barriers++;
5835084Sjohnlev 				/* FALLTHRU */
5845084Sjohnlev 			case BLKIF_OP_WRITE:
5855084Sjohnlev 				vdp->xs_stat_req_writes++;
5865084Sjohnlev 				break;
5875084Sjohnlev 			case BLKIF_OP_FLUSH_DISKCACHE:
5885084Sjohnlev 				vdp->xs_stat_req_flushes++;
5895084Sjohnlev 				break;
5905084Sjohnlev 			}
5915084Sjohnlev 
5925084Sjohnlev 			xreq->xr_curseg = 0; /* start from first segment */
5936144Srab 			bp = xdb_get_buf(vdp, reqp, xreq);
5945084Sjohnlev 			if (bp == NULL) {
5955084Sjohnlev 				/* failed to form a buf */
5965084Sjohnlev 				xdb_free_req(xreq);
5976144Srab 				xdb_response(vdp, reqp, B_FALSE);
5985084Sjohnlev 				continue;
5995084Sjohnlev 			}
6005084Sjohnlev 			bp->av_forw = NULL;
6015084Sjohnlev 
6025084Sjohnlev 			XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE,
6035084Sjohnlev 			    " buf %p, blkno %lld, size %lu, addr %p",
6045084Sjohnlev 			    (void *)bp, (longlong_t)bp->b_blkno,
6055084Sjohnlev 			    (ulong_t)bp->b_bcount, (void *)bp->b_un.b_addr));
6065084Sjohnlev 
6075084Sjohnlev 			/* send bp to underlying blk driver */
6085084Sjohnlev 			if (vdp->xs_f_iobuf == NULL) {
6095084Sjohnlev 				vdp->xs_f_iobuf = vdp->xs_l_iobuf = bp;
6105084Sjohnlev 			} else {
6115084Sjohnlev 				vdp->xs_l_iobuf->av_forw = bp;
6125084Sjohnlev 				vdp->xs_l_iobuf = bp;
6135084Sjohnlev 			}
6145084Sjohnlev 		} else {
6156144Srab 			xdb_response(vdp, reqp, B_FALSE);
6165084Sjohnlev 			XDB_DBPRINT(XDB_DBG_IO, (CE_WARN, "xdb@%s: "
6175084Sjohnlev 			    "Unsupported cmd received from dom %d",
6185084Sjohnlev 			    ddi_get_name_addr(dip), vdp->xs_peer));
6195084Sjohnlev 		}
6205084Sjohnlev 	}
6215084Sjohnlev 	/* notify our taskq to push buf to underlying blk driver */
6225084Sjohnlev 	if (ret == DDI_INTR_CLAIMED)
6235084Sjohnlev 		cv_broadcast(&vdp->xs_iocv);
6245084Sjohnlev 
6255084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
6265084Sjohnlev 
6275084Sjohnlev 	return (ret);
6285084Sjohnlev }
6295084Sjohnlev 
6305084Sjohnlev static int
6315084Sjohnlev xdb_biodone(buf_t *bp)
6325084Sjohnlev {
6335084Sjohnlev 	int i, err, bioerr;
6345084Sjohnlev 	uint8_t segs;
6355084Sjohnlev 	gnttab_unmap_grant_ref_t unmapops[BLKIF_MAX_SEGMENTS_PER_REQUEST];
6365084Sjohnlev 	xdb_request_t *xreq = XDB_BP2XREQ(bp);
6375084Sjohnlev 	xdb_t *vdp = xreq->xr_vdp;
6385084Sjohnlev 	buf_t *nbp;
6395084Sjohnlev 
6405084Sjohnlev 	bioerr = geterror(bp);
6415084Sjohnlev 	if (bioerr)
6425084Sjohnlev 		XDB_DBPRINT(XDB_DBG_IO, (CE_WARN, "xdb@%s: I/O error %d",
6435084Sjohnlev 		    ddi_get_name_addr(vdp->xs_dip), bioerr));
6445084Sjohnlev 
6455084Sjohnlev 	/* check if we are done w/ this I/O request */
6465084Sjohnlev 	if ((bioerr == 0) && (xreq->xr_curseg < xreq->xr_buf_pages)) {
6475084Sjohnlev 		nbp = xdb_get_buf(vdp, NULL, xreq);
6485084Sjohnlev 		if (nbp) {
6495084Sjohnlev 			err = ldi_strategy(vdp->xs_ldi_hdl, nbp);
6505084Sjohnlev 			if (err == 0) {
6515084Sjohnlev 				XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE,
6525084Sjohnlev 				    "sent buf to backend ok"));
6535084Sjohnlev 				return (DDI_SUCCESS);
6545084Sjohnlev 			}
6555084Sjohnlev 			bioerr = EIO;
6565084Sjohnlev 			XDB_DBPRINT(XDB_DBG_IO, (CE_WARN, "xdb@%s: "
6575084Sjohnlev 			    "sent buf to backend dev failed, err=%d",
6585084Sjohnlev 			    ddi_get_name_addr(vdp->xs_dip), err));
6595084Sjohnlev 		} else {
6605084Sjohnlev 			bioerr = EIO;
6615084Sjohnlev 		}
6625084Sjohnlev 	}
6635084Sjohnlev 
6645084Sjohnlev 	/* unmap io pages */
6655084Sjohnlev 	segs = xreq->xr_buf_pages;
6665084Sjohnlev 	/*
6675084Sjohnlev 	 * segs should be no bigger than BLKIF_MAX_SEGMENTS_PER_REQUEST
6685084Sjohnlev 	 * according to Xen's definition of the blkif interface
6695084Sjohnlev 	 */
6705084Sjohnlev 	ASSERT(segs <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
6715084Sjohnlev 	for (i = 0; i < segs; i++) {
6725084Sjohnlev 		unmapops[i].host_addr = (uint64_t)(uintptr_t)XDB_IOPAGE_VA(
6735084Sjohnlev 		    vdp->xs_iopage_va, xreq->xr_idx, i);
6745084Sjohnlev #ifdef DEBUG
6755084Sjohnlev 		mutex_enter(&vdp->xs_iomutex);
6765084Sjohnlev 		unlogva(vdp, unmapops[i].host_addr);
6775084Sjohnlev 		mutex_exit(&vdp->xs_iomutex);
6785084Sjohnlev #endif
6795084Sjohnlev 		unmapops[i].dev_bus_addr = NULL;
6805084Sjohnlev 		unmapops[i].handle = xreq->xr_page_hdls[i];
6815084Sjohnlev 	}
6825084Sjohnlev 	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
6835084Sjohnlev 	    unmapops, segs);
6845084Sjohnlev 	ASSERT(!err);
6855084Sjohnlev 
6865084Sjohnlev 	/*
6875084Sjohnlev 	 * If we have reached a barrier write or a cache flush, then we must
6885084Sjohnlev 	 * flush all our I/Os.
6895084Sjohnlev 	 */
6905084Sjohnlev 	if (xreq->xr_op == BLKIF_OP_WRITE_BARRIER ||
6915084Sjohnlev 	    xreq->xr_op == BLKIF_OP_FLUSH_DISKCACHE) {
6925084Sjohnlev 		/*
6935084Sjohnlev 		 * XXX At this point the write did succeed, so I don't
6945084Sjohnlev 		 * believe we should report an error because the flush
6955084Sjohnlev 		 * failed. However, this is a debatable point, so
6965084Sjohnlev 		 * maybe we need to think more carefully about this.
6975084Sjohnlev 		 * For now, just cast to void.
6985084Sjohnlev 		 */
6995084Sjohnlev 		(void) ldi_ioctl(vdp->xs_ldi_hdl,
7005084Sjohnlev 		    DKIOCFLUSHWRITECACHE, NULL, FKIOCTL, kcred, NULL);
7015084Sjohnlev 	}
7025084Sjohnlev 
7035084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
7045084Sjohnlev 
7055084Sjohnlev 	/* send response back to frontend */
7068863SEdward.Pilatowicz@Sun.COM 	if (vdp->xs_if_connected) {
7078863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_hp_connected && vdp->xs_fe_initialised);
7086144Srab 		if (xdb_push_response(vdp, xreq->xr_id, xreq->xr_op, bioerr))
7095084Sjohnlev 			xvdi_notify_oe(vdp->xs_dip);
7105084Sjohnlev 		XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE,
7115084Sjohnlev 		    "sent resp back to frontend, id=%llu",
7125084Sjohnlev 		    (unsigned long long)xreq->xr_id));
7135084Sjohnlev 	}
7145084Sjohnlev 	/* free io resources */
7155084Sjohnlev 	biofini(bp);
7165084Sjohnlev 	xdb_free_req(xreq);
7175084Sjohnlev 
7185084Sjohnlev 	vdp->xs_ionum--;
7198863SEdward.Pilatowicz@Sun.COM 	if (!vdp->xs_if_connected && (vdp->xs_ionum == 0)) {
7205084Sjohnlev 		/* we're closing, someone is waiting for I/O clean-up */
7215084Sjohnlev 		cv_signal(&vdp->xs_ionumcv);
7226144Srab 	}
7235084Sjohnlev 
7245084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
7255084Sjohnlev 
7265084Sjohnlev 	return (DDI_SUCCESS);
7275084Sjohnlev }
7285084Sjohnlev 
7295084Sjohnlev static int
7305084Sjohnlev xdb_bindto_frontend(xdb_t *vdp)
7315084Sjohnlev {
7325084Sjohnlev 	int err;
7335084Sjohnlev 	char *oename;
7345084Sjohnlev 	grant_ref_t gref;
7355084Sjohnlev 	evtchn_port_t evtchn;
7365084Sjohnlev 	dev_info_t *dip = vdp->xs_dip;
7376144Srab 	char protocol[64] = "";
7385084Sjohnlev 
7398863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
7408863SEdward.Pilatowicz@Sun.COM 
7418863SEdward.Pilatowicz@Sun.COM 	/*
7428863SEdward.Pilatowicz@Sun.COM 	 * Switch to the XenbusStateInitialised state.  This lets the
7438863SEdward.Pilatowicz@Sun.COM 	 * frontend know that we're about to negotiate a connection.
7448863SEdward.Pilatowicz@Sun.COM 	 */
7458863SEdward.Pilatowicz@Sun.COM 	(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateInitialised);
7468863SEdward.Pilatowicz@Sun.COM 
7475084Sjohnlev 	/*
7485084Sjohnlev 	 * Gather info from frontend
7495084Sjohnlev 	 */
7505084Sjohnlev 	oename = xvdi_get_oename(dip);
7515084Sjohnlev 	if (oename == NULL)
7525084Sjohnlev 		return (DDI_FAILURE);
7535084Sjohnlev 
7545084Sjohnlev 	err = xenbus_gather(XBT_NULL, oename,
7558863SEdward.Pilatowicz@Sun.COM 	    XBP_RING_REF, "%lu", &gref,
7568863SEdward.Pilatowicz@Sun.COM 	    XBP_EVENT_CHAN, "%u", &evtchn,
7578863SEdward.Pilatowicz@Sun.COM 	    NULL);
7585084Sjohnlev 	if (err != 0) {
7598863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err,
7605084Sjohnlev 		    "Getting ring-ref and evtchn from frontend");
7615084Sjohnlev 		return (DDI_FAILURE);
7625084Sjohnlev 	}
7635084Sjohnlev 
7646144Srab 	vdp->xs_blk_protocol = BLKIF_PROTOCOL_NATIVE;
7656144Srab 	vdp->xs_nentry = BLKIF_RING_SIZE;
7666144Srab 	vdp->xs_entrysize = sizeof (union blkif_sring_entry);
7676144Srab 
7686144Srab 	err = xenbus_gather(XBT_NULL, oename,
7698863SEdward.Pilatowicz@Sun.COM 	    XBP_PROTOCOL, "%63s", protocol, NULL);
7706144Srab 	if (err)
7716144Srab 		(void) strcpy(protocol, "unspecified, assuming native");
7726144Srab 	else {
7736144Srab 		/*
7746144Srab 		 * We must check for NATIVE first, so that the fast path
7756144Srab 		 * is taken for copying data from the guest to the host.
7766144Srab 		 */
7776144Srab 		if (strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE) != 0) {
7786144Srab 			if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
7796144Srab 				vdp->xs_blk_protocol = BLKIF_PROTOCOL_X86_32;
7806144Srab 				vdp->xs_nentry = BLKIF_X86_32_RING_SIZE;
7816144Srab 				vdp->xs_entrysize =
7826144Srab 				    sizeof (union blkif_x86_32_sring_entry);
7836144Srab 			} else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) ==
7846144Srab 			    0) {
7856144Srab 				vdp->xs_blk_protocol = BLKIF_PROTOCOL_X86_64;
7866144Srab 				vdp->xs_nentry = BLKIF_X86_64_RING_SIZE;
7876144Srab 				vdp->xs_entrysize =
7886144Srab 				    sizeof (union blkif_x86_64_sring_entry);
7896144Srab 			} else {
7906144Srab 				xvdi_fatal_error(dip, err, "unknown protocol");
7916144Srab 				return (DDI_FAILURE);
7926144Srab 			}
7936144Srab 		}
7946144Srab 	}
7956144Srab #ifdef DEBUG
7966175Sjohnlev 	cmn_err(CE_NOTE, "!xdb@%s: blkif protocol '%s' ",
7976144Srab 	    ddi_get_name_addr(dip), protocol);
7986144Srab #endif
7996144Srab 
8005084Sjohnlev 	/*
8018863SEdward.Pilatowicz@Sun.COM 	 * Map and init ring.  The ring parameters must match those which
8028863SEdward.Pilatowicz@Sun.COM 	 * have been allocated in the front end.
8035084Sjohnlev 	 */
8048863SEdward.Pilatowicz@Sun.COM 	if (xvdi_map_ring(dip, vdp->xs_nentry, vdp->xs_entrysize,
8058863SEdward.Pilatowicz@Sun.COM 	    gref, &vdp->xs_ring) != DDI_SUCCESS)
8065084Sjohnlev 		return (DDI_FAILURE);
8078863SEdward.Pilatowicz@Sun.COM 
8085084Sjohnlev 	/*
8095084Sjohnlev 	 * This will be removed once we use shadow I/O ring requests, since
8105084Sjohnlev 	 * we then won't need to access the ring itself directly and the
8115084Sjohnlev 	 * access handle will not be needed
8125084Sjohnlev 	 */
8135084Sjohnlev 	vdp->xs_ring_hdl = vdp->xs_ring->xr_acc_hdl;
8145084Sjohnlev 
8158863SEdward.Pilatowicz@Sun.COM 	/* bind event channel */
8165084Sjohnlev 	err = xvdi_bind_evtchn(dip, evtchn);
8175084Sjohnlev 	if (err != DDI_SUCCESS) {
8185084Sjohnlev 		xvdi_unmap_ring(vdp->xs_ring);
8195084Sjohnlev 		return (DDI_FAILURE);
8205084Sjohnlev 	}
8215084Sjohnlev 
8225084Sjohnlev 	return (DDI_SUCCESS);
8235084Sjohnlev }
8245084Sjohnlev 
8255084Sjohnlev static void
8265084Sjohnlev xdb_unbindfrom_frontend(xdb_t *vdp)
8275084Sjohnlev {
8288863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
8298863SEdward.Pilatowicz@Sun.COM 
8305084Sjohnlev 	xvdi_free_evtchn(vdp->xs_dip);
8315084Sjohnlev 	xvdi_unmap_ring(vdp->xs_ring);
8325084Sjohnlev }
8335084Sjohnlev 
8348863SEdward.Pilatowicz@Sun.COM /*
8358863SEdward.Pilatowicz@Sun.COM  * xdb_params_change() initiates a change to the underlying device/file
8368863SEdward.Pilatowicz@Sun.COM  * that the backend is accessing.  It does this by disconnecting from the
8378863SEdward.Pilatowicz@Sun.COM  * frontend, closing the old device, clearing a bunch of xenbus parameters,
8388863SEdward.Pilatowicz@Sun.COM  * and switching back to the XenbusStateInitialising state.  The frontend
8398863SEdward.Pilatowicz@Sun.COM  * should notice this transition to the XenbusStateInitialising state and
8408863SEdward.Pilatowicz@Sun.COM  * should attempt to reconnect to us (the backend).
8418863SEdward.Pilatowicz@Sun.COM  */
8428863SEdward.Pilatowicz@Sun.COM static void
8438863SEdward.Pilatowicz@Sun.COM xdb_params_change(xdb_t *vdp, char *params, boolean_t update_xs)
8448863SEdward.Pilatowicz@Sun.COM {
8458863SEdward.Pilatowicz@Sun.COM 	xenbus_transaction_t	xbt;
8468863SEdward.Pilatowicz@Sun.COM 	dev_info_t		*dip = vdp->xs_dip;
8478863SEdward.Pilatowicz@Sun.COM 	char			*xsname;
8488863SEdward.Pilatowicz@Sun.COM 	int			err;
8498863SEdward.Pilatowicz@Sun.COM 
8508863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
8518863SEdward.Pilatowicz@Sun.COM 	ASSERT(vdp->xs_params_path != NULL);
8528863SEdward.Pilatowicz@Sun.COM 
8538863SEdward.Pilatowicz@Sun.COM 	if ((xsname = xvdi_get_xsname(dip)) == NULL)
8548863SEdward.Pilatowicz@Sun.COM 		return;
8558863SEdward.Pilatowicz@Sun.COM 	if (strcmp(vdp->xs_params_path, params) == 0)
8568863SEdward.Pilatowicz@Sun.COM 		return;
8578863SEdward.Pilatowicz@Sun.COM 
8588863SEdward.Pilatowicz@Sun.COM 	/*
8598863SEdward.Pilatowicz@Sun.COM 	 * Close the device we're currently accessing and update the
8608863SEdward.Pilatowicz@Sun.COM 	 * path which points to our backend device/file.
8618863SEdward.Pilatowicz@Sun.COM 	 */
8628863SEdward.Pilatowicz@Sun.COM 	xdb_close(dip);
8638863SEdward.Pilatowicz@Sun.COM 	vdp->xs_fe_initialised = B_FALSE;
8648863SEdward.Pilatowicz@Sun.COM 
8658863SEdward.Pilatowicz@Sun.COM trans_retry:
8668863SEdward.Pilatowicz@Sun.COM 	if ((err = xenbus_transaction_start(&xbt)) != 0) {
8678863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "params change transaction init");
8688863SEdward.Pilatowicz@Sun.COM 		goto errout;
8698863SEdward.Pilatowicz@Sun.COM 	}
8708863SEdward.Pilatowicz@Sun.COM 
8718863SEdward.Pilatowicz@Sun.COM 	/*
8728863SEdward.Pilatowicz@Sun.COM 	 * Delete all the xenbus properties that are connection dependent
8738863SEdward.Pilatowicz@Sun.COM 	 * and go back to the initializing state so that the frontend
8748863SEdward.Pilatowicz@Sun.COM 	 * driver can re-negotiate a connection.
8758863SEdward.Pilatowicz@Sun.COM 	 */
8768863SEdward.Pilatowicz@Sun.COM 	if (((err = xenbus_rm(xbt, xsname, XBP_FB)) != 0) ||
8778863SEdward.Pilatowicz@Sun.COM 	    ((err = xenbus_rm(xbt, xsname, XBP_INFO)) != 0) ||
8788863SEdward.Pilatowicz@Sun.COM 	    ((err = xenbus_rm(xbt, xsname, "sector-size")) != 0) ||
8798863SEdward.Pilatowicz@Sun.COM 	    ((err = xenbus_rm(xbt, xsname, XBP_SECTORS)) != 0) ||
8808863SEdward.Pilatowicz@Sun.COM 	    ((err = xenbus_rm(xbt, xsname, "instance")) != 0) ||
8818863SEdward.Pilatowicz@Sun.COM 	    ((err = xenbus_rm(xbt, xsname, "node")) != 0) ||
8828863SEdward.Pilatowicz@Sun.COM 	    (update_xs && ((err = xenbus_printf(xbt, xsname,
8838863SEdward.Pilatowicz@Sun.COM 	    "params", "%s", params)) != 0)) ||
8848863SEdward.Pilatowicz@Sun.COM 	    ((err = xvdi_switch_state(dip,
8858863SEdward.Pilatowicz@Sun.COM 	    xbt, XenbusStateInitialising) > 0))) {
8868863SEdward.Pilatowicz@Sun.COM 		(void) xenbus_transaction_end(xbt, 1);
8878863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "params change transaction setup");
8888863SEdward.Pilatowicz@Sun.COM 		goto errout;
8898863SEdward.Pilatowicz@Sun.COM 	}
8908863SEdward.Pilatowicz@Sun.COM 
8918863SEdward.Pilatowicz@Sun.COM 	if ((err = xenbus_transaction_end(xbt, 0)) != 0) {
8928863SEdward.Pilatowicz@Sun.COM 		if (err == EAGAIN) {
8938863SEdward.Pilatowicz@Sun.COM 			/* transaction is ended, don't need to abort it */
8948863SEdward.Pilatowicz@Sun.COM 			goto trans_retry;
8958863SEdward.Pilatowicz@Sun.COM 		}
8968863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "params change transaction commit");
8978863SEdward.Pilatowicz@Sun.COM 		goto errout;
8988863SEdward.Pilatowicz@Sun.COM 	}
8998863SEdward.Pilatowicz@Sun.COM 
9008863SEdward.Pilatowicz@Sun.COM 	/* Change the device that we plan to access */
9018863SEdward.Pilatowicz@Sun.COM 	strfree(vdp->xs_params_path);
9028863SEdward.Pilatowicz@Sun.COM 	vdp->xs_params_path = strdup(params);
9038863SEdward.Pilatowicz@Sun.COM 	return;
9048863SEdward.Pilatowicz@Sun.COM 
9058863SEdward.Pilatowicz@Sun.COM errout:
9068863SEdward.Pilatowicz@Sun.COM 	(void) xvdi_switch_state(dip, xbt, XenbusStateInitialising);
9078863SEdward.Pilatowicz@Sun.COM }
9088863SEdward.Pilatowicz@Sun.COM 
9098863SEdward.Pilatowicz@Sun.COM /*
9108863SEdward.Pilatowicz@Sun.COM  * xdb_watch_params_cb() - This callback is invoked whenever there
9118863SEdward.Pilatowicz@Sun.COM  * is an update to the following xenbus parameter:
9128863SEdward.Pilatowicz@Sun.COM  *     /local/domain/0/backend/vbd/<domU_id>/<domU_dev>/params
9138863SEdward.Pilatowicz@Sun.COM  *
9148863SEdward.Pilatowicz@Sun.COM  * This normally happens during xm block-configure operations, which
9158863SEdward.Pilatowicz@Sun.COM  * are used to change CD device images for HVM domUs.
9168863SEdward.Pilatowicz@Sun.COM  */
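/*
 * For example, a dom0 administrator might run something along the lines of
 * (illustrative xm syntax, device names hypothetical):
 *
 *	# xm block-configure <domU> file:/isos/new.iso hdc:cdrom r
 *
 * which rewrites the "params" property under this device's backend xenstore
 * node and fires this watch.
 */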
9178863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
9188863SEdward.Pilatowicz@Sun.COM static void
9198863SEdward.Pilatowicz@Sun.COM xdb_watch_params_cb(dev_info_t *dip, const char *path, void *arg)
9208863SEdward.Pilatowicz@Sun.COM {
9218863SEdward.Pilatowicz@Sun.COM 	xdb_t			*vdp = (xdb_t *)ddi_get_driver_private(dip);
9228863SEdward.Pilatowicz@Sun.COM 	char			*xsname, *oename, *str, *str2;
9238863SEdward.Pilatowicz@Sun.COM 
9248863SEdward.Pilatowicz@Sun.COM 	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
9258863SEdward.Pilatowicz@Sun.COM 	    ((oename = xvdi_get_oename(dip)) == NULL)) {
9268863SEdward.Pilatowicz@Sun.COM 		return;
9278863SEdward.Pilatowicz@Sun.COM 	}
9288863SEdward.Pilatowicz@Sun.COM 
9298863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&vdp->xs_cbmutex);
9308863SEdward.Pilatowicz@Sun.COM 
9318863SEdward.Pilatowicz@Sun.COM 	if (xenbus_read_str(xsname, "params", &str) != 0) {
9328863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
9338863SEdward.Pilatowicz@Sun.COM 		return;
9348863SEdward.Pilatowicz@Sun.COM 	}
9358863SEdward.Pilatowicz@Sun.COM 
9368863SEdward.Pilatowicz@Sun.COM 	if (strcmp(vdp->xs_params_path, str) == 0) {
9378863SEdward.Pilatowicz@Sun.COM 		/* Nothing to do */
9388863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
9398863SEdward.Pilatowicz@Sun.COM 		strfree(str);
9408863SEdward.Pilatowicz@Sun.COM 		return;
9418863SEdward.Pilatowicz@Sun.COM 	}
9428863SEdward.Pilatowicz@Sun.COM 
9438863SEdward.Pilatowicz@Sun.COM 	/*
9448863SEdward.Pilatowicz@Sun.COM 	 * If the frontend isn't a cd device, doesn't support media
9458863SEdward.Pilatowicz@Sun.COM 	 * requests, or has locked the media, then we can't change
9468863SEdward.Pilatowicz@Sun.COM 	 * the params value.  Restore the current value.
9478863SEdward.Pilatowicz@Sun.COM 	 */
9488863SEdward.Pilatowicz@Sun.COM 	str2 = NULL;
9498863SEdward.Pilatowicz@Sun.COM 	if (!XDB_IS_FE_CD(vdp) ||
9508863SEdward.Pilatowicz@Sun.COM 	    (xenbus_read_str(oename, XBP_MEDIA_REQ, &str2) != 0) ||
9518863SEdward.Pilatowicz@Sun.COM 	    (strcmp(str2, XBV_MEDIA_REQ_LOCK) == 0)) {
9528863SEdward.Pilatowicz@Sun.COM 		if (str2 != NULL)
9538863SEdward.Pilatowicz@Sun.COM 			strfree(str2);
9548863SEdward.Pilatowicz@Sun.COM 		strfree(str);
9558863SEdward.Pilatowicz@Sun.COM 
9568863SEdward.Pilatowicz@Sun.COM 		str = i_pathname(dip);
9578863SEdward.Pilatowicz@Sun.COM 		cmn_err(CE_NOTE,
9588863SEdward.Pilatowicz@Sun.COM 		    "!%s: media locked, ignoring params update", str);
9598863SEdward.Pilatowicz@Sun.COM 		strfree(str);
9608863SEdward.Pilatowicz@Sun.COM 
9618863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
9628863SEdward.Pilatowicz@Sun.COM 		return;
9638863SEdward.Pilatowicz@Sun.COM 	}
9648863SEdward.Pilatowicz@Sun.COM 
9658863SEdward.Pilatowicz@Sun.COM 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE,
9668863SEdward.Pilatowicz@Sun.COM 	    "block-configure params request: \"%s\"", str));
9678863SEdward.Pilatowicz@Sun.COM 
9688863SEdward.Pilatowicz@Sun.COM 	xdb_params_change(vdp, str, B_FALSE);
9698863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&vdp->xs_cbmutex);
9708863SEdward.Pilatowicz@Sun.COM 	strfree(str);
9718863SEdward.Pilatowicz@Sun.COM }
9728863SEdward.Pilatowicz@Sun.COM 
9738863SEdward.Pilatowicz@Sun.COM /*
9748863SEdward.Pilatowicz@Sun.COM  * xdb_watch_media_req_cb() - This callback is invoked whenever there
9758863SEdward.Pilatowicz@Sun.COM  * is an update to the following xenbus parameter:
9768863SEdward.Pilatowicz@Sun.COM  *     /local/domain/<domU_id>/device/vbd/<domU_dev>/media-req
9778863SEdward.Pilatowicz@Sun.COM  *
9788863SEdward.Pilatowicz@Sun.COM  * Media requests are only supported on CD devices and are issued by
9798863SEdward.Pilatowicz@Sun.COM  * the frontend.  Currently the only supported media request operations
9808863SEdward.Pilatowicz@Sun.COM  * are "lock" and "eject".  A "lock" prevents the backend from changing
9818863SEdward.Pilatowicz@Sun.COM  * the backing device/file (via xm block-configure).  An "eject" request
9828863SEdward.Pilatowicz@Sun.COM  * tells the backend device that it should disconnect from the frontend
9838863SEdward.Pilatowicz@Sun.COM  * and close the backing device/file that is currently in use.
9848863SEdward.Pilatowicz@Sun.COM  */
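/*
 * Sketch of the eject flow handled below (xenstore paths abbreviated):
 *
 *	frontend writes		media-req = "eject"
 *	this callback		xdb_params_change(vdp, "", B_TRUE)
 *				(close the device, clear "params")
 *	this callback writes	media-req = XBV_MEDIA_REQ_NONE (acknowledge)
 */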
9858863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
9868863SEdward.Pilatowicz@Sun.COM static void
9878863SEdward.Pilatowicz@Sun.COM xdb_watch_media_req_cb(dev_info_t *dip, const char *path, void *arg)
9888863SEdward.Pilatowicz@Sun.COM {
9898863SEdward.Pilatowicz@Sun.COM 	xdb_t			*vdp = (xdb_t *)ddi_get_driver_private(dip);
9908863SEdward.Pilatowicz@Sun.COM 	char			*oename, *str;
9918863SEdward.Pilatowicz@Sun.COM 
9928863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&vdp->xs_cbmutex);
9938863SEdward.Pilatowicz@Sun.COM 
9948863SEdward.Pilatowicz@Sun.COM 	if ((oename = xvdi_get_oename(dip)) == NULL) {
9958863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
9968863SEdward.Pilatowicz@Sun.COM 		return;
9978863SEdward.Pilatowicz@Sun.COM 	}
9988863SEdward.Pilatowicz@Sun.COM 
9998863SEdward.Pilatowicz@Sun.COM 	if (xenbus_read_str(oename, XBP_MEDIA_REQ, &str) != 0) {
10008863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
10018863SEdward.Pilatowicz@Sun.COM 		return;
10028863SEdward.Pilatowicz@Sun.COM 	}
10038863SEdward.Pilatowicz@Sun.COM 
10048863SEdward.Pilatowicz@Sun.COM 	if (!XDB_IS_FE_CD(vdp)) {
10058863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, EINVAL,
10068863SEdward.Pilatowicz@Sun.COM 		    "media-req only supported for cdrom devices");
10078863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
10088863SEdward.Pilatowicz@Sun.COM 		return;
10098863SEdward.Pilatowicz@Sun.COM 	}
10108863SEdward.Pilatowicz@Sun.COM 
10118863SEdward.Pilatowicz@Sun.COM 	if (strcmp(str, XBV_MEDIA_REQ_EJECT) != 0) {
10128863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
10138863SEdward.Pilatowicz@Sun.COM 		strfree(str);
10148863SEdward.Pilatowicz@Sun.COM 		return;
10158863SEdward.Pilatowicz@Sun.COM 	}
10168863SEdward.Pilatowicz@Sun.COM 	strfree(str);
10178863SEdward.Pilatowicz@Sun.COM 
10188863SEdward.Pilatowicz@Sun.COM 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE, "media eject request"));
10198863SEdward.Pilatowicz@Sun.COM 
10208863SEdward.Pilatowicz@Sun.COM 	xdb_params_change(vdp, "", B_TRUE);
10218863SEdward.Pilatowicz@Sun.COM 	(void) xenbus_printf(XBT_NULL, oename,
10228863SEdward.Pilatowicz@Sun.COM 	    XBP_MEDIA_REQ, "%s", XBV_MEDIA_REQ_NONE);
10238863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&vdp->xs_cbmutex);
10248863SEdward.Pilatowicz@Sun.COM }
10258863SEdward.Pilatowicz@Sun.COM 
10268863SEdward.Pilatowicz@Sun.COM /*
10278863SEdward.Pilatowicz@Sun.COM  * If we're dealing with a cdrom device, let the frontend know that
10288863SEdward.Pilatowicz@Sun.COM  * we support media requests via XBP_MEDIA_REQ_SUP, and setup a watch
10298863SEdward.Pilatowicz@Sun.COM  * we support media requests via XBP_MEDIA_REQ_SUP, and set up a watch
10308863SEdward.Pilatowicz@Sun.COM  * following xenstore parameter:
10318863SEdward.Pilatowicz@Sun.COM  *	/local/domain/<domU_id>/device/vbd/<domU_dev>/media-req
10328863SEdward.Pilatowicz@Sun.COM  */
10338863SEdward.Pilatowicz@Sun.COM static boolean_t
10348863SEdward.Pilatowicz@Sun.COM xdb_media_req_init(xdb_t *vdp)
10358863SEdward.Pilatowicz@Sun.COM {
10368863SEdward.Pilatowicz@Sun.COM 	dev_info_t		*dip = vdp->xs_dip;
10378863SEdward.Pilatowicz@Sun.COM 	char			*xsname, *oename;
10388863SEdward.Pilatowicz@Sun.COM 
10398863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
10408863SEdward.Pilatowicz@Sun.COM 
10418863SEdward.Pilatowicz@Sun.COM 	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
10428863SEdward.Pilatowicz@Sun.COM 	    ((oename = xvdi_get_oename(dip)) == NULL))
10438863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
10448863SEdward.Pilatowicz@Sun.COM 
10458863SEdward.Pilatowicz@Sun.COM 	if (!XDB_IS_FE_CD(vdp))
10468863SEdward.Pilatowicz@Sun.COM 		return (B_TRUE);
10478863SEdward.Pilatowicz@Sun.COM 
10488863SEdward.Pilatowicz@Sun.COM 	if (xenbus_printf(XBT_NULL, xsname, XBP_MEDIA_REQ_SUP, "%d", 1) != 0)
10498863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
10508863SEdward.Pilatowicz@Sun.COM 
10518863SEdward.Pilatowicz@Sun.COM 	if (xvdi_add_xb_watch_handler(dip, oename,
10528863SEdward.Pilatowicz@Sun.COM 	    XBP_MEDIA_REQ, xdb_watch_media_req_cb, NULL) != DDI_SUCCESS) {
10538863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, EAGAIN,
10548863SEdward.Pilatowicz@Sun.COM 		    "Failed to register watch for cdrom media requests");
10558863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
10568863SEdward.Pilatowicz@Sun.COM 	}
10578863SEdward.Pilatowicz@Sun.COM 
10588863SEdward.Pilatowicz@Sun.COM 	return (B_TRUE);
10598863SEdward.Pilatowicz@Sun.COM }
10608863SEdward.Pilatowicz@Sun.COM 
10618863SEdward.Pilatowicz@Sun.COM /*
10628863SEdward.Pilatowicz@Sun.COM  * Get our params value.  Also, if we're using "params" then setup a
10638863SEdward.Pilatowicz@Sun.COM  * Get our params value.  Also, if we're using "params" then set up a
10648863SEdward.Pilatowicz@Sun.COM  * following xenstore parameter:
10658863SEdward.Pilatowicz@Sun.COM  *	/local/domain/0/backend/vbd/<domU_id>/<domU_dev>/params
10668863SEdward.Pilatowicz@Sun.COM  */
10678863SEdward.Pilatowicz@Sun.COM static boolean_t
10688863SEdward.Pilatowicz@Sun.COM xdb_params_init(xdb_t *vdp)
10698863SEdward.Pilatowicz@Sun.COM {
10708863SEdward.Pilatowicz@Sun.COM 	dev_info_t		*dip = vdp->xs_dip;
10718863SEdward.Pilatowicz@Sun.COM 	char			*str, *xsname;
1072*10175SStuart.Maybee@Sun.COM 	int			err;
10738863SEdward.Pilatowicz@Sun.COM 
10748863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
10758863SEdward.Pilatowicz@Sun.COM 	ASSERT(vdp->xs_params_path == NULL);
10768863SEdward.Pilatowicz@Sun.COM 
10778863SEdward.Pilatowicz@Sun.COM 	if ((xsname = xvdi_get_xsname(dip)) == NULL)
10788863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
10798863SEdward.Pilatowicz@Sun.COM 
1080*10175SStuart.Maybee@Sun.COM 	err = xenbus_read_str(xsname, "params", &str);
1081*10175SStuart.Maybee@Sun.COM 	if (err != 0) {
1082*10175SStuart.Maybee@Sun.COM 		return (B_FALSE);
10838863SEdward.Pilatowicz@Sun.COM 	}
10848863SEdward.Pilatowicz@Sun.COM 	vdp->xs_params_path = str;
10858863SEdward.Pilatowicz@Sun.COM 
10868863SEdward.Pilatowicz@Sun.COM 	if (xvdi_add_xb_watch_handler(dip, xsname, "params",
10878863SEdward.Pilatowicz@Sun.COM 	    xdb_watch_params_cb, NULL) != DDI_SUCCESS) {
10888863SEdward.Pilatowicz@Sun.COM 		strfree(vdp->xs_params_path);
10898863SEdward.Pilatowicz@Sun.COM 		vdp->xs_params_path = NULL;
10908863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
10918863SEdward.Pilatowicz@Sun.COM 	}
10928863SEdward.Pilatowicz@Sun.COM 
10938863SEdward.Pilatowicz@Sun.COM 	return (B_TRUE);
10948863SEdward.Pilatowicz@Sun.COM }
10958863SEdward.Pilatowicz@Sun.COM 
10965084Sjohnlev #define	LOFI_CTRL_NODE	"/dev/lofictl"
10975084Sjohnlev #define	LOFI_DEV_NODE	"/devices/pseudo/lofi@0:"
10988863SEdward.Pilatowicz@Sun.COM #define	LOFI_MODE	(FREAD | FWRITE | FEXCL)
10995084Sjohnlev 
11005084Sjohnlev static int
11015084Sjohnlev xdb_setup_node(xdb_t *vdp, char *path)
11025084Sjohnlev {
11038863SEdward.Pilatowicz@Sun.COM 	dev_info_t		*dip = vdp->xs_dip;
11048863SEdward.Pilatowicz@Sun.COM 	char			*xsname, *str;
11058863SEdward.Pilatowicz@Sun.COM 	ldi_handle_t		ldi_hdl;
11068863SEdward.Pilatowicz@Sun.COM 	struct lofi_ioctl	*li;
11078863SEdward.Pilatowicz@Sun.COM 	int			minor, err;
11085084Sjohnlev 
11098863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
11108863SEdward.Pilatowicz@Sun.COM 
11118863SEdward.Pilatowicz@Sun.COM 	if ((xsname = xvdi_get_xsname(dip)) == NULL)
11125084Sjohnlev 		return (DDI_FAILURE);
11135084Sjohnlev 
11148863SEdward.Pilatowicz@Sun.COM 	if ((err = xenbus_read_str(xsname, "type", &str)) != 0) {
11158863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "Getting type from backend device");
11165084Sjohnlev 		return (DDI_FAILURE);
11175084Sjohnlev 	}
11188863SEdward.Pilatowicz@Sun.COM 	if (strcmp(str, "file") == 0)
11198863SEdward.Pilatowicz@Sun.COM 		vdp->xs_type |= XDB_DEV_BE_LOFI;
11208863SEdward.Pilatowicz@Sun.COM 	strfree(str);
11215084Sjohnlev 
11228863SEdward.Pilatowicz@Sun.COM 	if (!XDB_IS_BE_LOFI(vdp)) {
11238863SEdward.Pilatowicz@Sun.COM 		(void) strlcpy(path, vdp->xs_params_path, MAXPATHLEN);
11248863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_lofi_path == NULL);
11255084Sjohnlev 		return (DDI_SUCCESS);
11265084Sjohnlev 	}
11275084Sjohnlev 
11285084Sjohnlev 	do {
11295084Sjohnlev 		err = ldi_open_by_name(LOFI_CTRL_NODE, LOFI_MODE, kcred,
11305084Sjohnlev 		    &ldi_hdl, vdp->xs_ldi_li);
11315084Sjohnlev 	} while (err == EBUSY);
11325084Sjohnlev 	if (err != 0) {
11335084Sjohnlev 		return (DDI_FAILURE);
11345084Sjohnlev 	}
11355084Sjohnlev 
11365084Sjohnlev 	li = kmem_zalloc(sizeof (*li), KM_SLEEP);
11378863SEdward.Pilatowicz@Sun.COM 	(void) strlcpy(li->li_filename, vdp->xs_params_path,
11388863SEdward.Pilatowicz@Sun.COM 	    sizeof (li->li_filename));
11398863SEdward.Pilatowicz@Sun.COM 	err = ldi_ioctl(ldi_hdl, LOFI_MAP_FILE, (intptr_t)li,
11408863SEdward.Pilatowicz@Sun.COM 	    LOFI_MODE | FKIOCTL, kcred, &minor);
11418863SEdward.Pilatowicz@Sun.COM 	(void) ldi_close(ldi_hdl, LOFI_MODE, kcred);
11428863SEdward.Pilatowicz@Sun.COM 	kmem_free(li, sizeof (*li));
11438863SEdward.Pilatowicz@Sun.COM 
11448863SEdward.Pilatowicz@Sun.COM 	if (err != 0) {
11455084Sjohnlev 		cmn_err(CE_WARN, "xdb@%s: Failed to create lofi dev for %s",
11468863SEdward.Pilatowicz@Sun.COM 		    ddi_get_name_addr(dip), vdp->xs_params_path);
11475084Sjohnlev 		return (DDI_FAILURE);
11485084Sjohnlev 	}
11498863SEdward.Pilatowicz@Sun.COM 
11505084Sjohnlev 	/*
11515084Sjohnlev 	 * return '/devices/...' instead of '/dev/lofi/...' since the
11525084Sjohnlev 	 * former is available immediately after calling ldi_ioctl
11535084Sjohnlev 	 */
11548081SDina.Nimeh@Sun.Com 	(void) snprintf(path, MAXPATHLEN, LOFI_DEV_NODE "%d", minor);
11558863SEdward.Pilatowicz@Sun.COM 	(void) xenbus_printf(XBT_NULL, xsname, "node", "%s", path);
11568863SEdward.Pilatowicz@Sun.COM 
11578863SEdward.Pilatowicz@Sun.COM 	ASSERT(vdp->xs_lofi_path == NULL);
11588863SEdward.Pilatowicz@Sun.COM 	vdp->xs_lofi_path = strdup(path);
11598863SEdward.Pilatowicz@Sun.COM 
11605084Sjohnlev 	return (DDI_SUCCESS);
11615084Sjohnlev }
11625084Sjohnlev 
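/*
 * xdb_teardown_node() undoes xdb_setup_node(): if this backend is
 * lofi-based, unmap the lofi device created for xs_params_path and
 * release xs_lofi_path.
 */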
11635084Sjohnlev static void
11645084Sjohnlev xdb_teardown_node(xdb_t *vdp)
11655084Sjohnlev {
11668863SEdward.Pilatowicz@Sun.COM 	dev_info_t *dip = vdp->xs_dip;
11675084Sjohnlev 	ldi_handle_t ldi_hdl;
11685084Sjohnlev 	struct lofi_ioctl *li;
11695084Sjohnlev 	int err;
11705084Sjohnlev 
11718863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
11725084Sjohnlev 
11738863SEdward.Pilatowicz@Sun.COM 	if (!XDB_IS_BE_LOFI(vdp))
11745084Sjohnlev 		return;
11755084Sjohnlev 
11768863SEdward.Pilatowicz@Sun.COM 	vdp->xs_type &= ~XDB_DEV_BE_LOFI;
11778863SEdward.Pilatowicz@Sun.COM 	ASSERT(vdp->xs_lofi_path != NULL);
11785084Sjohnlev 
11795084Sjohnlev 	li = kmem_zalloc(sizeof (*li), KM_SLEEP);
11808863SEdward.Pilatowicz@Sun.COM 	(void) strlcpy(li->li_filename, vdp->xs_params_path,
11818863SEdward.Pilatowicz@Sun.COM 	    sizeof (li->li_filename));
11825084Sjohnlev 
11835084Sjohnlev 	do {
11845084Sjohnlev 		err = ldi_open_by_name(LOFI_CTRL_NODE, LOFI_MODE, kcred,
11855084Sjohnlev 		    &ldi_hdl, vdp->xs_ldi_li);
11865084Sjohnlev 	} while (err == EBUSY);
11875084Sjohnlev 
11885084Sjohnlev 	if (err != 0) {
11895084Sjohnlev 		kmem_free(li, sizeof (*li));
11905084Sjohnlev 		return;
11915084Sjohnlev 	}
11925084Sjohnlev 
11935084Sjohnlev 	if (ldi_ioctl(ldi_hdl, LOFI_UNMAP_FILE, (intptr_t)li,
11945084Sjohnlev 	    LOFI_MODE | FKIOCTL, kcred, NULL) != 0) {
11955084Sjohnlev 		cmn_err(CE_WARN, "xdb@%s: Failed to delete lofi dev for %s",
11965084Sjohnlev 		    ddi_get_name_addr(dip), li->li_filename);
11975084Sjohnlev 	}
11985084Sjohnlev 
11995084Sjohnlev 	(void) ldi_close(ldi_hdl, LOFI_MODE, kcred);
12005084Sjohnlev 	kmem_free(li, sizeof (*li));
12018863SEdward.Pilatowicz@Sun.COM 
12028863SEdward.Pilatowicz@Sun.COM 	strfree(vdp->xs_lofi_path);
12038863SEdward.Pilatowicz@Sun.COM 	vdp->xs_lofi_path = NULL;
12045084Sjohnlev }
12055084Sjohnlev 
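/*
 * xdb_open_device() opens the backend device through LDI, records its
 * size and block size, and notes whether the underlying device is a
 * CD/DVD and/or removable media.  A PV cdrom with no media (empty
 * xs_params_path) is treated as a successful open with zero sectors.
 * If the hotplug scripts published a "dynamic-device-path", it replaces
 * xs_params_path before the open.
 */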
12065084Sjohnlev static int
12075084Sjohnlev xdb_open_device(xdb_t *vdp)
12085084Sjohnlev {
12098863SEdward.Pilatowicz@Sun.COM 	dev_info_t *dip = vdp->xs_dip;
12105084Sjohnlev 	uint64_t devsize;
12119889SLarry.Liu@Sun.COM 	int blksize;
12125084Sjohnlev 	char *nodepath;
1213*10175SStuart.Maybee@Sun.COM 	char *xsname;
1214*10175SStuart.Maybee@Sun.COM 	char *str;
1215*10175SStuart.Maybee@Sun.COM 	int err;
12165084Sjohnlev 
12178863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
12185084Sjohnlev 
12198863SEdward.Pilatowicz@Sun.COM 	if (strlen(vdp->xs_params_path) == 0) {
12208863SEdward.Pilatowicz@Sun.COM 		/*
12218863SEdward.Pilatowicz@Sun.COM 		 * it's possible to have no backing device when dealing
12228863SEdward.Pilatowicz@Sun.COM 		 * with a pv cdrom drive that has no virtual cd associated
12238863SEdward.Pilatowicz@Sun.COM 		 * with it.
12248863SEdward.Pilatowicz@Sun.COM 		 */
12258863SEdward.Pilatowicz@Sun.COM 		ASSERT(XDB_IS_FE_CD(vdp));
12268863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_sectors == 0);
12278863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_ldi_li == NULL);
12288863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_ldi_hdl == NULL);
12298863SEdward.Pilatowicz@Sun.COM 		return (DDI_SUCCESS);
12305084Sjohnlev 	}
12315084Sjohnlev 
1232*10175SStuart.Maybee@Sun.COM 	/*
1233*10175SStuart.Maybee@Sun.COM 	 * after the hotplug scripts have "connected" the device, check to see
1234*10175SStuart.Maybee@Sun.COM 	 * if we're using a dynamic device.  If so, replace the params path
1235*10175SStuart.Maybee@Sun.COM 	 * with the dynamic one.
1236*10175SStuart.Maybee@Sun.COM 	 */
1237*10175SStuart.Maybee@Sun.COM 	xsname = xvdi_get_xsname(dip);
1238*10175SStuart.Maybee@Sun.COM 	err = xenbus_read_str(xsname, "dynamic-device-path", &str);
1239*10175SStuart.Maybee@Sun.COM 	if (err == 0) {
1240*10175SStuart.Maybee@Sun.COM 		strfree(vdp->xs_params_path);
1241*10175SStuart.Maybee@Sun.COM 		vdp->xs_params_path = str;
1242*10175SStuart.Maybee@Sun.COM 	}
1243*10175SStuart.Maybee@Sun.COM 
12445084Sjohnlev 	if (ldi_ident_from_dip(dip, &vdp->xs_ldi_li) != 0)
12455084Sjohnlev 		return (DDI_FAILURE);
12465084Sjohnlev 
12478081SDina.Nimeh@Sun.Com 	nodepath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
12488863SEdward.Pilatowicz@Sun.COM 
12498863SEdward.Pilatowicz@Sun.COM 	/* try to open backend device */
12508863SEdward.Pilatowicz@Sun.COM 	if (xdb_setup_node(vdp, nodepath) != DDI_SUCCESS) {
12518863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, ENXIO,
12525084Sjohnlev 		    "Getting device path of backend device");
12535084Sjohnlev 		ldi_ident_release(vdp->xs_ldi_li);
12548081SDina.Nimeh@Sun.Com 		kmem_free(nodepath, MAXPATHLEN);
12555084Sjohnlev 		return (DDI_FAILURE);
12565084Sjohnlev 	}
12575084Sjohnlev 
12585084Sjohnlev 	if (ldi_open_by_name(nodepath,
12595084Sjohnlev 	    FREAD | (XDB_IS_RO(vdp) ? 0 : FWRITE),
12605084Sjohnlev 	    kcred, &vdp->xs_ldi_hdl, vdp->xs_ldi_li) != 0) {
12615084Sjohnlev 		xdb_teardown_node(vdp);
12625084Sjohnlev 		ldi_ident_release(vdp->xs_ldi_li);
12635084Sjohnlev 		cmn_err(CE_WARN, "xdb@%s: Failed to open: %s",
12645084Sjohnlev 		    ddi_get_name_addr(dip), nodepath);
12658081SDina.Nimeh@Sun.Com 		kmem_free(nodepath, MAXPATHLEN);
12665084Sjohnlev 		return (DDI_FAILURE);
12675084Sjohnlev 	}
12685084Sjohnlev 
12695084Sjohnlev 	if (ldi_get_size(vdp->xs_ldi_hdl, &devsize) != DDI_SUCCESS) {
12705084Sjohnlev 		(void) ldi_close(vdp->xs_ldi_hdl,
12715084Sjohnlev 		    FREAD | (XDB_IS_RO(vdp) ? 0 : FWRITE), kcred);
12725084Sjohnlev 		xdb_teardown_node(vdp);
12735084Sjohnlev 		ldi_ident_release(vdp->xs_ldi_li);
12748081SDina.Nimeh@Sun.Com 		kmem_free(nodepath, MAXPATHLEN);
12755084Sjohnlev 		return (DDI_FAILURE);
12765084Sjohnlev 	}
12779889SLarry.Liu@Sun.COM 
12789889SLarry.Liu@Sun.COM 	blksize = ldi_prop_get_int64(vdp->xs_ldi_hdl,
12799889SLarry.Liu@Sun.COM 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
12809889SLarry.Liu@Sun.COM 	    "blksize", DEV_BSIZE);
12819889SLarry.Liu@Sun.COM 	if (blksize == DEV_BSIZE)
12829889SLarry.Liu@Sun.COM 		blksize = ldi_prop_get_int(vdp->xs_ldi_hdl,
12839889SLarry.Liu@Sun.COM 		    LDI_DEV_T_ANY | DDI_PROP_DONTPASS |
12849889SLarry.Liu@Sun.COM 		    DDI_PROP_NOTPROM, "device-blksize", DEV_BSIZE);
12859889SLarry.Liu@Sun.COM 
12869889SLarry.Liu@Sun.COM 	vdp->xs_sec_size = blksize;
12879889SLarry.Liu@Sun.COM 	vdp->xs_sectors = devsize / blksize;
12885084Sjohnlev 
12898863SEdward.Pilatowicz@Sun.COM 	/* check if the underlying device is a CD/DVD disc */
12908863SEdward.Pilatowicz@Sun.COM 	if (ldi_prop_get_int(vdp->xs_ldi_hdl, LDI_DEV_T_ANY | DDI_PROP_DONTPASS,
12918863SEdward.Pilatowicz@Sun.COM 	    INQUIRY_DEVICE_TYPE, DTYPE_DIRECT) == DTYPE_RODIRECT)
12928863SEdward.Pilatowicz@Sun.COM 		vdp->xs_type |= XDB_DEV_BE_CD;
12938863SEdward.Pilatowicz@Sun.COM 
12948863SEdward.Pilatowicz@Sun.COM 	/* check if the underlying device is a removable disk */
12958863SEdward.Pilatowicz@Sun.COM 	if (ldi_prop_exists(vdp->xs_ldi_hdl,
12968863SEdward.Pilatowicz@Sun.COM 	    LDI_DEV_T_ANY | DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
12978863SEdward.Pilatowicz@Sun.COM 	    "removable-media"))
12988863SEdward.Pilatowicz@Sun.COM 		vdp->xs_type |= XDB_DEV_BE_RMB;
12998863SEdward.Pilatowicz@Sun.COM 
13008081SDina.Nimeh@Sun.Com 	kmem_free(nodepath, MAXPATHLEN);
13015084Sjohnlev 	return (DDI_SUCCESS);
13025084Sjohnlev }
13035084Sjohnlev 
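/*
 * xdb_close_device() releases everything acquired by xdb_open_device():
 * the LDI handle and ident, any lofi mapping, and the cached sector
 * count and backend device-type flags.
 */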
13045084Sjohnlev static void
13055084Sjohnlev xdb_close_device(xdb_t *vdp)
13065084Sjohnlev {
13078863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
13088863SEdward.Pilatowicz@Sun.COM 
13098863SEdward.Pilatowicz@Sun.COM 	if (strlen(vdp->xs_params_path) == 0) {
13108863SEdward.Pilatowicz@Sun.COM 		ASSERT(XDB_IS_FE_CD(vdp));
13118863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_sectors == 0);
13128863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_ldi_li == NULL);
13138863SEdward.Pilatowicz@Sun.COM 		ASSERT(vdp->xs_ldi_hdl == NULL);
13148863SEdward.Pilatowicz@Sun.COM 		return;
13158863SEdward.Pilatowicz@Sun.COM 	}
13168863SEdward.Pilatowicz@Sun.COM 
13175084Sjohnlev 	(void) ldi_close(vdp->xs_ldi_hdl,
13185084Sjohnlev 	    FREAD | (XDB_IS_RO(vdp) ? 0 : FWRITE), kcred);
13195084Sjohnlev 	xdb_teardown_node(vdp);
13205084Sjohnlev 	ldi_ident_release(vdp->xs_ldi_li);
13218863SEdward.Pilatowicz@Sun.COM 	vdp->xs_type &= ~(XDB_DEV_BE_CD | XDB_DEV_BE_RMB);
13228863SEdward.Pilatowicz@Sun.COM 	vdp->xs_sectors = 0;
13235084Sjohnlev 	vdp->xs_ldi_li = NULL;
13245084Sjohnlev 	vdp->xs_ldi_hdl = NULL;
13255084Sjohnlev }
13265084Sjohnlev 
13275084Sjohnlev /*
13285084Sjohnlev  * Kick-off connect process
13298863SEdward.Pilatowicz@Sun.COM  * If xs_fe_initialised == B_TRUE and xs_hp_connected == B_TRUE,
13308863SEdward.Pilatowicz@Sun.COM  * then xs_if_connected will be set to B_TRUE on success.
13315084Sjohnlev  */
13328863SEdward.Pilatowicz@Sun.COM static void
13335084Sjohnlev xdb_start_connect(xdb_t *vdp)
13345084Sjohnlev {
13358863SEdward.Pilatowicz@Sun.COM 	xenbus_transaction_t	xbt;
13368863SEdward.Pilatowicz@Sun.COM 	dev_info_t		*dip = vdp->xs_dip;
13378863SEdward.Pilatowicz@Sun.COM 	boolean_t		fb_exists;
13388863SEdward.Pilatowicz@Sun.COM 	int			err, instance = ddi_get_instance(dip);
13398863SEdward.Pilatowicz@Sun.COM 	uint64_t		sectors;
13408863SEdward.Pilatowicz@Sun.COM 	uint_t			dinfo, ssize;
13418863SEdward.Pilatowicz@Sun.COM 	char			*xsname;
13428863SEdward.Pilatowicz@Sun.COM 
13438863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
13448863SEdward.Pilatowicz@Sun.COM 
13458863SEdward.Pilatowicz@Sun.COM 	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
13468863SEdward.Pilatowicz@Sun.COM 	    ((vdp->xs_peer = xvdi_get_oeid(dip)) == (domid_t)-1))
13478863SEdward.Pilatowicz@Sun.COM 		return;
13488863SEdward.Pilatowicz@Sun.COM 
13498863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&vdp->xs_iomutex);
13508863SEdward.Pilatowicz@Sun.COM 	/*
13518863SEdward.Pilatowicz@Sun.COM 	 * if the hotplug scripts haven't run or if the frontend is not
13528863SEdward.Pilatowicz@Sun.COM 	 * initialized, then we can't try to connect.
13538863SEdward.Pilatowicz@Sun.COM 	 */
13548863SEdward.Pilatowicz@Sun.COM 	if (!vdp->xs_hp_connected || !vdp->xs_fe_initialised) {
13558863SEdward.Pilatowicz@Sun.COM 		ASSERT(!vdp->xs_if_connected);
13568863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_iomutex);
13578863SEdward.Pilatowicz@Sun.COM 		return;
13588863SEdward.Pilatowicz@Sun.COM 	}
13598863SEdward.Pilatowicz@Sun.COM 
13608863SEdward.Pilatowicz@Sun.COM 	/* If we're already connected then there's nothing to do */
13618863SEdward.Pilatowicz@Sun.COM 	if (vdp->xs_if_connected) {
13628863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_iomutex);
13638863SEdward.Pilatowicz@Sun.COM 		return;
13648863SEdward.Pilatowicz@Sun.COM 	}
13658863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&vdp->xs_iomutex);
13665084Sjohnlev 
13675084Sjohnlev 	/*
13685084Sjohnlev 	 * Start connecting to the frontend only when the backend device is
13695084Sjohnlev 	 * ready and the frontend has moved to XenbusStateInitialised,
13708863SEdward.Pilatowicz@Sun.COM 	 * which means it is ready to connect.
13715084Sjohnlev 	 */
13728863SEdward.Pilatowicz@Sun.COM 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE,
13738863SEdward.Pilatowicz@Sun.COM 	    "xdb@%s: starting connection process", ddi_get_name_addr(dip)));
13745084Sjohnlev 
13758863SEdward.Pilatowicz@Sun.COM 	if (xdb_open_device(vdp) != DDI_SUCCESS)
13768863SEdward.Pilatowicz@Sun.COM 		return;
13775084Sjohnlev 
13788863SEdward.Pilatowicz@Sun.COM 	if (xdb_bindto_frontend(vdp) != DDI_SUCCESS) {
13798863SEdward.Pilatowicz@Sun.COM 		xdb_close_device(vdp);
13808863SEdward.Pilatowicz@Sun.COM 		return;
13818863SEdward.Pilatowicz@Sun.COM 	}
13825084Sjohnlev 
13835084Sjohnlev 	/* init i/o requests */
13845084Sjohnlev 	xdb_init_ioreqs(vdp);
13855084Sjohnlev 
13865084Sjohnlev 	if (ddi_add_intr(dip, 0, NULL, NULL, xdb_intr, (caddr_t)vdp)
13878863SEdward.Pilatowicz@Sun.COM 	    != DDI_SUCCESS) {
13888863SEdward.Pilatowicz@Sun.COM 		xdb_uninit_ioreqs(vdp);
13898863SEdward.Pilatowicz@Sun.COM 		xdb_unbindfrom_frontend(vdp);
13908863SEdward.Pilatowicz@Sun.COM 		xdb_close_device(vdp);
13918863SEdward.Pilatowicz@Sun.COM 		return;
13928863SEdward.Pilatowicz@Sun.COM 	}
13938863SEdward.Pilatowicz@Sun.COM 
13948863SEdward.Pilatowicz@Sun.COM 	dinfo = 0;
13958863SEdward.Pilatowicz@Sun.COM 	if (XDB_IS_RO(vdp))
13968863SEdward.Pilatowicz@Sun.COM 		dinfo |= VDISK_READONLY;
13978863SEdward.Pilatowicz@Sun.COM 	if (XDB_IS_BE_RMB(vdp))
13988863SEdward.Pilatowicz@Sun.COM 		dinfo |= VDISK_REMOVABLE;
13998863SEdward.Pilatowicz@Sun.COM 	if (XDB_IS_BE_CD(vdp))
14008863SEdward.Pilatowicz@Sun.COM 		dinfo |= VDISK_CDROM;
14018863SEdward.Pilatowicz@Sun.COM 	if (XDB_IS_FE_CD(vdp))
14028863SEdward.Pilatowicz@Sun.COM 		dinfo |= VDISK_REMOVABLE | VDISK_CDROM;
14035084Sjohnlev 
14045084Sjohnlev 	/*
14055084Sjohnlev 	 * we can receive an intr any time from now on
14065084Sjohnlev 	 * mark that we're ready to take intr
14075084Sjohnlev 	 */
14085084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
14098863SEdward.Pilatowicz@Sun.COM 	ASSERT(vdp->xs_fe_initialised);
14108863SEdward.Pilatowicz@Sun.COM 	vdp->xs_if_connected = B_TRUE;
14115084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
14125084Sjohnlev 
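	/*
	 * The transaction below publishes XBP_FB (if not already present),
	 * XBP_INFO, XBP_SECTOR_SIZE, XBP_SECTORS and "instance" under our
	 * backend xenstore directory, then switches our state to
	 * XenbusStateConnected.  EAGAIN from the commit means the
	 * transaction raced and must be retried.
	 */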
14138863SEdward.Pilatowicz@Sun.COM trans_retry:
14145084Sjohnlev 	/* write into xenstore the info needed by frontend */
14158863SEdward.Pilatowicz@Sun.COM 	if ((err = xenbus_transaction_start(&xbt)) != 0) {
14168863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "connect transaction init");
14178863SEdward.Pilatowicz@Sun.COM 		goto errout;
14185084Sjohnlev 	}
14195084Sjohnlev 
14208863SEdward.Pilatowicz@Sun.COM 	/* If feature-barrier isn't present in xenstore, add it.  */
14218863SEdward.Pilatowicz@Sun.COM 	fb_exists = xenbus_exists(xsname, XBP_FB);
14225084Sjohnlev 
14239889SLarry.Liu@Sun.COM 	ssize = (vdp->xs_sec_size == 0) ? DEV_BSIZE : vdp->xs_sec_size;
14248863SEdward.Pilatowicz@Sun.COM 	sectors = vdp->xs_sectors;
14258863SEdward.Pilatowicz@Sun.COM 	if (((!fb_exists &&
14268863SEdward.Pilatowicz@Sun.COM 	    (err = xenbus_printf(xbt, xsname, XBP_FB, "%d", 1)))) ||
14278863SEdward.Pilatowicz@Sun.COM 	    (err = xenbus_printf(xbt, xsname, XBP_INFO, "%u", dinfo)) ||
14289889SLarry.Liu@Sun.COM 	    (err = xenbus_printf(xbt, xsname, XBP_SECTOR_SIZE, "%u", ssize)) ||
14298863SEdward.Pilatowicz@Sun.COM 	    (err = xenbus_printf(xbt, xsname,
14308863SEdward.Pilatowicz@Sun.COM 	    XBP_SECTORS, "%"PRIu64, sectors)) ||
14318863SEdward.Pilatowicz@Sun.COM 	    (err = xenbus_printf(xbt, xsname, "instance", "%d", instance)) ||
14328863SEdward.Pilatowicz@Sun.COM 	    ((err = xvdi_switch_state(dip, xbt, XenbusStateConnected)) > 0)) {
14338863SEdward.Pilatowicz@Sun.COM 		(void) xenbus_transaction_end(xbt, 1);
14348863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "connect transaction setup");
14358863SEdward.Pilatowicz@Sun.COM 		goto errout;
14365084Sjohnlev 	}
14375084Sjohnlev 
14388863SEdward.Pilatowicz@Sun.COM 	if ((err = xenbus_transaction_end(xbt, 0)) != 0) {
14398863SEdward.Pilatowicz@Sun.COM 		if (err == EAGAIN) {
14408863SEdward.Pilatowicz@Sun.COM 			/* transaction is ended, don't need to abort it */
14418863SEdward.Pilatowicz@Sun.COM 			goto trans_retry;
14428863SEdward.Pilatowicz@Sun.COM 		}
14438863SEdward.Pilatowicz@Sun.COM 		xvdi_dev_error(dip, err, "connect transaction commit");
14448863SEdward.Pilatowicz@Sun.COM 		goto errout;
14458863SEdward.Pilatowicz@Sun.COM 	}
14465084Sjohnlev 
14478863SEdward.Pilatowicz@Sun.COM 	return;
14485084Sjohnlev 
14498863SEdward.Pilatowicz@Sun.COM errout:
14508863SEdward.Pilatowicz@Sun.COM 	xdb_close(dip);
14515084Sjohnlev }
14525084Sjohnlev 
14535084Sjohnlev /*
14545084Sjohnlev  * Disconnect from frontend and close backend device
14555084Sjohnlev  */
14565084Sjohnlev static void
14575084Sjohnlev xdb_close(dev_info_t *dip)
14585084Sjohnlev {
14595084Sjohnlev 	xdb_t *vdp = (xdb_t *)ddi_get_driver_private(dip);
14605084Sjohnlev 
14615084Sjohnlev 	ASSERT(MUTEX_HELD(&vdp->xs_cbmutex));
14625084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
14635084Sjohnlev 
14648863SEdward.Pilatowicz@Sun.COM 	/*
14658863SEdward.Pilatowicz@Sun.COM 	 * if the hotplug scripts haven't run or if the frontend is not
14668863SEdward.Pilatowicz@Sun.COM 	 * initialized, then we can't be connected, so there's no
14678863SEdward.Pilatowicz@Sun.COM 	 * connection to close.
14688863SEdward.Pilatowicz@Sun.COM 	 */
14698863SEdward.Pilatowicz@Sun.COM 	if (!vdp->xs_hp_connected || !vdp->xs_fe_initialised) {
14708863SEdward.Pilatowicz@Sun.COM 		ASSERT(!vdp->xs_if_connected);
14718863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_iomutex);
14728863SEdward.Pilatowicz@Sun.COM 		return;
14738863SEdward.Pilatowicz@Sun.COM 	}
14748863SEdward.Pilatowicz@Sun.COM 
14758863SEdward.Pilatowicz@Sun.COM 	/* if we're not connected, there's nothing to do */
14768863SEdward.Pilatowicz@Sun.COM 	if (!vdp->xs_if_connected) {
14775084Sjohnlev 		cv_broadcast(&vdp->xs_iocv);
14785084Sjohnlev 		mutex_exit(&vdp->xs_iomutex);
14795084Sjohnlev 		return;
14805084Sjohnlev 	}
14818863SEdward.Pilatowicz@Sun.COM 
14828863SEdward.Pilatowicz@Sun.COM 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE, "closing while connected"));
14838863SEdward.Pilatowicz@Sun.COM 
14848863SEdward.Pilatowicz@Sun.COM 	vdp->xs_if_connected = B_FALSE;
14855084Sjohnlev 	cv_broadcast(&vdp->xs_iocv);
14865084Sjohnlev 
14875084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
14885084Sjohnlev 
14895084Sjohnlev 	/* stop accepting I/O request from frontend */
14905084Sjohnlev 	ddi_remove_intr(dip, 0, NULL);
14918863SEdward.Pilatowicz@Sun.COM 
14925084Sjohnlev 	/* clear all on-going I/Os, if any */
14935084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
14945084Sjohnlev 	while (vdp->xs_ionum > 0)
14955084Sjohnlev 		cv_wait(&vdp->xs_ionumcv, &vdp->xs_iomutex);
14965084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
14975084Sjohnlev 
14985084Sjohnlev 	/* clean up resources and close this interface */
14995084Sjohnlev 	xdb_uninit_ioreqs(vdp);
15005084Sjohnlev 	xdb_unbindfrom_frontend(vdp);
15015084Sjohnlev 	xdb_close_device(vdp);
15025084Sjohnlev 	vdp->xs_peer = (domid_t)-1;
15035084Sjohnlev }
15045084Sjohnlev 
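/*
 * xdb_send_buf() is the body of the "xdb_iotask" taskq thread.  It pulls
 * queued bufs off the xs_f_iobuf list and issues each one to the
 * underlying device with ldi_strategy(); bufs that cannot be issued are
 * completed with B_ERROR via xdb_biodone().  The loop exits when
 * xs_send_buf is cleared.
 */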
15055084Sjohnlev static void
15065084Sjohnlev xdb_send_buf(void *arg)
15075084Sjohnlev {
15088863SEdward.Pilatowicz@Sun.COM 	xdb_t	*vdp = (xdb_t *)arg;
15098863SEdward.Pilatowicz@Sun.COM 	buf_t	*bp;
15108863SEdward.Pilatowicz@Sun.COM 	int	err;
15115084Sjohnlev 
15125084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
15138863SEdward.Pilatowicz@Sun.COM 	while (vdp->xs_send_buf) {
15148863SEdward.Pilatowicz@Sun.COM 		if ((bp = vdp->xs_f_iobuf) == NULL) {
15158863SEdward.Pilatowicz@Sun.COM 			/* wait for some io to send */
15168863SEdward.Pilatowicz@Sun.COM 			XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE,
15178863SEdward.Pilatowicz@Sun.COM 			    "send buf waiting for io"));
15188863SEdward.Pilatowicz@Sun.COM 			cv_wait(&vdp->xs_iocv, &vdp->xs_iomutex);
15198863SEdward.Pilatowicz@Sun.COM 			continue;
15205084Sjohnlev 		}
15215084Sjohnlev 
15228863SEdward.Pilatowicz@Sun.COM 		vdp->xs_f_iobuf = bp->av_forw;
15238863SEdward.Pilatowicz@Sun.COM 		bp->av_forw = NULL;
15248863SEdward.Pilatowicz@Sun.COM 		vdp->xs_ionum++;
15258863SEdward.Pilatowicz@Sun.COM 
15268863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_iomutex);
15278863SEdward.Pilatowicz@Sun.COM 		if (bp->b_bcount == 0) {
15288863SEdward.Pilatowicz@Sun.COM 			/* no I/O needs to be done */
15298863SEdward.Pilatowicz@Sun.COM 			(void) xdb_biodone(bp);
15308863SEdward.Pilatowicz@Sun.COM 			mutex_enter(&vdp->xs_iomutex);
15318863SEdward.Pilatowicz@Sun.COM 			continue;
15328863SEdward.Pilatowicz@Sun.COM 		}
15338863SEdward.Pilatowicz@Sun.COM 
15348863SEdward.Pilatowicz@Sun.COM 		err = EIO;
15358863SEdward.Pilatowicz@Sun.COM 		if (vdp->xs_ldi_hdl != NULL)
15368863SEdward.Pilatowicz@Sun.COM 			err = ldi_strategy(vdp->xs_ldi_hdl, bp);
15378863SEdward.Pilatowicz@Sun.COM 		if (err != 0) {
15388863SEdward.Pilatowicz@Sun.COM 			bp->b_flags |= B_ERROR;
15398863SEdward.Pilatowicz@Sun.COM 			(void) xdb_biodone(bp);
15408863SEdward.Pilatowicz@Sun.COM 			XDB_DBPRINT(XDB_DBG_IO, (CE_WARN,
15418863SEdward.Pilatowicz@Sun.COM 			    "xdb@%s: sent buf to backend dev failed, err=%d",
15428863SEdward.Pilatowicz@Sun.COM 			    ddi_get_name_addr(vdp->xs_dip), err));
15438863SEdward.Pilatowicz@Sun.COM 		} else {
15448863SEdward.Pilatowicz@Sun.COM 			XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE,
15458863SEdward.Pilatowicz@Sun.COM 			    "sent buf to backend ok"));
15468863SEdward.Pilatowicz@Sun.COM 		}
15478863SEdward.Pilatowicz@Sun.COM 		mutex_enter(&vdp->xs_iomutex);
15485084Sjohnlev 	}
15498863SEdward.Pilatowicz@Sun.COM 	XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "send buf finishing"));
15505084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
15515084Sjohnlev }
15525084Sjohnlev 
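/*
 * xdb_hp_state_change() is called when the hotplug script reports a
 * status change.  Once the script reports Connected, mark
 * xs_hp_connected and try to kick off the connect process.
 */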
15535084Sjohnlev /*ARGSUSED*/
15545084Sjohnlev static void
15555084Sjohnlev xdb_hp_state_change(dev_info_t *dip, ddi_eventcookie_t id, void *arg,
15565084Sjohnlev     void *impl_data)
15575084Sjohnlev {
15585084Sjohnlev 	xendev_hotplug_state_t state = *(xendev_hotplug_state_t *)impl_data;
15595084Sjohnlev 	xdb_t *vdp = (xdb_t *)ddi_get_driver_private(dip);
15605084Sjohnlev 
15615084Sjohnlev 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE, "xdb@%s: "
15625084Sjohnlev 	    "hotplug status change to %d!", ddi_get_name_addr(dip), state));
15635084Sjohnlev 
15648863SEdward.Pilatowicz@Sun.COM 	if (state != Connected)
15658863SEdward.Pilatowicz@Sun.COM 		return;
15668863SEdward.Pilatowicz@Sun.COM 
15675084Sjohnlev 	mutex_enter(&vdp->xs_cbmutex);
15688863SEdward.Pilatowicz@Sun.COM 
15698863SEdward.Pilatowicz@Sun.COM 	/* If the hotplug scripts have already run, there's nothing to do */
15708863SEdward.Pilatowicz@Sun.COM 	if (vdp->xs_hp_connected) {
15718863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
15728863SEdward.Pilatowicz@Sun.COM 		return;
15735084Sjohnlev 	}
15748863SEdward.Pilatowicz@Sun.COM 
15758863SEdward.Pilatowicz@Sun.COM 	vdp->xs_hp_connected = B_TRUE;
15768863SEdward.Pilatowicz@Sun.COM 	xdb_start_connect(vdp);
15775084Sjohnlev 	mutex_exit(&vdp->xs_cbmutex);
15785084Sjohnlev }
15795084Sjohnlev 
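/*
 * xdb_oe_state_change() handles xenbus state changes of the other end
 * (the frontend).  Depending on the new state it tears down and/or
 * (re)starts the connection, or mirrors Closing/Closed back into our
 * own xenbus state.
 */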
15805084Sjohnlev /*ARGSUSED*/
15815084Sjohnlev static void
15825084Sjohnlev xdb_oe_state_change(dev_info_t *dip, ddi_eventcookie_t id, void *arg,
15835084Sjohnlev     void *impl_data)
15845084Sjohnlev {
15855084Sjohnlev 	XenbusState new_state = *(XenbusState *)impl_data;
15865084Sjohnlev 	xdb_t *vdp = (xdb_t *)ddi_get_driver_private(dip);
15875084Sjohnlev 
15885084Sjohnlev 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE, "xdb@%s: "
15895084Sjohnlev 	    "otherend state change to %d!", ddi_get_name_addr(dip), new_state));
15905084Sjohnlev 
15915084Sjohnlev 	mutex_enter(&vdp->xs_cbmutex);
15925084Sjohnlev 
15938863SEdward.Pilatowicz@Sun.COM 	/*
15948863SEdward.Pilatowicz@Sun.COM 	 * Now it'd really be nice if there was a well-defined state
15958863SEdward.Pilatowicz@Sun.COM 	 * transition model for xen frontend drivers, but unfortunately
15968863SEdward.Pilatowicz@Sun.COM 	 * there isn't.  So we're stuck with assuming that all state
15978863SEdward.Pilatowicz@Sun.COM 	 * transitions are possible, and we'll just have to deal with
15988863SEdward.Pilatowicz@Sun.COM 	 * them regardless of what state we're in.
15998863SEdward.Pilatowicz@Sun.COM 	 */
16005084Sjohnlev 	switch (new_state) {
16018863SEdward.Pilatowicz@Sun.COM 	case XenbusStateUnknown:
16028863SEdward.Pilatowicz@Sun.COM 	case XenbusStateInitialising:
16038863SEdward.Pilatowicz@Sun.COM 	case XenbusStateInitWait:
16048863SEdward.Pilatowicz@Sun.COM 		/* tear down our connection to the frontend */
16058863SEdward.Pilatowicz@Sun.COM 		xdb_close(dip);
16068863SEdward.Pilatowicz@Sun.COM 		vdp->xs_fe_initialised = B_FALSE;
16078863SEdward.Pilatowicz@Sun.COM 		break;
16085084Sjohnlev 
16098863SEdward.Pilatowicz@Sun.COM 	case XenbusStateInitialised:
16108863SEdward.Pilatowicz@Sun.COM 		/*
16118863SEdward.Pilatowicz@Sun.COM 		 * If we were connected, then we need to drop the connection
16128863SEdward.Pilatowicz@Sun.COM 		 * and re-negotiate it.
16138863SEdward.Pilatowicz@Sun.COM 		 */
16148863SEdward.Pilatowicz@Sun.COM 		xdb_close(dip);
16158863SEdward.Pilatowicz@Sun.COM 		vdp->xs_fe_initialised = B_TRUE;
16168863SEdward.Pilatowicz@Sun.COM 		xdb_start_connect(vdp);
16178863SEdward.Pilatowicz@Sun.COM 		break;
16185084Sjohnlev 
16198863SEdward.Pilatowicz@Sun.COM 	case XenbusStateConnected:
16208863SEdward.Pilatowicz@Sun.COM 		/* nothing to do here other than congratulate the frontend */
16215084Sjohnlev 		break;
16228863SEdward.Pilatowicz@Sun.COM 
16235084Sjohnlev 	case XenbusStateClosing:
16248863SEdward.Pilatowicz@Sun.COM 		/* monkey see monkey do */
16255084Sjohnlev 		(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateClosing);
16265084Sjohnlev 		break;
16278863SEdward.Pilatowicz@Sun.COM 
16285084Sjohnlev 	case XenbusStateClosed:
16298863SEdward.Pilatowicz@Sun.COM 		/* tear down our connection to the frontend */
16305084Sjohnlev 		xdb_close(dip);
16318863SEdward.Pilatowicz@Sun.COM 		vdp->xs_fe_initialised = B_FALSE;
16328863SEdward.Pilatowicz@Sun.COM 		(void) xvdi_switch_state(dip, XBT_NULL, new_state);
16338863SEdward.Pilatowicz@Sun.COM 		break;
16345084Sjohnlev 	}
16355084Sjohnlev 
16365084Sjohnlev 	mutex_exit(&vdp->xs_cbmutex);
16375084Sjohnlev }
16385084Sjohnlev 
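/*
 * xdb_attach() allocates per-instance soft state, reads the frontend
 * device type and mode from xenstore, sets up the xenstore watches and
 * the I/O taskq, registers the otherend/hotplug event handlers, kicks
 * off the hotplug script and switches the backend to XenbusStateInitWait.
 */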
16395084Sjohnlev static int
16405084Sjohnlev xdb_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
16415084Sjohnlev {
16428863SEdward.Pilatowicz@Sun.COM 	ddi_iblock_cookie_t	ibc;
16438863SEdward.Pilatowicz@Sun.COM 	xdb_t			*vdp;
16448863SEdward.Pilatowicz@Sun.COM 	int			instance = ddi_get_instance(dip);
16458863SEdward.Pilatowicz@Sun.COM 	char			*xsname, *oename;
16468863SEdward.Pilatowicz@Sun.COM 	char			*str;
16475084Sjohnlev 
16485084Sjohnlev 	switch (cmd) {
16495084Sjohnlev 	case DDI_RESUME:
16505084Sjohnlev 		return (DDI_FAILURE);
16515084Sjohnlev 	case DDI_ATTACH:
16525084Sjohnlev 		break;
16535084Sjohnlev 	default:
16545084Sjohnlev 		return (DDI_FAILURE);
16555084Sjohnlev 	}
16568863SEdward.Pilatowicz@Sun.COM 	/* DDI_ATTACH */
16575084Sjohnlev 
16588863SEdward.Pilatowicz@Sun.COM 	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
16598863SEdward.Pilatowicz@Sun.COM 	    ((oename = xvdi_get_oename(dip)) == NULL))
16608863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
16618863SEdward.Pilatowicz@Sun.COM 
16628863SEdward.Pilatowicz@Sun.COM 	/*
16638863SEdward.Pilatowicz@Sun.COM 	 * Disable auto-detach.  This is necessary so that we don't get
16648863SEdward.Pilatowicz@Sun.COM 	 * detached while we're disconnected from the front end.
16658863SEdward.Pilatowicz@Sun.COM 	 */
16668863SEdward.Pilatowicz@Sun.COM 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1);
16678863SEdward.Pilatowicz@Sun.COM 
16688863SEdward.Pilatowicz@Sun.COM 	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS)
16698863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
16708863SEdward.Pilatowicz@Sun.COM 
16715084Sjohnlev 	if (ddi_soft_state_zalloc(xdb_statep, instance) != DDI_SUCCESS)
16725084Sjohnlev 		return (DDI_FAILURE);
16735084Sjohnlev 
16745084Sjohnlev 	vdp = ddi_get_soft_state(xdb_statep, instance);
16755084Sjohnlev 	vdp->xs_dip = dip;
16765084Sjohnlev 	mutex_init(&vdp->xs_iomutex, NULL, MUTEX_DRIVER, (void *)ibc);
16775084Sjohnlev 	mutex_init(&vdp->xs_cbmutex, NULL, MUTEX_DRIVER, (void *)ibc);
16785084Sjohnlev 	cv_init(&vdp->xs_iocv, NULL, CV_DRIVER, NULL);
16795084Sjohnlev 	cv_init(&vdp->xs_ionumcv, NULL, CV_DRIVER, NULL);
16805084Sjohnlev 	ddi_set_driver_private(dip, vdp);
16815084Sjohnlev 
16828863SEdward.Pilatowicz@Sun.COM 	if (!xdb_kstat_init(vdp))
16838863SEdward.Pilatowicz@Sun.COM 		goto errout1;
16848863SEdward.Pilatowicz@Sun.COM 
16858863SEdward.Pilatowicz@Sun.COM 	/* Check if the frontend device is supposed to be a cdrom */
16868863SEdward.Pilatowicz@Sun.COM 	if (xenbus_read_str(oename, XBP_DEV_TYPE, &str) != 0)
16878863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
16888863SEdward.Pilatowicz@Sun.COM 	if (strcmp(str, XBV_DEV_TYPE_CD) == 0)
16898863SEdward.Pilatowicz@Sun.COM 		vdp->xs_type |= XDB_DEV_FE_CD;
16908863SEdward.Pilatowicz@Sun.COM 	strfree(str);
16918863SEdward.Pilatowicz@Sun.COM 
16928863SEdward.Pilatowicz@Sun.COM 	/* Check if the frontend device is supposed to be read only */
16938863SEdward.Pilatowicz@Sun.COM 	if (xenbus_read_str(xsname, "mode", &str) != 0)
16948863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
16958863SEdward.Pilatowicz@Sun.COM 	if ((strcmp(str, "r") == 0) || (strcmp(str, "ro") == 0))
16968863SEdward.Pilatowicz@Sun.COM 		vdp->xs_type |= XDB_DEV_RO;
16978863SEdward.Pilatowicz@Sun.COM 	strfree(str);
16988863SEdward.Pilatowicz@Sun.COM 
16998863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&vdp->xs_cbmutex);
17008863SEdward.Pilatowicz@Sun.COM 	if (!xdb_media_req_init(vdp) || !xdb_params_init(vdp)) {
17018863SEdward.Pilatowicz@Sun.COM 		xvdi_remove_xb_watch_handlers(dip);
17028863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&vdp->xs_cbmutex);
17038863SEdward.Pilatowicz@Sun.COM 		goto errout2;
17048863SEdward.Pilatowicz@Sun.COM 	}
17058863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&vdp->xs_cbmutex);
17068863SEdward.Pilatowicz@Sun.COM 
17078863SEdward.Pilatowicz@Sun.COM 	vdp->xs_send_buf = B_TRUE;
17085084Sjohnlev 	vdp->xs_iotaskq = ddi_taskq_create(dip, "xdb_iotask", 1,
17095084Sjohnlev 	    TASKQ_DEFAULTPRI, 0);
17105084Sjohnlev 	(void) ddi_taskq_dispatch(vdp->xs_iotaskq, xdb_send_buf, vdp,
17115084Sjohnlev 	    DDI_SLEEP);
17125084Sjohnlev 
17135084Sjohnlev 	/* Watch frontend and hotplug state change */
17148863SEdward.Pilatowicz@Sun.COM 	if ((xvdi_add_event_handler(dip, XS_OE_STATE, xdb_oe_state_change,
17158863SEdward.Pilatowicz@Sun.COM 	    NULL) != DDI_SUCCESS) ||
17168863SEdward.Pilatowicz@Sun.COM 	    (xvdi_add_event_handler(dip, XS_HP_STATE, xdb_hp_state_change,
17178863SEdward.Pilatowicz@Sun.COM 	    NULL) != DDI_SUCCESS))
17185084Sjohnlev 		goto errout3;
17195084Sjohnlev 
17205084Sjohnlev 	/*
17215084Sjohnlev 	 * Kick-off hotplug script
17225084Sjohnlev 	 */
17235084Sjohnlev 	if (xvdi_post_event(dip, XEN_HP_ADD) != DDI_SUCCESS) {
17245084Sjohnlev 		cmn_err(CE_WARN, "xdb@%s: failed to start hotplug script",
17255084Sjohnlev 		    ddi_get_name_addr(dip));
17268863SEdward.Pilatowicz@Sun.COM 		goto errout3;
17275084Sjohnlev 	}
17285084Sjohnlev 
17295084Sjohnlev 	/*
17305084Sjohnlev 	 * Start waiting for the hotplug event and the otherend state event;
17315084Sjohnlev 	 * mainly for debugging, the frontend will not act on seeing this.
17325084Sjohnlev 	 */
17335084Sjohnlev 	(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateInitWait);
17345084Sjohnlev 
17355084Sjohnlev 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE, "xdb@%s: attached!",
17365084Sjohnlev 	    ddi_get_name_addr(dip)));
17375084Sjohnlev 	return (DDI_SUCCESS);
17385084Sjohnlev 
17398863SEdward.Pilatowicz@Sun.COM errout3:
17408863SEdward.Pilatowicz@Sun.COM 	ASSERT(!vdp->xs_hp_connected && !vdp->xs_if_connected);
17418863SEdward.Pilatowicz@Sun.COM 
17425084Sjohnlev 	xvdi_remove_event_handler(dip, NULL);
17438863SEdward.Pilatowicz@Sun.COM 
17448863SEdward.Pilatowicz@Sun.COM 	/* Disconnect from the backend */
17455084Sjohnlev 	mutex_enter(&vdp->xs_cbmutex);
17465084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
17478863SEdward.Pilatowicz@Sun.COM 	vdp->xs_send_buf = B_FALSE;
17485084Sjohnlev 	cv_broadcast(&vdp->xs_iocv);
17495084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
17505084Sjohnlev 	mutex_exit(&vdp->xs_cbmutex);
17518863SEdward.Pilatowicz@Sun.COM 
17528863SEdward.Pilatowicz@Sun.COM 	/* wait for all io to drain and destroy io taskq */
17535084Sjohnlev 	ddi_taskq_destroy(vdp->xs_iotaskq);
17548863SEdward.Pilatowicz@Sun.COM 
17558863SEdward.Pilatowicz@Sun.COM 	/* tear down block-configure watch */
17568863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&vdp->xs_cbmutex);
17578863SEdward.Pilatowicz@Sun.COM 	xvdi_remove_xb_watch_handlers(dip);
17588863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&vdp->xs_cbmutex);
17598863SEdward.Pilatowicz@Sun.COM 
17605084Sjohnlev errout2:
17618863SEdward.Pilatowicz@Sun.COM 	/* remove kstats */
17628863SEdward.Pilatowicz@Sun.COM 	kstat_delete(vdp->xs_kstats);
17638863SEdward.Pilatowicz@Sun.COM 
17648863SEdward.Pilatowicz@Sun.COM errout1:
17658863SEdward.Pilatowicz@Sun.COM 	/* free up driver state */
17665084Sjohnlev 	ddi_set_driver_private(dip, NULL);
17675084Sjohnlev 	cv_destroy(&vdp->xs_iocv);
17685084Sjohnlev 	cv_destroy(&vdp->xs_ionumcv);
17695084Sjohnlev 	mutex_destroy(&vdp->xs_cbmutex);
17705084Sjohnlev 	mutex_destroy(&vdp->xs_iomutex);
17715084Sjohnlev 	ddi_soft_state_free(xdb_statep, instance);
17728863SEdward.Pilatowicz@Sun.COM 
17735084Sjohnlev 	return (DDI_FAILURE);
17745084Sjohnlev }
17755084Sjohnlev 
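/*
 * xdb_detach() refuses to detach while the interface is still connected
 * to the frontend; otherwise it stops the I/O taskq, removes the event
 * and watch handlers, runs the hotplug remove script and frees all
 * per-instance state.
 */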
17765084Sjohnlev /*ARGSUSED*/
17775084Sjohnlev static int
17785084Sjohnlev xdb_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
17795084Sjohnlev {
17805084Sjohnlev 	int instance = ddi_get_instance(dip);
17815084Sjohnlev 	xdb_t *vdp = XDB_INST2SOFTS(instance);
17825084Sjohnlev 
17835084Sjohnlev 	switch (cmd) {
17845084Sjohnlev 	case DDI_SUSPEND:
17855084Sjohnlev 		return (DDI_FAILURE);
17865084Sjohnlev 	case DDI_DETACH:
17875084Sjohnlev 		break;
17885084Sjohnlev 	default:
17895084Sjohnlev 		return (DDI_FAILURE);
17905084Sjohnlev 	}
17915084Sjohnlev 
17925084Sjohnlev 	/* DDI_DETACH handling */
17935084Sjohnlev 
17948863SEdward.Pilatowicz@Sun.COM 	/* refuse to detach if we're still in use by the frontend */
17955084Sjohnlev 	mutex_enter(&vdp->xs_iomutex);
17968863SEdward.Pilatowicz@Sun.COM 	if (vdp->xs_if_connected) {
17975084Sjohnlev 		mutex_exit(&vdp->xs_iomutex);
17985084Sjohnlev 		return (DDI_FAILURE);
17995084Sjohnlev 	}
18008863SEdward.Pilatowicz@Sun.COM 	vdp->xs_send_buf = B_FALSE;
18018863SEdward.Pilatowicz@Sun.COM 	cv_broadcast(&vdp->xs_iocv);
18025084Sjohnlev 	mutex_exit(&vdp->xs_iomutex);
18035084Sjohnlev 
18045084Sjohnlev 	xvdi_remove_event_handler(dip, NULL);
18055084Sjohnlev 	(void) xvdi_post_event(dip, XEN_HP_REMOVE);
18065084Sjohnlev 
18075084Sjohnlev 	ddi_taskq_destroy(vdp->xs_iotaskq);
18088863SEdward.Pilatowicz@Sun.COM 
18098863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&vdp->xs_cbmutex);
18108863SEdward.Pilatowicz@Sun.COM 	xvdi_remove_xb_watch_handlers(dip);
18118863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&vdp->xs_cbmutex);
18128863SEdward.Pilatowicz@Sun.COM 
18135084Sjohnlev 	cv_destroy(&vdp->xs_iocv);
18145084Sjohnlev 	cv_destroy(&vdp->xs_ionumcv);
18155084Sjohnlev 	mutex_destroy(&vdp->xs_cbmutex);
18165084Sjohnlev 	mutex_destroy(&vdp->xs_iomutex);
18175084Sjohnlev 	kstat_delete(vdp->xs_kstats);
18185084Sjohnlev 	ddi_set_driver_private(dip, NULL);
18195084Sjohnlev 	ddi_soft_state_free(xdb_statep, instance);
18205084Sjohnlev 
18215084Sjohnlev 	XDB_DBPRINT(XDB_DBG_INFO, (CE_NOTE, "xdb@%s: detached!",
18225084Sjohnlev 	    ddi_get_name_addr(dip)));
18235084Sjohnlev 	return (DDI_SUCCESS);
18245084Sjohnlev }
18255084Sjohnlev 
18265084Sjohnlev static struct dev_ops xdb_dev_ops = {
18275084Sjohnlev 	DEVO_REV,	/* devo_rev */
18285084Sjohnlev 	0,		/* devo_refcnt */
18295084Sjohnlev 	ddi_getinfo_1to1, /* devo_getinfo */
18305084Sjohnlev 	nulldev,	/* devo_identify */
18315084Sjohnlev 	nulldev,	/* devo_probe */
18325084Sjohnlev 	xdb_attach,	/* devo_attach */
18335084Sjohnlev 	xdb_detach,	/* devo_detach */
18345084Sjohnlev 	nodev,		/* devo_reset */
18355084Sjohnlev 	NULL,		/* devo_cb_ops */
18365084Sjohnlev 	NULL,		/* devo_bus_ops */
18377656SSherry.Moore@Sun.COM 	NULL,		/* power */
18388863SEdward.Pilatowicz@Sun.COM 	ddi_quiesce_not_needed, /* quiesce */
18395084Sjohnlev };
18405084Sjohnlev 
18415084Sjohnlev /*
18425084Sjohnlev  * Module linkage information for the kernel.
18435084Sjohnlev  */
18445084Sjohnlev static struct modldrv modldrv = {
18455084Sjohnlev 	&mod_driverops,			/* Type of module. */
18468863SEdward.Pilatowicz@Sun.COM 	"vbd backend driver",		/* Name of the module */
18475084Sjohnlev 	&xdb_dev_ops			/* driver ops */
18485084Sjohnlev };
18495084Sjohnlev 
18505084Sjohnlev static struct modlinkage xdb_modlinkage = {
18515084Sjohnlev 	MODREV_1,
18525084Sjohnlev 	&modldrv,
18535084Sjohnlev 	NULL
18545084Sjohnlev };
18555084Sjohnlev 
18565084Sjohnlev int
18575084Sjohnlev _init(void)
18585084Sjohnlev {
18595084Sjohnlev 	int rv;
18605084Sjohnlev 
18615084Sjohnlev 	if ((rv = ddi_soft_state_init((void **)&xdb_statep,
18625084Sjohnlev 	    sizeof (xdb_t), 0)) == 0)
18635084Sjohnlev 		if ((rv = mod_install(&xdb_modlinkage)) != 0)
18645084Sjohnlev 			ddi_soft_state_fini((void **)&xdb_statep);
18655084Sjohnlev 	return (rv);
18665084Sjohnlev }
18675084Sjohnlev 
18685084Sjohnlev int
18695084Sjohnlev _fini(void)
18705084Sjohnlev {
18715084Sjohnlev 	int rv;
18725084Sjohnlev 
18735084Sjohnlev 	if ((rv = mod_remove(&xdb_modlinkage)) != 0)
18745084Sjohnlev 		return (rv);
18755084Sjohnlev 	ddi_soft_state_fini((void **)&xdb_statep);
18765084Sjohnlev 	return (rv);
18775084Sjohnlev }
18785084Sjohnlev 
18795084Sjohnlev int
18805084Sjohnlev _info(struct modinfo *modinfop)
18815084Sjohnlev {
18825084Sjohnlev 	return (mod_info(&xdb_modlinkage, modinfop));
18835084Sjohnlev }
18846144Srab 
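/*
 * xdb_get_request() copies the next request off the shared blkif ring,
 * converting from the frontend's ABI (native, 32-bit or 64-bit x86
 * layout) into a native blkif_request_t.  Returns 1 if a request was
 * consumed, 0 if the ring is empty.
 */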
18856144Srab static int
18866144Srab xdb_get_request(xdb_t *vdp, blkif_request_t *req)
18876144Srab {
18886144Srab 	void *src = xvdi_ring_get_request(vdp->xs_ring);
18896144Srab 
18906144Srab 	if (src == NULL)
18916144Srab 		return (0);
18926144Srab 
18936144Srab 	switch (vdp->xs_blk_protocol) {
18946144Srab 	case BLKIF_PROTOCOL_NATIVE:
18956144Srab 		(void) memcpy(req, src, sizeof (*req));
18966144Srab 		break;
18976144Srab 	case BLKIF_PROTOCOL_X86_32:
18986144Srab 		blkif_get_x86_32_req(req, src);
18996144Srab 		break;
19006144Srab 	case BLKIF_PROTOCOL_X86_64:
19016144Srab 		blkif_get_x86_64_req(req, src);
19026144Srab 		break;
19036144Srab 	default:
19046144Srab 		cmn_err(CE_PANIC, "xdb@%s: unrecognised protocol: %d",
19056144Srab 		    ddi_get_name_addr(vdp->xs_dip),
19066144Srab 		    vdp->xs_blk_protocol);
19076144Srab 	}
19086144Srab 	return (1);
19096144Srab }
19106144Srab 
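/*
 * xdb_push_response() writes a response onto the shared ring in the
 * frontend's ABI layout (native, 32-bit or 64-bit x86) and returns the
 * result of xvdi_ring_push_response().
 */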
19116144Srab static int
19126144Srab xdb_push_response(xdb_t *vdp, uint64_t id, uint8_t op, uint16_t status)
19136144Srab {
19146144Srab 	ddi_acc_handle_t acchdl = vdp->xs_ring_hdl;
19156144Srab 	blkif_response_t *rsp = xvdi_ring_get_response(vdp->xs_ring);
19166144Srab 	blkif_x86_32_response_t *rsp_32 = (blkif_x86_32_response_t *)rsp;
19176144Srab 	blkif_x86_64_response_t *rsp_64 = (blkif_x86_64_response_t *)rsp;
19186144Srab 
19196144Srab 	ASSERT(rsp);
19206144Srab 
19216144Srab 	switch (vdp->xs_blk_protocol) {
19226144Srab 	case BLKIF_PROTOCOL_NATIVE:
19236144Srab 		ddi_put64(acchdl, &rsp->id, id);
19246144Srab 		ddi_put8(acchdl, &rsp->operation, op);
19256144Srab 		ddi_put16(acchdl, (uint16_t *)&rsp->status,
19266144Srab 		    status == 0 ? BLKIF_RSP_OKAY : BLKIF_RSP_ERROR);
19276144Srab 		break;
19286144Srab 	case BLKIF_PROTOCOL_X86_32:
19296144Srab 		ddi_put64(acchdl, &rsp_32->id, id);
19306144Srab 		ddi_put8(acchdl, &rsp_32->operation, op);
19316144Srab 		ddi_put16(acchdl, (uint16_t *)&rsp_32->status,
19326144Srab 		    status == 0 ? BLKIF_RSP_OKAY : BLKIF_RSP_ERROR);
19336144Srab 		break;
19346144Srab 	case BLKIF_PROTOCOL_X86_64:
19356144Srab 		ddi_put64(acchdl, &rsp_64->id, id);
19366144Srab 		ddi_put8(acchdl, &rsp_64->operation, op);
19376144Srab 		ddi_put16(acchdl, (uint16_t *)&rsp_64->status,
19386144Srab 		    status == 0 ? BLKIF_RSP_OKAY : BLKIF_RSP_ERROR);
19396144Srab 		break;
19406144Srab 	default:
19416144Srab 		cmn_err(CE_PANIC, "xdb@%s: unrecognised protocol: %d",
19426144Srab 		    ddi_get_name_addr(vdp->xs_dip),
19436144Srab 		    vdp->xs_blk_protocol);
19446144Srab 	}
19456144Srab 
19466144Srab 	return (xvdi_ring_push_response(vdp->xs_ring));
19476144Srab }
19486144Srab 
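/*
 * The two helpers below convert 32-bit and 64-bit x86 ring requests into
 * the native blkif_request_t, copying at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segment descriptors.
 */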
19496144Srab static void
19506144Srab blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
19516144Srab {
19526144Srab 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
19536144Srab 	dst->operation = src->operation;
19546144Srab 	dst->nr_segments = src->nr_segments;
19556144Srab 	dst->handle = src->handle;
19566144Srab 	dst->id = src->id;
19576144Srab 	dst->sector_number = src->sector_number;
19586144Srab 	if (n > src->nr_segments)
19596144Srab 		n = src->nr_segments;
19606144Srab 	for (i = 0; i < n; i++)
19616144Srab 		dst->seg[i] = src->seg[i];
19626144Srab }
19636144Srab 
19646144Srab static void
19656144Srab blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
19666144Srab {
19676144Srab 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
19686144Srab 	dst->operation = src->operation;
19696144Srab 	dst->nr_segments = src->nr_segments;
19706144Srab 	dst->handle = src->handle;
19716144Srab 	dst->id = src->id;
19726144Srab 	dst->sector_number = src->sector_number;
19736144Srab 	if (n > src->nr_segments)
19746144Srab 		n = src->nr_segments;
19756144Srab 	for (i = 0; i < n; i++)
19766144Srab 		dst->seg[i] = src->seg[i];
19776144Srab }
1978