xref: /onnv-gate/usr/src/uts/i86pc/i86hvm/io/xdf_shell.c (revision 10021:a41c569bdaca)
18863SEdward.Pilatowicz@Sun.COM /*
28863SEdward.Pilatowicz@Sun.COM  * CDDL HEADER START
38863SEdward.Pilatowicz@Sun.COM  *
48863SEdward.Pilatowicz@Sun.COM  * The contents of this file are subject to the terms of the
58863SEdward.Pilatowicz@Sun.COM  * Common Development and Distribution License (the "License").
68863SEdward.Pilatowicz@Sun.COM  * You may not use this file except in compliance with the License.
78863SEdward.Pilatowicz@Sun.COM  *
88863SEdward.Pilatowicz@Sun.COM  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
98863SEdward.Pilatowicz@Sun.COM  * or http://www.opensolaris.org/os/licensing.
108863SEdward.Pilatowicz@Sun.COM  * See the License for the specific language governing permissions
118863SEdward.Pilatowicz@Sun.COM  * and limitations under the License.
128863SEdward.Pilatowicz@Sun.COM  *
138863SEdward.Pilatowicz@Sun.COM  * When distributing Covered Code, include this CDDL HEADER in each
148863SEdward.Pilatowicz@Sun.COM  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
158863SEdward.Pilatowicz@Sun.COM  * If applicable, add the following below this CDDL HEADER, with the
168863SEdward.Pilatowicz@Sun.COM  * fields enclosed by brackets "[]" replaced with your own identifying
178863SEdward.Pilatowicz@Sun.COM  * information: Portions Copyright [yyyy] [name of copyright owner]
188863SEdward.Pilatowicz@Sun.COM  *
198863SEdward.Pilatowicz@Sun.COM  * CDDL HEADER END
208863SEdward.Pilatowicz@Sun.COM  */
218863SEdward.Pilatowicz@Sun.COM /*
228863SEdward.Pilatowicz@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
238863SEdward.Pilatowicz@Sun.COM  * Use is subject to license terms.
248863SEdward.Pilatowicz@Sun.COM  */
258863SEdward.Pilatowicz@Sun.COM 
268863SEdward.Pilatowicz@Sun.COM #include <io/xdf_shell.h>
278863SEdward.Pilatowicz@Sun.COM #include <sys/dkio.h>
288863SEdward.Pilatowicz@Sun.COM #include <sys/scsi/scsi_types.h>
298863SEdward.Pilatowicz@Sun.COM 
308863SEdward.Pilatowicz@Sun.COM /*
318863SEdward.Pilatowicz@Sun.COM  * General Notes
328863SEdward.Pilatowicz@Sun.COM  *
 * We don't support disks with bad block mappings.  We have this
 * limitation because the underlying xdf driver doesn't support
 * bad block remapping.  If there is a need to support this feature
 * it should be added directly to the xdf driver and we should just
 * pass requests straight on through and let it handle the remapping.
388863SEdward.Pilatowicz@Sun.COM  * Also, it's probably worth pointing out that most modern disks do bad
398863SEdward.Pilatowicz@Sun.COM  * block remapping internally in the hardware so there's actually less
408863SEdward.Pilatowicz@Sun.COM  * of a chance of us ever discovering bad blocks.  Also, in most cases
418863SEdward.Pilatowicz@Sun.COM  * this driver (and the xdf driver) will only be used with virtualized
428863SEdward.Pilatowicz@Sun.COM  * devices, so one might wonder why a virtual device would ever actually
438863SEdward.Pilatowicz@Sun.COM  * experience bad blocks.  To wrap this up, you might be wondering how
448863SEdward.Pilatowicz@Sun.COM  * these bad block mappings get created and how they are managed.  Well,
458863SEdward.Pilatowicz@Sun.COM  * there are two tools for managing bad block mappings, format(1M) and
468863SEdward.Pilatowicz@Sun.COM  * addbadsec(1M).  Format(1M) can be used to do a surface scan of a disk
478863SEdward.Pilatowicz@Sun.COM  * to attempt to find bad block and create mappings for them.  Format(1M)
488863SEdward.Pilatowicz@Sun.COM  * and addbadsec(1M) can also be used to edit existing mappings that may
498863SEdward.Pilatowicz@Sun.COM  * be saved on the disk.
508863SEdward.Pilatowicz@Sun.COM  *
518863SEdward.Pilatowicz@Sun.COM  * The underlying PV driver that this driver passes on requests to is the
528863SEdward.Pilatowicz@Sun.COM  * xdf driver.  Since in most cases the xdf driver doesn't deal with
538863SEdward.Pilatowicz@Sun.COM  * physical disks it has it's own algorithm for assigning a physical
548863SEdward.Pilatowicz@Sun.COM  * geometry to a virtual disk (ie, cylinder count, head count, etc.)
558863SEdward.Pilatowicz@Sun.COM  * The default values chosen by the xdf driver may not match those
568863SEdward.Pilatowicz@Sun.COM  * assigned to a disk by a hardware disk emulator in an HVM environment.
578863SEdward.Pilatowicz@Sun.COM  * This is a problem since these physical geometry attributes affect
588863SEdward.Pilatowicz@Sun.COM  * things like the partition table, backup label location, etc.  So
598863SEdward.Pilatowicz@Sun.COM  * to emulate disk devices correctly we need to know the physical geometry
 * that was assigned to a disk at the time of its initialization.
 * Normally in an HVM environment this information will be passed to
628863SEdward.Pilatowicz@Sun.COM  * the BIOS and operating system from the hardware emulator that is
638863SEdward.Pilatowicz@Sun.COM  * emulating the disk devices.  In the case of a solaris dom0+xvm
648863SEdward.Pilatowicz@Sun.COM  * this would be qemu.  So to work around this issue, this driver will
658863SEdward.Pilatowicz@Sun.COM  * query the emulated hardware to get the assigned physical geometry
668863SEdward.Pilatowicz@Sun.COM  * and then pass this geometry onto the xdf driver so that it can use it.
678863SEdward.Pilatowicz@Sun.COM  * But really, this information is essentially metadata about the disk
688863SEdward.Pilatowicz@Sun.COM  * that should be kept with the disk image itself.  (Assuming or course
698863SEdward.Pilatowicz@Sun.COM  * that a disk image is the actual backingstore for this emulated device.)
708863SEdward.Pilatowicz@Sun.COM  * This metadata should also be made available to PV drivers via a common
718863SEdward.Pilatowicz@Sun.COM  * mechanism, probably the xenstore.  The fact that this metadata isn't
728863SEdward.Pilatowicz@Sun.COM  * available outside of HVM domains means that it's difficult to move
738863SEdward.Pilatowicz@Sun.COM  * disks between HVM and PV domains, since a fully PV domain will have no
748863SEdward.Pilatowicz@Sun.COM  * way of knowing what the correct geometry of the target device is.
758863SEdward.Pilatowicz@Sun.COM  * (Short of reading the disk, looking for things like partition tables
768863SEdward.Pilatowicz@Sun.COM  * and labels, and taking a best guess at what the geometry was when
 * the disk was initialized.  Unsurprisingly, qemu actually does this.)
788863SEdward.Pilatowicz@Sun.COM  *
798863SEdward.Pilatowicz@Sun.COM  * This driver has to map xdf shell device instances into their corresponding
808863SEdward.Pilatowicz@Sun.COM  * xdf device instances.  We have to do this to ensure that when a user
818863SEdward.Pilatowicz@Sun.COM  * accesses a emulated xdf shell device we map those accesses to the proper
828863SEdward.Pilatowicz@Sun.COM  * paravirtualized device.  Basically what we need to know is how multiple
838863SEdward.Pilatowicz@Sun.COM  * 'disk' entries in a domU configuration file get mapped to emulated
848863SEdward.Pilatowicz@Sun.COM  * xdf shell devices and to xdf devices.  The 'disk' entry to xdf instance
858863SEdward.Pilatowicz@Sun.COM  * mappings we know because those are done within the Solaris xvdi code
868863SEdward.Pilatowicz@Sun.COM  * and the xpvd nexus driver.  But the config to emulated devices mappings
878863SEdward.Pilatowicz@Sun.COM  * are handled entirely within the xen management tool chain and the
888863SEdward.Pilatowicz@Sun.COM  * hardware emulator.  Since all the tools that establish these mappings
898863SEdward.Pilatowicz@Sun.COM  * live in dom0, dom0 should really supply us with this information,
 * probably via the xenstore.  Unfortunately it doesn't, so since there's
918863SEdward.Pilatowicz@Sun.COM  * no good way to determine this mapping dynamically, this driver uses
928863SEdward.Pilatowicz@Sun.COM  * a hard coded set of static mappings.  These mappings are hardware
938863SEdward.Pilatowicz@Sun.COM  * emulator specific because each different hardware emulator could have
948863SEdward.Pilatowicz@Sun.COM  * a different device tree with different xdf shell device paths.  This
958863SEdward.Pilatowicz@Sun.COM  * means that if we want to continue to use this static mapping approach
968863SEdward.Pilatowicz@Sun.COM  * to allow Solaris to run on different hardware emulators we'll have
978863SEdward.Pilatowicz@Sun.COM  * to analyze each of those emulators to determine what paths they
988863SEdward.Pilatowicz@Sun.COM  * use and hard code those paths into this driver.  yech.  This metadata
998863SEdward.Pilatowicz@Sun.COM  * really needs to be supplied to us by dom0.
1008863SEdward.Pilatowicz@Sun.COM  *
 * This driver accesses underlying xdf nodes.  Unfortunately, devices
 * must create minor nodes during attach, and for disk devices to create
 * minor nodes, they have to look at the label on the disk, so this means
 * that disk drivers must be able to access a disk's contents during
 * attach.  That means that this disk driver must be able to access
 * underlying xdf nodes during attach.  Unfortunately, due to device tree
 * locking restrictions, we cannot have an attach operation occurring on
1088863SEdward.Pilatowicz@Sun.COM  * this device and then attempt to access another device which may
1098863SEdward.Pilatowicz@Sun.COM  * cause another attach to occur in a different device tree branch
1108863SEdward.Pilatowicz@Sun.COM  * since this could result in deadlock.  Hence, this driver can only
1118863SEdward.Pilatowicz@Sun.COM  * access xdf device nodes that we know are attached, and it can't use
1128863SEdward.Pilatowicz@Sun.COM  * any ddi interfaces to access those nodes if those interfaces could
1138863SEdward.Pilatowicz@Sun.COM  * trigger an attach of the xdf device.  So this driver works around
1148863SEdward.Pilatowicz@Sun.COM  * these restrictions by talking directly to xdf devices via
 * xdf_hvm_hold().  This interface takes a pathname to an xdf device,
 * and if that device is already attached then it returns a held dip
 * pointer for that device node.  This prevents us from getting into
1188863SEdward.Pilatowicz@Sun.COM  * deadlock situations, but now we need a mechanism to ensure that all
1198863SEdward.Pilatowicz@Sun.COM  * the xdf device nodes this driver might access are attached before
1208863SEdward.Pilatowicz@Sun.COM  * this driver tries to access them.  This is accomplished via the
1218863SEdward.Pilatowicz@Sun.COM  * hvmboot_rootconf() callback which is invoked just before root is
1228863SEdward.Pilatowicz@Sun.COM  * mounted.  hvmboot_rootconf() will attach xpvd and tell it to configure
1238863SEdward.Pilatowicz@Sun.COM  * all xdf device visible to the system.  All these xdf device nodes
 * will also be marked with the "ddi-no-autodetach" property so that
 * once they are configured, they will not be automatically unconfigured.
1268863SEdward.Pilatowicz@Sun.COM  * The only way that they could be unconfigured is if the administrator
1278863SEdward.Pilatowicz@Sun.COM  * explicitly attempts to unload required modules via rem_drv(1M)
1288863SEdward.Pilatowicz@Sun.COM  * or modunload(1M).
1298863SEdward.Pilatowicz@Sun.COM  */
1308863SEdward.Pilatowicz@Sun.COM 
/*
 * 16 partitions + fdisk (see xdf.h)
 */
#define	XDFS_DEV2UNIT(dev)	XDF_INST((getminor((dev))))
#define	XDFS_DEV2PART(dev)	XDF_PART((getminor((dev))))

/*
 * True if otyp is one of the open types we honor.  The macro argument
 * is parenthesized so that arbitrary expressions expand safely.
 */
#define	OTYP_VALID(otyp)	(((otyp) == OTYP_BLK) ||		\
					((otyp) == OTYP_CHR) ||		\
					((otyp) == OTYP_LYR))

#define	XDFS_NODES		4

#define	XDFS_HVM_MODE(sp)	(XDFS_HVM_STATE(sp)->xdfs_hs_mode)
#define	XDFS_HVM_DIP(sp)	(XDFS_HVM_STATE(sp)->xdfs_hs_dip)
#define	XDFS_HVM_PATH(sp)	(XDFS_HVM_STATE(sp)->xdfs_hs_path)
#define	XDFS_HVM_STATE(sp)						\
		((xdfs_hvm_state_t *)(&((char *)(sp))[XDFS_HVM_STATE_OFFSET]))
#define	XDFS_HVM_STATE_OFFSET	(xdfs_ss_size - sizeof (xdfs_hvm_state_t))
/*
 * Sanity assertions for soft state that is operating in HVM mode.
 * Wrapped in do { } while (0) so this multi-statement macro behaves as
 * a single statement (safe in unbraced if/else bodies).
 */
#define	XDFS_HVM_SANE(sp)						\
	do {								\
		ASSERT(XDFS_HVM_MODE(sp));				\
		ASSERT(XDFS_HVM_DIP(sp) != NULL);			\
		ASSERT(XDFS_HVM_PATH(sp) != NULL);			\
	} while (0)
1538863SEdward.Pilatowicz@Sun.COM 
1548863SEdward.Pilatowicz@Sun.COM 
/*
 * Per-instance state used when this shell device falls back to accessing
 * the emulated (HVM) device instead of the paravirtualized xdf device.
 * This structure lives at the tail end of the soft state buffer; see
 * XDFS_HVM_STATE()/XDFS_HVM_STATE_OFFSET above for how it is located.
 */
typedef struct xdfs_hvm_state {
	boolean_t	xdfs_hs_mode;	/* B_TRUE when operating in HVM mode */
	dev_info_t	*xdfs_hs_dip;	/* dip of the emulated device */
	char		*xdfs_hs_path;	/* device path of the emulated device */
} xdfs_hvm_state_t;
1608863SEdward.Pilatowicz@Sun.COM 
1618863SEdward.Pilatowicz@Sun.COM /* local function and structure prototypes */
1628863SEdward.Pilatowicz@Sun.COM static int xdfs_iodone(struct buf *);
1638863SEdward.Pilatowicz@Sun.COM static boolean_t xdfs_isopen_part(xdfs_state_t *, int);
1648863SEdward.Pilatowicz@Sun.COM static boolean_t xdfs_isopen(xdfs_state_t *);
1658863SEdward.Pilatowicz@Sun.COM static cmlb_tg_ops_t xdfs_lb_ops;
1668863SEdward.Pilatowicz@Sun.COM 
1678863SEdward.Pilatowicz@Sun.COM /*
1688863SEdward.Pilatowicz@Sun.COM  * Globals
1698863SEdward.Pilatowicz@Sun.COM  */
1708863SEdward.Pilatowicz@Sun.COM major_t			xdfs_major;
1718863SEdward.Pilatowicz@Sun.COM #define			xdfs_hvm_dev_ops (xdfs_c_hvm_dev_ops)
1728863SEdward.Pilatowicz@Sun.COM #define			xdfs_hvm_cb_ops (xdfs_hvm_dev_ops->devo_cb_ops)
1738863SEdward.Pilatowicz@Sun.COM 
1748863SEdward.Pilatowicz@Sun.COM /*
1758863SEdward.Pilatowicz@Sun.COM  * Private globals
1768863SEdward.Pilatowicz@Sun.COM  */
1778863SEdward.Pilatowicz@Sun.COM volatile boolean_t	xdfs_pv_disable = B_FALSE;
1788863SEdward.Pilatowicz@Sun.COM static void		*xdfs_ssp;
1798863SEdward.Pilatowicz@Sun.COM static size_t		xdfs_ss_size;
1808863SEdward.Pilatowicz@Sun.COM 
1818863SEdward.Pilatowicz@Sun.COM /*
1828863SEdward.Pilatowicz@Sun.COM  * Private helper functions
1838863SEdward.Pilatowicz@Sun.COM  */
1848863SEdward.Pilatowicz@Sun.COM static boolean_t
xdfs_tgt_hold(xdfs_state_t * xsp)1858863SEdward.Pilatowicz@Sun.COM xdfs_tgt_hold(xdfs_state_t *xsp)
1868863SEdward.Pilatowicz@Sun.COM {
1878863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&xsp->xdfss_mutex);
1888863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_holds >= 0);
1898863SEdward.Pilatowicz@Sun.COM 	if (!xsp->xdfss_tgt_attached) {
1908863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&xsp->xdfss_mutex);
1918863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
1928863SEdward.Pilatowicz@Sun.COM 	}
1938863SEdward.Pilatowicz@Sun.COM 	xsp->xdfss_tgt_holds++;
1948863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&xsp->xdfss_mutex);
1958863SEdward.Pilatowicz@Sun.COM 	return (B_TRUE);
1968863SEdward.Pilatowicz@Sun.COM }
1978863SEdward.Pilatowicz@Sun.COM 
1988863SEdward.Pilatowicz@Sun.COM static void
xdfs_tgt_release(xdfs_state_t * xsp)1998863SEdward.Pilatowicz@Sun.COM xdfs_tgt_release(xdfs_state_t *xsp)
2008863SEdward.Pilatowicz@Sun.COM {
2018863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&xsp->xdfss_mutex);
2028863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_attached);
2038863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_holds > 0);
2048863SEdward.Pilatowicz@Sun.COM 	if (--xsp->xdfss_tgt_holds == 0)
2058863SEdward.Pilatowicz@Sun.COM 		cv_broadcast(&xsp->xdfss_cv);
2068863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&xsp->xdfss_mutex);
2078863SEdward.Pilatowicz@Sun.COM }
2088863SEdward.Pilatowicz@Sun.COM 
2098863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
2108863SEdward.Pilatowicz@Sun.COM static int
xdfs_lb_getinfo(dev_info_t * dip,int cmd,void * arg,void * tg_cookie)2118863SEdward.Pilatowicz@Sun.COM xdfs_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
2128863SEdward.Pilatowicz@Sun.COM {
2138863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
2148863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
2158863SEdward.Pilatowicz@Sun.COM 	int		rv;
2168863SEdward.Pilatowicz@Sun.COM 
2178863SEdward.Pilatowicz@Sun.COM 	if (xsp == NULL)
2188863SEdward.Pilatowicz@Sun.COM 		return (ENXIO);
2198863SEdward.Pilatowicz@Sun.COM 
2208863SEdward.Pilatowicz@Sun.COM 	if (!xdfs_tgt_hold(xsp))
2218863SEdward.Pilatowicz@Sun.COM 		return (ENXIO);
2228863SEdward.Pilatowicz@Sun.COM 
2238863SEdward.Pilatowicz@Sun.COM 	if (cmd == TG_GETVIRTGEOM) {
2248863SEdward.Pilatowicz@Sun.COM 		cmlb_geom_t	pgeom, *vgeomp;
2258863SEdward.Pilatowicz@Sun.COM 		diskaddr_t	capacity;
2268863SEdward.Pilatowicz@Sun.COM 
2278863SEdward.Pilatowicz@Sun.COM 		/*
2288863SEdward.Pilatowicz@Sun.COM 		 * The native xdf driver doesn't support this ioctl.
2298863SEdward.Pilatowicz@Sun.COM 		 * Intead of passing it on, emulate it here so that the
2308863SEdward.Pilatowicz@Sun.COM 		 * results look the same as what we get for a real xdf
2318863SEdward.Pilatowicz@Sun.COM 		 * shell device.
2328863SEdward.Pilatowicz@Sun.COM 		 *
2338863SEdward.Pilatowicz@Sun.COM 		 * Get the real size of the device
2348863SEdward.Pilatowicz@Sun.COM 		 */
2358863SEdward.Pilatowicz@Sun.COM 		if ((rv = xdf_lb_getinfo(xsp->xdfss_tgt_dip,
2368863SEdward.Pilatowicz@Sun.COM 		    TG_GETPHYGEOM, &pgeom, tg_cookie)) != 0)
2378863SEdward.Pilatowicz@Sun.COM 			goto out;
2388863SEdward.Pilatowicz@Sun.COM 		capacity = pgeom.g_capacity;
2398863SEdward.Pilatowicz@Sun.COM 
2408863SEdward.Pilatowicz@Sun.COM 		/*
2418863SEdward.Pilatowicz@Sun.COM 		 * If the controller returned us something that doesn't
2428863SEdward.Pilatowicz@Sun.COM 		 * really fit into an Int 13/function 8 geometry
2438863SEdward.Pilatowicz@Sun.COM 		 * result, just fail the ioctl.  See PSARC 1998/313.
2448863SEdward.Pilatowicz@Sun.COM 		 */
2458863SEdward.Pilatowicz@Sun.COM 		if (capacity >= (63 * 254 * 1024)) {
2468863SEdward.Pilatowicz@Sun.COM 			rv = EINVAL;
2478863SEdward.Pilatowicz@Sun.COM 			goto out;
2488863SEdward.Pilatowicz@Sun.COM 		}
2498863SEdward.Pilatowicz@Sun.COM 
2508863SEdward.Pilatowicz@Sun.COM 		vgeomp = (cmlb_geom_t *)arg;
2518863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_capacity	= capacity;
2528863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_nsect		= 63;
2538863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_nhead		= 254;
2548863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_ncyl		= capacity / (63 * 254);
2558863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_acyl		= 0;
2568863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_secsize	= 512;
2578863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_intrlv	= 1;
2588863SEdward.Pilatowicz@Sun.COM 		vgeomp->g_rpm		= 3600;
2598863SEdward.Pilatowicz@Sun.COM 		rv = 0;
2608863SEdward.Pilatowicz@Sun.COM 		goto out;
2618863SEdward.Pilatowicz@Sun.COM 	}
2628863SEdward.Pilatowicz@Sun.COM 
2638863SEdward.Pilatowicz@Sun.COM 	rv = xdf_lb_getinfo(xsp->xdfss_tgt_dip, cmd, arg, tg_cookie);
2648863SEdward.Pilatowicz@Sun.COM 
2658863SEdward.Pilatowicz@Sun.COM out:
2668863SEdward.Pilatowicz@Sun.COM 	xdfs_tgt_release(xsp);
2678863SEdward.Pilatowicz@Sun.COM 	return (rv);
2688863SEdward.Pilatowicz@Sun.COM }
2698863SEdward.Pilatowicz@Sun.COM 
2708863SEdward.Pilatowicz@Sun.COM static boolean_t
xdfs_isopen_part(xdfs_state_t * xsp,int part)2718863SEdward.Pilatowicz@Sun.COM xdfs_isopen_part(xdfs_state_t *xsp, int part)
2728863SEdward.Pilatowicz@Sun.COM {
2738863SEdward.Pilatowicz@Sun.COM 	int otyp;
2748863SEdward.Pilatowicz@Sun.COM 
2758863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&xsp->xdfss_mutex));
2768863SEdward.Pilatowicz@Sun.COM 	for (otyp = 0; (otyp < OTYPCNT); otyp++) {
2778863SEdward.Pilatowicz@Sun.COM 		if (xsp->xdfss_otyp_count[otyp][part] != 0) {
2788863SEdward.Pilatowicz@Sun.COM 			ASSERT(xsp->xdfss_tgt_attached);
2798863SEdward.Pilatowicz@Sun.COM 			ASSERT(xsp->xdfss_tgt_holds >= 0);
2808863SEdward.Pilatowicz@Sun.COM 			return (B_TRUE);
2818863SEdward.Pilatowicz@Sun.COM 		}
2828863SEdward.Pilatowicz@Sun.COM 	}
2838863SEdward.Pilatowicz@Sun.COM 	return (B_FALSE);
2848863SEdward.Pilatowicz@Sun.COM }
2858863SEdward.Pilatowicz@Sun.COM 
2868863SEdward.Pilatowicz@Sun.COM static boolean_t
xdfs_isopen(xdfs_state_t * xsp)2878863SEdward.Pilatowicz@Sun.COM xdfs_isopen(xdfs_state_t *xsp)
2888863SEdward.Pilatowicz@Sun.COM {
2898863SEdward.Pilatowicz@Sun.COM 	int part;
2908863SEdward.Pilatowicz@Sun.COM 
2918863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&xsp->xdfss_mutex));
2928863SEdward.Pilatowicz@Sun.COM 	for (part = 0; part < XDF_PEXT; part++) {
2938863SEdward.Pilatowicz@Sun.COM 		if (xdfs_isopen_part(xsp, part))
2948863SEdward.Pilatowicz@Sun.COM 			return (B_TRUE);
2958863SEdward.Pilatowicz@Sun.COM 	}
2968863SEdward.Pilatowicz@Sun.COM 	return (B_FALSE);
2978863SEdward.Pilatowicz@Sun.COM }
2988863SEdward.Pilatowicz@Sun.COM 
2998863SEdward.Pilatowicz@Sun.COM static int
xdfs_iodone(struct buf * bp)3008863SEdward.Pilatowicz@Sun.COM xdfs_iodone(struct buf *bp)
3018863SEdward.Pilatowicz@Sun.COM {
3028863SEdward.Pilatowicz@Sun.COM 	struct buf	*bp_orig = bp->b_chain;
3038863SEdward.Pilatowicz@Sun.COM 
3048863SEdward.Pilatowicz@Sun.COM 	/* Propegate back the io results */
3058863SEdward.Pilatowicz@Sun.COM 	bp_orig->b_resid = bp->b_resid;
3068863SEdward.Pilatowicz@Sun.COM 	bioerror(bp_orig, geterror(bp));
3078863SEdward.Pilatowicz@Sun.COM 	biodone(bp_orig);
3088863SEdward.Pilatowicz@Sun.COM 
3098863SEdward.Pilatowicz@Sun.COM 	freerbuf(bp);
3108863SEdward.Pilatowicz@Sun.COM 	return (0);
3118863SEdward.Pilatowicz@Sun.COM }
3128863SEdward.Pilatowicz@Sun.COM 
3138863SEdward.Pilatowicz@Sun.COM static int
xdfs_cmlb_attach(xdfs_state_t * xsp)3148863SEdward.Pilatowicz@Sun.COM xdfs_cmlb_attach(xdfs_state_t *xsp)
3158863SEdward.Pilatowicz@Sun.COM {
3168863SEdward.Pilatowicz@Sun.COM 	return (cmlb_attach(xsp->xdfss_dip, &xdfs_lb_ops,
3178863SEdward.Pilatowicz@Sun.COM 	    xsp->xdfss_tgt_is_cd ? DTYPE_RODIRECT : DTYPE_DIRECT,
3188863SEdward.Pilatowicz@Sun.COM 	    xdf_is_rm(xsp->xdfss_tgt_dip),
3198863SEdward.Pilatowicz@Sun.COM 	    B_TRUE,
3208863SEdward.Pilatowicz@Sun.COM 	    xdfs_c_cmlb_node_type(xsp),
3218863SEdward.Pilatowicz@Sun.COM 	    xdfs_c_cmlb_alter_behavior(xsp),
3228863SEdward.Pilatowicz@Sun.COM 	    xsp->xdfss_cmlbhandle, 0));
3238863SEdward.Pilatowicz@Sun.COM }
3248863SEdward.Pilatowicz@Sun.COM 
/*
 * Bind the underlying xdf device (tgt_dip) to this shell instance as
 * its I/O target.  On success the target is marked attached, the
 * iostat kstat and the cmlb label have been set up, and B_TRUE is
 * returned.  On any failure, partially-initialized state is torn back
 * down and B_FALSE is returned.
 *
 * Entered and exited with xdfss_mutex held, but the mutex is dropped
 * across the label/devid setup (see the comment mid-function).
 */
static boolean_t
xdfs_tgt_probe(xdfs_state_t *xsp, dev_info_t *tgt_dip)
{
	cmlb_geom_t		pgeom;
	int			tgt_instance = ddi_get_instance(tgt_dip);

	ASSERT(MUTEX_HELD(&xsp->xdfss_mutex));
	ASSERT(!xdfs_isopen(xsp));
	ASSERT(!xsp->xdfss_tgt_attached);

	xsp->xdfss_tgt_dip = tgt_dip;
	xsp->xdfss_tgt_holds = 0;
	/* dev_t of the target's partition-0 minor (whole disk) */
	xsp->xdfss_tgt_dev = makedevice(ddi_driver_major(tgt_dip),
	    XDF_MINOR(tgt_instance, 0));
	ASSERT((xsp->xdfss_tgt_dev & XDF_PMASK) == 0);
	xsp->xdfss_tgt_is_cd = xdf_is_cd(tgt_dip);

	/*
	 * GROSS HACK ALERT!  GROSS HACK ALERT!
	 *
	 * Before we can initialize the cmlb layer, we have to tell the
	 * underlying xdf device what its physical geometry should be.
	 * See the block comments at the top of this file for more info.
	 */
	if (!xsp->xdfss_tgt_is_cd &&
	    ((xdfs_c_getpgeom(xsp->xdfss_dip, &pgeom) != 0) ||
	    (xdf_hvm_setpgeom(xsp->xdfss_tgt_dip, &pgeom) != 0)))
		return (B_FALSE);

	/*
	 * Force the xdf front end driver to connect to the backend.  From
	 * the solaris device tree perspective, the xdf driver devinfo node
	 * is already in the ATTACHED state.  (Otherwise xdf_hvm_hold()
	 * would not have returned a dip.)  But this doesn't mean that the
	 * xdf device has actually established a connection to its back
	 * end driver.  For us to be able to access the xdf device it needs
	 * to be connected.
	 */
	if (!xdf_hvm_connect(xsp->xdfss_tgt_dip)) {
		cmn_err(CE_WARN, "pv driver failed to connect: %s",
		    xsp->xdfss_pv);
		return (B_FALSE);
	}

	if (xsp->xdfss_tgt_is_cd && !xdf_media_req_supported(tgt_dip)) {
		/*
		 * Unfortunately, the dom0 backend driver doesn't support
		 * important media request operations like eject, so fail
		 * the probe (this should cause us to fall back to emulated
		 * hvm device access, which does support things like eject).
		 */
		return (B_FALSE);
	}

	/* create kstat for iostat(1M) */
	if (xdf_kstat_create(xsp->xdfss_tgt_dip, (char *)xdfs_c_name,
	    tgt_instance) != 0)
		return (B_FALSE);

	/*
	 * Now we need to mark ourselves as attached and drop xdfss_mutex.
	 * We do this because the final steps in the attach process will
	 * need to access the underlying disk to read the label and
	 * possibly the devid.
	 */
	xsp->xdfss_tgt_attached = B_TRUE;
	mutex_exit(&xsp->xdfss_mutex);

	/* Reject disks with bad block mappings; see top-of-file comment. */
	if (!xsp->xdfss_tgt_is_cd && xdfs_c_bb_check(xsp)) {
		cmn_err(CE_WARN, "pv disks with bad blocks are unsupported: %s",
		    xsp->xdfss_hvm);
		mutex_enter(&xsp->xdfss_mutex);
		xdf_kstat_delete(xsp->xdfss_tgt_dip);
		xsp->xdfss_tgt_attached = B_FALSE;
		return (B_FALSE);
	}

	/*
	 * Initialize cmlb.  Note that for partition information cmlb
	 * will access the underlying xdf disk device directly via
	 * xdfs_lb_rdwr() and xdfs_lb_getinfo().  There are no
	 * layered driver handles associated with this access because
	 * it is a direct disk access that doesn't go through
	 * any of the device nodes exported by the xdf device (since
	 * all exported device nodes only reflect the portion of
	 * the device visible via the partition/slice that the node
	 * is associated with.)  So while not observable via the LDI,
	 * this direct disk access is ok since we're actually holding
	 * the target device.
	 */
	if (xdfs_cmlb_attach(xsp) != 0) {
		mutex_enter(&xsp->xdfss_mutex);
		xdf_kstat_delete(xsp->xdfss_tgt_dip);
		xsp->xdfss_tgt_attached = B_FALSE;
		return (B_FALSE);
	}

	/* setup devid string */
	xsp->xdfss_tgt_devid = NULL;
	if (!xsp->xdfss_tgt_is_cd)
		xdfs_c_devid_setup(xsp);

	(void) cmlb_validate(xsp->xdfss_cmlbhandle, 0, 0);

	/* Have the system report any newly created device nodes */
	ddi_report_dev(xsp->xdfss_dip);

	mutex_enter(&xsp->xdfss_mutex);
	return (B_TRUE);
}
4358863SEdward.Pilatowicz@Sun.COM 
4368863SEdward.Pilatowicz@Sun.COM static boolean_t
xdfs_tgt_detach(xdfs_state_t * xsp)4378863SEdward.Pilatowicz@Sun.COM xdfs_tgt_detach(xdfs_state_t *xsp)
4388863SEdward.Pilatowicz@Sun.COM {
4398863SEdward.Pilatowicz@Sun.COM 	ASSERT(MUTEX_HELD(&xsp->xdfss_mutex));
4408863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_attached);
4418863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_holds >= 0);
4428863SEdward.Pilatowicz@Sun.COM 
4438863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_isopen(xsp)) || (xsp->xdfss_tgt_holds != 0))
4448863SEdward.Pilatowicz@Sun.COM 		return (B_FALSE);
4458863SEdward.Pilatowicz@Sun.COM 
4468863SEdward.Pilatowicz@Sun.COM 	ddi_devid_unregister(xsp->xdfss_dip);
4478863SEdward.Pilatowicz@Sun.COM 	if (xsp->xdfss_tgt_devid != NULL)
4488863SEdward.Pilatowicz@Sun.COM 		ddi_devid_free(xsp->xdfss_tgt_devid);
4498863SEdward.Pilatowicz@Sun.COM 
4508863SEdward.Pilatowicz@Sun.COM 	xdf_kstat_delete(xsp->xdfss_tgt_dip);
4518863SEdward.Pilatowicz@Sun.COM 	xsp->xdfss_tgt_attached = B_FALSE;
4528863SEdward.Pilatowicz@Sun.COM 	return (B_TRUE);
4538863SEdward.Pilatowicz@Sun.COM }
4548863SEdward.Pilatowicz@Sun.COM 
4558863SEdward.Pilatowicz@Sun.COM /*
4568863SEdward.Pilatowicz@Sun.COM  * Xdf_shell interfaces that may be called from outside this file.
4578863SEdward.Pilatowicz@Sun.COM  */
/*
 * Clamp the transfer size of the passed buf by delegating to the xdf
 * driver's minphys routine (xdfmin).
 */
void
xdfs_minphys(struct buf *bp)
{
	xdfmin(bp);
}
4638863SEdward.Pilatowicz@Sun.COM 
4648863SEdward.Pilatowicz@Sun.COM /*
4658863SEdward.Pilatowicz@Sun.COM  * Cmlb ops vector, allows the cmlb module to directly access the entire
4668863SEdward.Pilatowicz@Sun.COM  * xdf disk device without going through any partitioning layers.
4678863SEdward.Pilatowicz@Sun.COM  */
4688863SEdward.Pilatowicz@Sun.COM int
xdfs_lb_rdwr(dev_info_t * dip,uchar_t cmd,void * bufaddr,diskaddr_t start,size_t count,void * tg_cookie)4698863SEdward.Pilatowicz@Sun.COM xdfs_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
4708863SEdward.Pilatowicz@Sun.COM     diskaddr_t start, size_t count, void *tg_cookie)
4718863SEdward.Pilatowicz@Sun.COM {
4728863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
4738863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
4748863SEdward.Pilatowicz@Sun.COM 	int		rv;
4758863SEdward.Pilatowicz@Sun.COM 
4768863SEdward.Pilatowicz@Sun.COM 	if (xsp == NULL)
4778863SEdward.Pilatowicz@Sun.COM 		return (ENXIO);
4788863SEdward.Pilatowicz@Sun.COM 
4798863SEdward.Pilatowicz@Sun.COM 	if (!xdfs_tgt_hold(xsp))
4808863SEdward.Pilatowicz@Sun.COM 		return (ENXIO);
4818863SEdward.Pilatowicz@Sun.COM 
4828863SEdward.Pilatowicz@Sun.COM 	rv = xdf_lb_rdwr(xsp->xdfss_tgt_dip,
4838863SEdward.Pilatowicz@Sun.COM 	    cmd, bufaddr, start, count, tg_cookie);
4848863SEdward.Pilatowicz@Sun.COM 
4858863SEdward.Pilatowicz@Sun.COM 	xdfs_tgt_release(xsp);
4868863SEdward.Pilatowicz@Sun.COM 	return (rv);
4878863SEdward.Pilatowicz@Sun.COM }
4888863SEdward.Pilatowicz@Sun.COM 
4898863SEdward.Pilatowicz@Sun.COM /*
4908863SEdward.Pilatowicz@Sun.COM  * Driver PV and HVM cb_ops entry points
4918863SEdward.Pilatowicz@Sun.COM  */
4928863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
4938863SEdward.Pilatowicz@Sun.COM static int
xdfs_open(dev_t * dev_p,int flag,int otyp,cred_t * credp)4948863SEdward.Pilatowicz@Sun.COM xdfs_open(dev_t *dev_p, int flag, int otyp, cred_t *credp)
4958863SEdward.Pilatowicz@Sun.COM {
4968863SEdward.Pilatowicz@Sun.COM 	ldi_ident_t	li;
4978863SEdward.Pilatowicz@Sun.COM 	dev_t		dev = *dev_p;
4988863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
4998863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
5008863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
5018863SEdward.Pilatowicz@Sun.COM 	dev_t		tgt_devt = xsp->xdfss_tgt_dev | part;
5028863SEdward.Pilatowicz@Sun.COM 	int		err = 0;
5038863SEdward.Pilatowicz@Sun.COM 
5048863SEdward.Pilatowicz@Sun.COM 	if ((otyp < 0) || (otyp >= OTYPCNT))
5058863SEdward.Pilatowicz@Sun.COM 		return (EINVAL);
5068863SEdward.Pilatowicz@Sun.COM 
5078863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp)) {
5088863SEdward.Pilatowicz@Sun.COM 		if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
5098863SEdward.Pilatowicz@Sun.COM 			return (ENOTSUP);
5108863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_cb_ops->cb_open(dev_p, flag, otyp, credp));
5118863SEdward.Pilatowicz@Sun.COM 	}
5128863SEdward.Pilatowicz@Sun.COM 
5138863SEdward.Pilatowicz@Sun.COM 	/* allocate an ldi handle */
5148863SEdward.Pilatowicz@Sun.COM 	VERIFY(ldi_ident_from_dev(*dev_p, &li) == 0);
5158863SEdward.Pilatowicz@Sun.COM 
5168863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&xsp->xdfss_mutex);
5178863SEdward.Pilatowicz@Sun.COM 
5188863SEdward.Pilatowicz@Sun.COM 	/*
5198863SEdward.Pilatowicz@Sun.COM 	 * We translate all device opens (chr, blk, and lyr) into
5208863SEdward.Pilatowicz@Sun.COM 	 * block device opens.  Why?  Because for all the opens that
5218863SEdward.Pilatowicz@Sun.COM 	 * come through this driver, we only keep around one LDI handle.
5228863SEdward.Pilatowicz@Sun.COM 	 * So that handle can only be of one open type.  The reason
5238863SEdward.Pilatowicz@Sun.COM 	 * that we choose the block interface for this is that to use
5248863SEdward.Pilatowicz@Sun.COM 	 * the block interfaces for a device the system needs to allocate
5258863SEdward.Pilatowicz@Sun.COM 	 * buf_ts, which are associated with system memory which can act
5268863SEdward.Pilatowicz@Sun.COM 	 * as a cache for device data.  So normally when a block device
5278863SEdward.Pilatowicz@Sun.COM 	 * is closed the system will ensure that all these pages get
5288863SEdward.Pilatowicz@Sun.COM 	 * flushed out of memory.  But if we were to open the device
5298863SEdward.Pilatowicz@Sun.COM 	 * as a character device, then when we went to close the underlying
5308863SEdward.Pilatowicz@Sun.COM 	 * device (even if we had invoked the block interfaces) any data
5318863SEdward.Pilatowicz@Sun.COM 	 * remaining in memory wouldn't necessairly be flushed out
5328863SEdward.Pilatowicz@Sun.COM 	 * before the device was closed.
5338863SEdward.Pilatowicz@Sun.COM 	 */
5348863SEdward.Pilatowicz@Sun.COM 	if (xsp->xdfss_tgt_lh[part] == NULL) {
5358863SEdward.Pilatowicz@Sun.COM 		ASSERT(!xdfs_isopen_part(xsp, part));
5368863SEdward.Pilatowicz@Sun.COM 
5378863SEdward.Pilatowicz@Sun.COM 		err = ldi_open_by_dev(&tgt_devt, OTYP_BLK, flag, credp,
5388863SEdward.Pilatowicz@Sun.COM 		    &xsp->xdfss_tgt_lh[part], li);
5398863SEdward.Pilatowicz@Sun.COM 
5408863SEdward.Pilatowicz@Sun.COM 		if (err != 0) {
5418863SEdward.Pilatowicz@Sun.COM 			mutex_exit(&xsp->xdfss_mutex);
5428863SEdward.Pilatowicz@Sun.COM 			ldi_ident_release(li);
5438863SEdward.Pilatowicz@Sun.COM 			return (err);
5448863SEdward.Pilatowicz@Sun.COM 		}
5458863SEdward.Pilatowicz@Sun.COM 
5468863SEdward.Pilatowicz@Sun.COM 		/* Disk devices really shouldn't clone */
5478863SEdward.Pilatowicz@Sun.COM 		ASSERT(tgt_devt == (xsp->xdfss_tgt_dev | part));
5488863SEdward.Pilatowicz@Sun.COM 	} else {
5498863SEdward.Pilatowicz@Sun.COM 		ldi_handle_t lh_tmp;
5508863SEdward.Pilatowicz@Sun.COM 
5518863SEdward.Pilatowicz@Sun.COM 		ASSERT(xdfs_isopen_part(xsp, part));
5528863SEdward.Pilatowicz@Sun.COM 
5538863SEdward.Pilatowicz@Sun.COM 		/* do ldi open/close to get flags and cred check */
5548863SEdward.Pilatowicz@Sun.COM 		err = ldi_open_by_dev(&tgt_devt, OTYP_BLK, flag, credp,
5558863SEdward.Pilatowicz@Sun.COM 		    &lh_tmp, li);
5568863SEdward.Pilatowicz@Sun.COM 		if (err != 0) {
5578863SEdward.Pilatowicz@Sun.COM 			mutex_exit(&xsp->xdfss_mutex);
5588863SEdward.Pilatowicz@Sun.COM 			ldi_ident_release(li);
5598863SEdward.Pilatowicz@Sun.COM 			return (err);
5608863SEdward.Pilatowicz@Sun.COM 		}
5618863SEdward.Pilatowicz@Sun.COM 
5628863SEdward.Pilatowicz@Sun.COM 		/* Disk devices really shouldn't clone */
5638863SEdward.Pilatowicz@Sun.COM 		ASSERT(tgt_devt == (xsp->xdfss_tgt_dev | part));
5648863SEdward.Pilatowicz@Sun.COM 		(void) ldi_close(lh_tmp, flag, credp);
5658863SEdward.Pilatowicz@Sun.COM 	}
5668863SEdward.Pilatowicz@Sun.COM 	ldi_ident_release(li);
5678863SEdward.Pilatowicz@Sun.COM 
5688863SEdward.Pilatowicz@Sun.COM 	xsp->xdfss_otyp_count[otyp][part]++;
5698863SEdward.Pilatowicz@Sun.COM 
5708863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&xsp->xdfss_mutex);
5718863SEdward.Pilatowicz@Sun.COM 	return (0);
5728863SEdward.Pilatowicz@Sun.COM }
5738863SEdward.Pilatowicz@Sun.COM 
5748863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
5758863SEdward.Pilatowicz@Sun.COM static int
xdfs_close(dev_t dev,int flag,int otyp,cred_t * credp)5768863SEdward.Pilatowicz@Sun.COM xdfs_close(dev_t dev, int flag, int otyp, cred_t *credp)
5778863SEdward.Pilatowicz@Sun.COM {
5788863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
5798863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
5808863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
5818863SEdward.Pilatowicz@Sun.COM 	int		err = 0;
5828863SEdward.Pilatowicz@Sun.COM 
5838863SEdward.Pilatowicz@Sun.COM 	ASSERT((otyp >= 0) && otyp < OTYPCNT);
5848863SEdward.Pilatowicz@Sun.COM 
5858863SEdward.Pilatowicz@Sun.COM 	/* Sanity check the dev_t associated with this request. */
5868863SEdward.Pilatowicz@Sun.COM 	ASSERT(getmajor(dev) == xdfs_major);
5878863SEdward.Pilatowicz@Sun.COM 	if (getmajor(dev) != xdfs_major)
5888863SEdward.Pilatowicz@Sun.COM 		return (ENXIO);
5898863SEdward.Pilatowicz@Sun.COM 
5908863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp)) {
5918863SEdward.Pilatowicz@Sun.COM 		if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
5928863SEdward.Pilatowicz@Sun.COM 			return (ENOTSUP);
5938863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_cb_ops->cb_close(dev, flag, otyp, credp));
5948863SEdward.Pilatowicz@Sun.COM 	}
5958863SEdward.Pilatowicz@Sun.COM 
5968863SEdward.Pilatowicz@Sun.COM 	/*
5978863SEdward.Pilatowicz@Sun.COM 	 * Sanity check that that the device is actually open.  On debug
5988863SEdward.Pilatowicz@Sun.COM 	 * kernels we'll panic and on non-debug kernels we'll return failure.
5998863SEdward.Pilatowicz@Sun.COM 	 */
6008863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&xsp->xdfss_mutex);
6018863SEdward.Pilatowicz@Sun.COM 	ASSERT(xdfs_isopen_part(xsp, part));
6028863SEdward.Pilatowicz@Sun.COM 	if (!xdfs_isopen_part(xsp, part)) {
6038863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&xsp->xdfss_mutex);
6048863SEdward.Pilatowicz@Sun.COM 		return (ENXIO);
6058863SEdward.Pilatowicz@Sun.COM 	}
6068863SEdward.Pilatowicz@Sun.COM 
6078863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_lh[part] != NULL);
6088863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_otyp_count[otyp][part] > 0);
6098863SEdward.Pilatowicz@Sun.COM 	if (otyp == OTYP_LYR) {
6108863SEdward.Pilatowicz@Sun.COM 		xsp->xdfss_otyp_count[otyp][part]--;
6118863SEdward.Pilatowicz@Sun.COM 	} else {
6128863SEdward.Pilatowicz@Sun.COM 		xsp->xdfss_otyp_count[otyp][part] = 0;
6138863SEdward.Pilatowicz@Sun.COM 	}
6148863SEdward.Pilatowicz@Sun.COM 
6158863SEdward.Pilatowicz@Sun.COM 	if (!xdfs_isopen_part(xsp, part)) {
6168863SEdward.Pilatowicz@Sun.COM 		err = ldi_close(xsp->xdfss_tgt_lh[part], flag, credp);
6178863SEdward.Pilatowicz@Sun.COM 		xsp->xdfss_tgt_lh[part] = NULL;
6188863SEdward.Pilatowicz@Sun.COM 	}
6198863SEdward.Pilatowicz@Sun.COM 
6208863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&xsp->xdfss_mutex);
6218863SEdward.Pilatowicz@Sun.COM 
6228863SEdward.Pilatowicz@Sun.COM 	return (err);
6238863SEdward.Pilatowicz@Sun.COM }
6248863SEdward.Pilatowicz@Sun.COM 
6258863SEdward.Pilatowicz@Sun.COM int
xdfs_strategy(struct buf * bp)6268863SEdward.Pilatowicz@Sun.COM xdfs_strategy(struct buf *bp)
6278863SEdward.Pilatowicz@Sun.COM {
6288863SEdward.Pilatowicz@Sun.COM 	dev_t		dev = bp->b_edev;
6298863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
6308863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
6318863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
6328863SEdward.Pilatowicz@Sun.COM 	dev_t		tgt_devt;
6338863SEdward.Pilatowicz@Sun.COM 	struct buf	*bp_clone;
6348863SEdward.Pilatowicz@Sun.COM 
6358863SEdward.Pilatowicz@Sun.COM 	/* Sanity check the dev_t associated with this request. */
6368863SEdward.Pilatowicz@Sun.COM 	ASSERT(getmajor(dev) == xdfs_major);
6378863SEdward.Pilatowicz@Sun.COM 	if (getmajor(dev) != xdfs_major)
6388863SEdward.Pilatowicz@Sun.COM 		goto err;
6398863SEdward.Pilatowicz@Sun.COM 
6408863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp)) {
6418863SEdward.Pilatowicz@Sun.COM 		if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
6428863SEdward.Pilatowicz@Sun.COM 			return (ENOTSUP);
6438863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_cb_ops->cb_strategy(bp));
6448863SEdward.Pilatowicz@Sun.COM 	}
6458863SEdward.Pilatowicz@Sun.COM 
6468863SEdward.Pilatowicz@Sun.COM 	/*
6478863SEdward.Pilatowicz@Sun.COM 	 * Sanity checks that the dev_t associated with the buf we were
6488863SEdward.Pilatowicz@Sun.COM 	 * passed corresponds to an open partition.  On debug kernels we'll
6498863SEdward.Pilatowicz@Sun.COM 	 * panic and on non-debug kernels we'll return failure.
6508863SEdward.Pilatowicz@Sun.COM 	 */
6518863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&xsp->xdfss_mutex);
6528863SEdward.Pilatowicz@Sun.COM 	ASSERT(xdfs_isopen_part(xsp, part));
6538863SEdward.Pilatowicz@Sun.COM 	if (!xdfs_isopen_part(xsp, part)) {
6548863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&xsp->xdfss_mutex);
6558863SEdward.Pilatowicz@Sun.COM 		goto err;
6568863SEdward.Pilatowicz@Sun.COM 	}
6578863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&xsp->xdfss_mutex);
6588863SEdward.Pilatowicz@Sun.COM 
6598863SEdward.Pilatowicz@Sun.COM 	/* clone this buffer */
6608863SEdward.Pilatowicz@Sun.COM 	tgt_devt = xsp->xdfss_tgt_dev | part;
6618863SEdward.Pilatowicz@Sun.COM 	bp_clone = bioclone(bp, 0, bp->b_bcount, tgt_devt, bp->b_blkno,
6628863SEdward.Pilatowicz@Sun.COM 	    xdfs_iodone, NULL, KM_SLEEP);
6638863SEdward.Pilatowicz@Sun.COM 	bp_clone->b_chain = bp;
6648863SEdward.Pilatowicz@Sun.COM 
6658863SEdward.Pilatowicz@Sun.COM 	/*
6668863SEdward.Pilatowicz@Sun.COM 	 * If we're being invoked on behalf of the physio() call in
6678863SEdward.Pilatowicz@Sun.COM 	 * xdfs_dioctl_rwcmd() then b_private will be set to
6688863SEdward.Pilatowicz@Sun.COM 	 * XB_SLICE_NONE and we need to propegate this flag into the
6698863SEdward.Pilatowicz@Sun.COM 	 * cloned buffer so that the xdf driver will see it.
6708863SEdward.Pilatowicz@Sun.COM 	 */
6718863SEdward.Pilatowicz@Sun.COM 	if (bp->b_private == (void *)XB_SLICE_NONE)
6728863SEdward.Pilatowicz@Sun.COM 		bp_clone->b_private = (void *)XB_SLICE_NONE;
6738863SEdward.Pilatowicz@Sun.COM 
6748863SEdward.Pilatowicz@Sun.COM 	/*
6758863SEdward.Pilatowicz@Sun.COM 	 * Pass on the cloned buffer.  Note that we don't bother to check
6768863SEdward.Pilatowicz@Sun.COM 	 * for failure because the xdf strategy routine will have to
6778863SEdward.Pilatowicz@Sun.COM 	 * invoke biodone() if it wants to return an error, which means
6788863SEdward.Pilatowicz@Sun.COM 	 * that the xdfs_iodone() callback will get invoked and it
6798863SEdward.Pilatowicz@Sun.COM 	 * will propegate the error back up the stack and free the cloned
6808863SEdward.Pilatowicz@Sun.COM 	 * buffer.
6818863SEdward.Pilatowicz@Sun.COM 	 */
6828863SEdward.Pilatowicz@Sun.COM 	ASSERT(xsp->xdfss_tgt_lh[part] != NULL);
6838863SEdward.Pilatowicz@Sun.COM 	return (ldi_strategy(xsp->xdfss_tgt_lh[part], bp_clone));
6848863SEdward.Pilatowicz@Sun.COM 
6858863SEdward.Pilatowicz@Sun.COM err:
6868863SEdward.Pilatowicz@Sun.COM 	bioerror(bp, ENXIO);
6878863SEdward.Pilatowicz@Sun.COM 	bp->b_resid = bp->b_bcount;
6888863SEdward.Pilatowicz@Sun.COM 	biodone(bp);
6898863SEdward.Pilatowicz@Sun.COM 	return (0);
6908863SEdward.Pilatowicz@Sun.COM }
6918863SEdward.Pilatowicz@Sun.COM 
6928863SEdward.Pilatowicz@Sun.COM static int
xdfs_dump(dev_t dev,caddr_t addr,daddr_t blkno,int nblk)6938863SEdward.Pilatowicz@Sun.COM xdfs_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
6948863SEdward.Pilatowicz@Sun.COM {
6958863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
6968863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
6978863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
6988863SEdward.Pilatowicz@Sun.COM 
6998863SEdward.Pilatowicz@Sun.COM 	if (!XDFS_HVM_MODE(xsp))
7008863SEdward.Pilatowicz@Sun.COM 		return (ldi_dump(xsp->xdfss_tgt_lh[part], addr, blkno, nblk));
7018863SEdward.Pilatowicz@Sun.COM 
7028863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
7038863SEdward.Pilatowicz@Sun.COM 		return (ENOTSUP);
7048863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_cb_ops->cb_dump(dev, addr, blkno, nblk));
7058863SEdward.Pilatowicz@Sun.COM }
7068863SEdward.Pilatowicz@Sun.COM 
7078863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
7088863SEdward.Pilatowicz@Sun.COM static int
xdfs_read(dev_t dev,struct uio * uio,cred_t * credp)7098863SEdward.Pilatowicz@Sun.COM xdfs_read(dev_t dev, struct uio *uio, cred_t *credp)
7108863SEdward.Pilatowicz@Sun.COM {
7118863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
7128863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
7138863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
7148863SEdward.Pilatowicz@Sun.COM 
7158863SEdward.Pilatowicz@Sun.COM 	if (!XDFS_HVM_MODE(xsp))
7168863SEdward.Pilatowicz@Sun.COM 		return (ldi_read(xsp->xdfss_tgt_lh[part], uio, credp));
7178863SEdward.Pilatowicz@Sun.COM 
7188863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
7198863SEdward.Pilatowicz@Sun.COM 		return (ENOTSUP);
7208863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_cb_ops->cb_read(dev, uio, credp));
7218863SEdward.Pilatowicz@Sun.COM }
7228863SEdward.Pilatowicz@Sun.COM 
7238863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
7248863SEdward.Pilatowicz@Sun.COM static int
xdfs_write(dev_t dev,struct uio * uio,cred_t * credp)7258863SEdward.Pilatowicz@Sun.COM xdfs_write(dev_t dev, struct uio *uio, cred_t *credp)
7268863SEdward.Pilatowicz@Sun.COM {
7278863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
7288863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
7298863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
7308863SEdward.Pilatowicz@Sun.COM 
7318863SEdward.Pilatowicz@Sun.COM 	if (!XDFS_HVM_MODE(xsp))
7328863SEdward.Pilatowicz@Sun.COM 		return (ldi_write(xsp->xdfss_tgt_lh[part], uio, credp));
7338863SEdward.Pilatowicz@Sun.COM 
7348863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
7358863SEdward.Pilatowicz@Sun.COM 		return (ENOTSUP);
7368863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_cb_ops->cb_write(dev, uio, credp));
7378863SEdward.Pilatowicz@Sun.COM }
7388863SEdward.Pilatowicz@Sun.COM 
7398863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
7408863SEdward.Pilatowicz@Sun.COM static int
xdfs_aread(dev_t dev,struct aio_req * aio,cred_t * credp)7418863SEdward.Pilatowicz@Sun.COM xdfs_aread(dev_t dev, struct aio_req *aio, cred_t *credp)
7428863SEdward.Pilatowicz@Sun.COM {
7438863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
7448863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
7458863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
7468863SEdward.Pilatowicz@Sun.COM 
7478863SEdward.Pilatowicz@Sun.COM 	if (!XDFS_HVM_MODE(xsp))
7488863SEdward.Pilatowicz@Sun.COM 		return (ldi_aread(xsp->xdfss_tgt_lh[part], aio, credp));
7498863SEdward.Pilatowicz@Sun.COM 
7508863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL) ||
7518863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_strategy == NULL) ||
7528863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_strategy == nodev) ||
7538863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_aread == NULL))
7548863SEdward.Pilatowicz@Sun.COM 		return (ENOTSUP);
7558863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_cb_ops->cb_aread(dev, aio, credp));
7568863SEdward.Pilatowicz@Sun.COM }
7578863SEdward.Pilatowicz@Sun.COM 
7588863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
7598863SEdward.Pilatowicz@Sun.COM static int
xdfs_awrite(dev_t dev,struct aio_req * aio,cred_t * credp)7608863SEdward.Pilatowicz@Sun.COM xdfs_awrite(dev_t dev, struct aio_req *aio, cred_t *credp)
7618863SEdward.Pilatowicz@Sun.COM {
7628863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
7638863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
7648863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
7658863SEdward.Pilatowicz@Sun.COM 
7668863SEdward.Pilatowicz@Sun.COM 	if (!XDFS_HVM_MODE(xsp))
7678863SEdward.Pilatowicz@Sun.COM 		return (ldi_awrite(xsp->xdfss_tgt_lh[part], aio, credp));
7688863SEdward.Pilatowicz@Sun.COM 
7698863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL) ||
7708863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_strategy == NULL) ||
7718863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_strategy == nodev) ||
7728863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_awrite == NULL))
7738863SEdward.Pilatowicz@Sun.COM 		return (ENOTSUP);
7748863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_cb_ops->cb_awrite(dev, aio, credp));
7758863SEdward.Pilatowicz@Sun.COM }
7768863SEdward.Pilatowicz@Sun.COM 
7778863SEdward.Pilatowicz@Sun.COM static int
xdfs_ioctl(dev_t dev,int cmd,intptr_t arg,int flag,cred_t * credp,int * rvalp)7788863SEdward.Pilatowicz@Sun.COM xdfs_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp,
7798863SEdward.Pilatowicz@Sun.COM     int *rvalp)
7808863SEdward.Pilatowicz@Sun.COM {
7818863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
7828863SEdward.Pilatowicz@Sun.COM 	int		part = XDFS_DEV2PART(dev);
7838863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
7848863SEdward.Pilatowicz@Sun.COM 	int		rv;
7858863SEdward.Pilatowicz@Sun.COM 	boolean_t	done;
7868863SEdward.Pilatowicz@Sun.COM 
7878863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp)) {
7888863SEdward.Pilatowicz@Sun.COM 		if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL))
7898863SEdward.Pilatowicz@Sun.COM 			return (ENOTSUP);
7908863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_cb_ops->cb_ioctl(
7918863SEdward.Pilatowicz@Sun.COM 		    dev, cmd, arg, flag, credp, rvalp));
7928863SEdward.Pilatowicz@Sun.COM 	}
7938863SEdward.Pilatowicz@Sun.COM 
7948863SEdward.Pilatowicz@Sun.COM 	rv = xdfs_c_ioctl(xsp, dev, part, cmd, arg, flag, credp, rvalp, &done);
7958863SEdward.Pilatowicz@Sun.COM 	if (done)
7968863SEdward.Pilatowicz@Sun.COM 		return (rv);
797*10021SSheshadri.Vasudevan@Sun.COM 	rv = ldi_ioctl(xsp->xdfss_tgt_lh[part], cmd, arg, flag, credp, rvalp);
798*10021SSheshadri.Vasudevan@Sun.COM 	if (rv == 0) {
799*10021SSheshadri.Vasudevan@Sun.COM 		/* Force Geometry Validation */
800*10021SSheshadri.Vasudevan@Sun.COM 		(void) cmlb_invalidate(xsp->xdfss_cmlbhandle, 0);
801*10021SSheshadri.Vasudevan@Sun.COM 		(void) cmlb_validate(xsp->xdfss_cmlbhandle, 0, 0);
802*10021SSheshadri.Vasudevan@Sun.COM 	}
803*10021SSheshadri.Vasudevan@Sun.COM 	return (rv);
8048863SEdward.Pilatowicz@Sun.COM }
8058863SEdward.Pilatowicz@Sun.COM 
8068863SEdward.Pilatowicz@Sun.COM static int
xdfs_hvm_prop_op(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int flags,char * name,caddr_t valuep,int * lengthp)8078863SEdward.Pilatowicz@Sun.COM xdfs_hvm_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
8088863SEdward.Pilatowicz@Sun.COM     int flags, char *name, caddr_t valuep, int *lengthp)
8098863SEdward.Pilatowicz@Sun.COM {
8108863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
8118863SEdward.Pilatowicz@Sun.COM 	void		*xsp = ddi_get_soft_state(xdfs_ssp, instance);
8128863SEdward.Pilatowicz@Sun.COM 
8138863SEdward.Pilatowicz@Sun.COM 	ASSERT(XDFS_HVM_MODE(xsp));
8148863SEdward.Pilatowicz@Sun.COM 
8158863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) || (xdfs_hvm_cb_ops == NULL) ||
8168863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_prop_op == NULL) ||
8178863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_prop_op == nodev) ||
8188863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_cb_ops->cb_prop_op == nulldev))
8198863SEdward.Pilatowicz@Sun.COM 		return (DDI_PROP_NOT_FOUND);
8208863SEdward.Pilatowicz@Sun.COM 
8218863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_cb_ops->cb_prop_op(dev, dip, prop_op,
8228863SEdward.Pilatowicz@Sun.COM 	    flags, name, valuep, lengthp));
8238863SEdward.Pilatowicz@Sun.COM }
8248863SEdward.Pilatowicz@Sun.COM 
/*
 * Property lookup entry point.  Restricts the lookup to dynamic
 * properties and forwards it to the underlying xdf device, taking a
 * hold on the target (rather than an LDI open) for the duration.
 */
static int
xdfs_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	int		instance = ddi_get_instance(dip);
	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
	int		rv;
	dev_info_t	*tgt_dip;
	dev_t		tgt_devt;

	/*
	 * Sanity check that if a dev_t or dip were specified that they
	 * correspond to this device driver.  On debug kernels we'll
	 * panic and on non-debug kernels we'll return failure.
	 */
	ASSERT(ddi_driver_major(dip) == xdfs_major);
	ASSERT((dev == DDI_DEV_T_ANY) || (getmajor(dev) == xdfs_major));
	if ((ddi_driver_major(dip) != xdfs_major) ||
	    ((dev != DDI_DEV_T_ANY) && (getmajor(dev) != xdfs_major)))
		return (DDI_PROP_NOT_FOUND);

	/*
	 * This property lookup might be associated with a device node
	 * that is not yet attached, if so pass it onto ddi_prop_op().
	 */
	if (xsp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, flags,
		    name, valuep, lengthp));

	/* If we're accessing the device in hvm mode, pass this request on */
	if (XDFS_HVM_MODE(xsp))
		return (xdfs_hvm_prop_op(dev, dip, prop_op,
		    flags, name, valuep, lengthp));

	/*
	 * Make sure we only lookup dynamic properties.
	 *
	 * If there are static properties of the underlying xdf driver
	 * that we want to mirror, then we'll have to explicitly look them
	 * up and define them during attach.  There are a few reasons
	 * for this.  Most importantly, most static properties are typed
	 * and all dynamic properties are untyped, ie, for dynamic
	 * properties the caller must know the type of the property and
	 * how to interpret the value of the property.  the prop_op driver
	 * entry point is only designed for returning dynamic/untyped
	 * properties, so if we were to attempt to lookup and pass back
	 * static properties of the underlying device here then we would
	 * be losing the type information for those properties.  Another
	 * reason we don't want to pass on static property requests is that
	 * static properties are enumerable in the device tree, where as
	 * dynamic ones are not.
	 */
	flags |= DDI_PROP_DYNAMIC;

	/*
	 * We can't use the ldi here to access the underlying device because
	 * the ldi actually opens the device, and that open might fail if the
	 * device has already been opened with the FEXCL flag.  If we used
	 * the ldi here, it would also be possible for some other caller to
	 * try open the device with the FEXCL flag and get a failure back
	 * because we have it open to do a property query.  Instead we'll
	 * grab a hold on the target dip.
	 */
	if (!xdfs_tgt_hold(xsp))
		return (DDI_PROP_NOT_FOUND);

	/* figure out the dip and dev_t we're going to pass on down */
	tgt_dip = xsp->xdfss_tgt_dip;
	if (dev == DDI_DEV_T_ANY) {
		tgt_devt = DDI_DEV_T_ANY;
	} else {
		/* map the shell partition onto the target's dev_t space */
		tgt_devt = xsp->xdfss_tgt_dev | XDFS_DEV2PART(dev);
	}

	/*
	 * cdev_prop_op() is not a public interface, and normally the caller
	 * is required to make sure that the target driver actually implements
	 * this interface before trying to invoke it.  In this case we know
	 * that we're always accessing the xdf driver and it does have this
	 * interface defined, so we can skip the check.
	 */
	rv = cdev_prop_op(tgt_devt, tgt_dip,
	    prop_op, flags, name, valuep, lengthp);

	/* Drop the hold taken above now that the lookup is complete. */
	xdfs_tgt_release(xsp);
	return (rv);
}
9128863SEdward.Pilatowicz@Sun.COM 
9138863SEdward.Pilatowicz@Sun.COM /*
9148863SEdward.Pilatowicz@Sun.COM  * Driver PV and HVM dev_ops entry points
9158863SEdward.Pilatowicz@Sun.COM  */
9168863SEdward.Pilatowicz@Sun.COM /*ARGSUSED*/
9178863SEdward.Pilatowicz@Sun.COM static int
xdfs_getinfo(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)9188863SEdward.Pilatowicz@Sun.COM xdfs_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
9198863SEdward.Pilatowicz@Sun.COM     void **result)
9208863SEdward.Pilatowicz@Sun.COM {
9218863SEdward.Pilatowicz@Sun.COM 	dev_t		dev = (dev_t)arg;
9228863SEdward.Pilatowicz@Sun.COM 	int		instance = XDFS_DEV2UNIT(dev);
9238863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
9248863SEdward.Pilatowicz@Sun.COM 
9258863SEdward.Pilatowicz@Sun.COM 	switch (infocmd) {
9268863SEdward.Pilatowicz@Sun.COM 		case DDI_INFO_DEVT2DEVINFO:
9278863SEdward.Pilatowicz@Sun.COM 			if (xsp == NULL)
9288863SEdward.Pilatowicz@Sun.COM 				return (DDI_FAILURE);
9298863SEdward.Pilatowicz@Sun.COM 			if (XDFS_HVM_MODE(xsp))
9308863SEdward.Pilatowicz@Sun.COM 				*result = XDFS_HVM_DIP(xsp);
9318863SEdward.Pilatowicz@Sun.COM 			else
9328863SEdward.Pilatowicz@Sun.COM 				*result = (void *)xsp->xdfss_dip;
9338863SEdward.Pilatowicz@Sun.COM 			break;
9348863SEdward.Pilatowicz@Sun.COM 		case DDI_INFO_DEVT2INSTANCE:
9358863SEdward.Pilatowicz@Sun.COM 			*result = (void *)(intptr_t)instance;
9368863SEdward.Pilatowicz@Sun.COM 			break;
9378863SEdward.Pilatowicz@Sun.COM 		default:
9388863SEdward.Pilatowicz@Sun.COM 			return (DDI_FAILURE);
9398863SEdward.Pilatowicz@Sun.COM 	}
9408863SEdward.Pilatowicz@Sun.COM 	return (DDI_SUCCESS);
9418863SEdward.Pilatowicz@Sun.COM }
9428863SEdward.Pilatowicz@Sun.COM 
/*
 * Probe an instance that will be accessed in HVM (emulated) mode rather
 * than PV mode.  Allocates the instance's soft state, delegates the
 * probe to the emulated driver's dev_ops, and on success records the
 * HVM mode, dip, and device path in the soft state.
 *
 * Ownership of 'path' (a MAXPATHLEN buffer allocated by the caller)
 * transfers to the soft state on success; on failure it is freed here.
 */
static int
xdfs_hvm_probe(dev_info_t *dip, char *path)
{
	int		instance = ddi_get_instance(dip);
	int		rv = DDI_PROBE_SUCCESS;
	void		*xsp;

	ASSERT(path != NULL);
	cmn_err(CE_WARN, "PV access to device disabled: %s", path);

	/* zalloc may fail quietly; the VERIFY below catches that case */
	(void) ddi_soft_state_zalloc(xdfs_ssp, instance);
	VERIFY((xsp = ddi_get_soft_state(xdfs_ssp, instance)) != NULL);

	/*
	 * If the emulated driver has no probe routine, or its probe fails,
	 * undo the soft state allocation and release the path buffer.
	 */
	if ((xdfs_hvm_dev_ops == NULL) ||
	    (xdfs_hvm_dev_ops->devo_probe == NULL) ||
	    ((rv = xdfs_hvm_dev_ops->devo_probe(dip)) == DDI_PROBE_FAILURE)) {
		ddi_soft_state_free(xdfs_ssp, instance);
		cmn_err(CE_WARN, "HVM probe of device failed: %s", path);
		kmem_free(path, MAXPATHLEN);
		return (DDI_PROBE_FAILURE);
	}

	/* Success: remember we're in HVM mode and keep the path buffer. */
	XDFS_HVM_MODE(xsp) = B_TRUE;
	XDFS_HVM_DIP(xsp) = dip;
	XDFS_HVM_PATH(xsp) = path;

	return (rv);
}
9718863SEdward.Pilatowicz@Sun.COM 
/*
 * Probe entry point.  Decide whether this xdf shell instance should run
 * in PV mode (layered on top of an xdf device) or fall back to HVM
 * (emulated) access, and set up the corresponding soft state.
 *
 * Note on 'path' ownership: on the hvm fallback paths, xdfs_hvm_probe()
 * takes ownership of 'path' (freed there on failure, retained in the
 * soft state on success); on the PV success path it is freed here.
 */
static int
xdfs_probe(dev_info_t *dip)
{
	int		instance = ddi_get_instance(dip);
	xdfs_state_t	*xsp;
	dev_info_t	*tgt_dip;
	char		*path;
	int		i, pv_disable;

	/* if we've already probed the device then there's nothing to do */
	if (ddi_get_soft_state(xdfs_ssp, instance))
		return (DDI_PROBE_PARTIAL);

	/* Figure out our pathname */
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	/* see if we should disable pv access mode (global or per-device) */
	pv_disable = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_NOTPROM, "pv_disable", 0);

	if (xdfs_pv_disable || pv_disable)
		return (xdfs_hvm_probe(dip, path));

	/*
	 * This xdf shell device layers on top of an xdf device.  So the first
	 * thing we need to do is determine which xdf device instance this
	 * xdf shell instance should be layered on top of.
	 */
	for (i = 0; xdfs_c_h2p_map[i].xdfs_h2p_hvm != NULL; i++) {
		if (strcmp(xdfs_c_h2p_map[i].xdfs_h2p_hvm, path) == 0)
			break;
	}

	if ((xdfs_c_h2p_map[i].xdfs_h2p_hvm == NULL) ||
	    ((tgt_dip = xdf_hvm_hold(xdfs_c_h2p_map[i].xdfs_h2p_pv)) == NULL)) {
		/*
		 * UhOh.  We either don't know what xdf instance this xdf
		 * shell device should be mapped to or the xdf node associated
		 * with this instance isn't attached.  In either case fall
		 * back to hvm access.
		 */
		return (xdfs_hvm_probe(dip, path));
	}

	/* allocate and initialize our state structure */
	(void) ddi_soft_state_zalloc(xdfs_ssp, instance);
	xsp = ddi_get_soft_state(xdfs_ssp, instance);
	mutex_init(&xsp->xdfss_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&xsp->xdfss_cv, NULL, CV_DEFAULT, NULL);
	/* NOTE(review): xdfs_tgt_probe() presumably requires xdfss_mutex
	 * held - confirm against its definition. */
	mutex_enter(&xsp->xdfss_mutex);

	xsp->xdfss_dip = dip;
	xsp->xdfss_pv = xdfs_c_h2p_map[i].xdfs_h2p_pv;
	xsp->xdfss_hvm = xdfs_c_h2p_map[i].xdfs_h2p_hvm;
	xsp->xdfss_tgt_attached = B_FALSE;
	cmlb_alloc_handle((cmlb_handle_t *)&xsp->xdfss_cmlbhandle);

	if (!xdfs_tgt_probe(xsp, tgt_dip)) {
		/*
		 * PV target probe failed: tear down everything set up above
		 * (handle, soft state, xdf hold) and fall back to hvm access.
		 */
		mutex_exit(&xsp->xdfss_mutex);
		cmlb_free_handle(&xsp->xdfss_cmlbhandle);
		ddi_soft_state_free(xdfs_ssp, instance);
		ddi_release_devi(tgt_dip);
		return (xdfs_hvm_probe(dip, path));
	}
	mutex_exit(&xsp->xdfss_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	kmem_free(path, MAXPATHLEN);
	return (DDI_PROBE_SUCCESS);
}
10498863SEdward.Pilatowicz@Sun.COM 
10508863SEdward.Pilatowicz@Sun.COM static int
xdfs_hvm_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)10518863SEdward.Pilatowicz@Sun.COM xdfs_hvm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
10528863SEdward.Pilatowicz@Sun.COM {
10538863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
10548863SEdward.Pilatowicz@Sun.COM 	void		*xsp = ddi_get_soft_state(xdfs_ssp, instance);
10558863SEdward.Pilatowicz@Sun.COM 	int		rv = DDI_FAILURE;
10568863SEdward.Pilatowicz@Sun.COM 
10578863SEdward.Pilatowicz@Sun.COM 	XDFS_HVM_SANE(xsp);
10588863SEdward.Pilatowicz@Sun.COM 
10598863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) ||
10608863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_dev_ops->devo_attach == NULL) ||
10618863SEdward.Pilatowicz@Sun.COM 	    ((rv = xdfs_hvm_dev_ops->devo_attach(dip, cmd)) != DDI_SUCCESS)) {
10628863SEdward.Pilatowicz@Sun.COM 		cmn_err(CE_WARN, "HVM attach of device failed: %s",
10638863SEdward.Pilatowicz@Sun.COM 		    XDFS_HVM_PATH(xsp));
10648863SEdward.Pilatowicz@Sun.COM 		kmem_free(XDFS_HVM_PATH(xsp), MAXPATHLEN);
10658863SEdward.Pilatowicz@Sun.COM 		ddi_soft_state_free(xdfs_ssp, instance);
10668863SEdward.Pilatowicz@Sun.COM 		return (rv);
10678863SEdward.Pilatowicz@Sun.COM 	}
10688863SEdward.Pilatowicz@Sun.COM 
10698863SEdward.Pilatowicz@Sun.COM 	return (DDI_SUCCESS);
10708863SEdward.Pilatowicz@Sun.COM }
10718863SEdward.Pilatowicz@Sun.COM 
10728863SEdward.Pilatowicz@Sun.COM /*
10738863SEdward.Pilatowicz@Sun.COM  * Autoconfiguration Routines
10748863SEdward.Pilatowicz@Sun.COM  */
10758863SEdward.Pilatowicz@Sun.COM static int
xdfs_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)10768863SEdward.Pilatowicz@Sun.COM xdfs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
10778863SEdward.Pilatowicz@Sun.COM {
10788863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
10798863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
10808863SEdward.Pilatowicz@Sun.COM 
10818863SEdward.Pilatowicz@Sun.COM 	if (xsp == NULL)
10828863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
10838863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp))
10848863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_attach(dip, cmd));
10858863SEdward.Pilatowicz@Sun.COM 	if (cmd != DDI_ATTACH)
10868863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
10878863SEdward.Pilatowicz@Sun.COM 
10888863SEdward.Pilatowicz@Sun.COM 	xdfs_c_attach(xsp);
10898863SEdward.Pilatowicz@Sun.COM 	return (DDI_SUCCESS);
10908863SEdward.Pilatowicz@Sun.COM }
10918863SEdward.Pilatowicz@Sun.COM 
10928863SEdward.Pilatowicz@Sun.COM static int
xdfs_hvm_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)10938863SEdward.Pilatowicz@Sun.COM xdfs_hvm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
10948863SEdward.Pilatowicz@Sun.COM {
10958863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
10968863SEdward.Pilatowicz@Sun.COM 	void		*xsp = ddi_get_soft_state(xdfs_ssp, instance);
10978863SEdward.Pilatowicz@Sun.COM 	int		rv;
10988863SEdward.Pilatowicz@Sun.COM 
10998863SEdward.Pilatowicz@Sun.COM 	XDFS_HVM_SANE(xsp);
11008863SEdward.Pilatowicz@Sun.COM 
11018863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) ||
11028863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_dev_ops->devo_detach == NULL))
11038863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
11048863SEdward.Pilatowicz@Sun.COM 
11058863SEdward.Pilatowicz@Sun.COM 	if ((rv = xdfs_hvm_dev_ops->devo_detach(dip, cmd)) != DDI_SUCCESS)
11068863SEdward.Pilatowicz@Sun.COM 		return (rv);
11078863SEdward.Pilatowicz@Sun.COM 
11088863SEdward.Pilatowicz@Sun.COM 	kmem_free(XDFS_HVM_PATH(xsp), MAXPATHLEN);
11098863SEdward.Pilatowicz@Sun.COM 	ddi_soft_state_free(xdfs_ssp, instance);
11108863SEdward.Pilatowicz@Sun.COM 	return (DDI_SUCCESS);
11118863SEdward.Pilatowicz@Sun.COM }
11128863SEdward.Pilatowicz@Sun.COM 
11138863SEdward.Pilatowicz@Sun.COM static int
xdfs_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)11148863SEdward.Pilatowicz@Sun.COM xdfs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
11158863SEdward.Pilatowicz@Sun.COM {
11168863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
11178863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
11188863SEdward.Pilatowicz@Sun.COM 
11198863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp))
11208863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_detach(dip, cmd));
11218863SEdward.Pilatowicz@Sun.COM 	if (cmd != DDI_DETACH)
11228863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
11238863SEdward.Pilatowicz@Sun.COM 
11248863SEdward.Pilatowicz@Sun.COM 	mutex_enter(&xsp->xdfss_mutex);
11258863SEdward.Pilatowicz@Sun.COM 	if (!xdfs_tgt_detach(xsp)) {
11268863SEdward.Pilatowicz@Sun.COM 		mutex_exit(&xsp->xdfss_mutex);
11278863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
11288863SEdward.Pilatowicz@Sun.COM 	}
11298863SEdward.Pilatowicz@Sun.COM 	mutex_exit(&xsp->xdfss_mutex);
11308863SEdward.Pilatowicz@Sun.COM 
11318863SEdward.Pilatowicz@Sun.COM 	cmlb_detach(xsp->xdfss_cmlbhandle, 0);
11328863SEdward.Pilatowicz@Sun.COM 	cmlb_free_handle(&xsp->xdfss_cmlbhandle);
11338863SEdward.Pilatowicz@Sun.COM 	ddi_release_devi(xsp->xdfss_tgt_dip);
11348863SEdward.Pilatowicz@Sun.COM 	ddi_soft_state_free(xdfs_ssp, instance);
11358863SEdward.Pilatowicz@Sun.COM 	ddi_prop_remove_all(dip);
11368863SEdward.Pilatowicz@Sun.COM 	return (DDI_SUCCESS);
11378863SEdward.Pilatowicz@Sun.COM }
11388863SEdward.Pilatowicz@Sun.COM 
11398863SEdward.Pilatowicz@Sun.COM static int
xdfs_hvm_power(dev_info_t * dip,int component,int level)11408863SEdward.Pilatowicz@Sun.COM xdfs_hvm_power(dev_info_t *dip, int component, int level)
11418863SEdward.Pilatowicz@Sun.COM {
11428863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
11438863SEdward.Pilatowicz@Sun.COM 	void		*xsp = ddi_get_soft_state(xdfs_ssp, instance);
11448863SEdward.Pilatowicz@Sun.COM 
11458863SEdward.Pilatowicz@Sun.COM 	XDFS_HVM_SANE(xsp);
11468863SEdward.Pilatowicz@Sun.COM 
11478863SEdward.Pilatowicz@Sun.COM 	if ((xdfs_hvm_dev_ops == NULL) ||
11488863SEdward.Pilatowicz@Sun.COM 	    (xdfs_hvm_dev_ops->devo_power == NULL))
11498863SEdward.Pilatowicz@Sun.COM 		return (DDI_FAILURE);
11508863SEdward.Pilatowicz@Sun.COM 	return (xdfs_hvm_dev_ops->devo_power(dip, component, level));
11518863SEdward.Pilatowicz@Sun.COM }
11528863SEdward.Pilatowicz@Sun.COM 
11538863SEdward.Pilatowicz@Sun.COM static int
xdfs_power(dev_info_t * dip,int component,int level)11548863SEdward.Pilatowicz@Sun.COM xdfs_power(dev_info_t *dip, int component, int level)
11558863SEdward.Pilatowicz@Sun.COM {
11568863SEdward.Pilatowicz@Sun.COM 	int		instance = ddi_get_instance(dip);
11578863SEdward.Pilatowicz@Sun.COM 	xdfs_state_t	*xsp = ddi_get_soft_state(xdfs_ssp, instance);
11588863SEdward.Pilatowicz@Sun.COM 
11598863SEdward.Pilatowicz@Sun.COM 	if (XDFS_HVM_MODE(xsp))
11608863SEdward.Pilatowicz@Sun.COM 		return (xdfs_hvm_power(dip, component, level));
11618863SEdward.Pilatowicz@Sun.COM 	return (nodev());
11628863SEdward.Pilatowicz@Sun.COM }
11638863SEdward.Pilatowicz@Sun.COM 
/*
 * Cmlb ops vector: target-disk operations used by the common disk
 * label code (raw block read/write and geometry/info queries).
 */
static cmlb_tg_ops_t xdfs_lb_ops = {
	TG_DK_OPS_VERSION_1,
	xdfs_lb_rdwr,		/* tg_rdwr */
	xdfs_lb_getinfo		/* tg_getinfo */
};
11728863SEdward.Pilatowicz@Sun.COM 
/*
 * Device driver ops vector (character/block entry points).
 */
static struct cb_ops xdfs_cb_ops = {
	xdfs_open,		/* open */
	xdfs_close,		/* close */
	xdfs_strategy,		/* strategy */
	nodev,			/* print */
	xdfs_dump,		/* dump */
	xdfs_read,		/* read */
	xdfs_write,		/* write */
	xdfs_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	xdfs_prop_op,		/* cb_prop_op */
	0,			/* streamtab  */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	xdfs_aread,		/* async read */
	xdfs_awrite		/* async write */
};
11968863SEdward.Pilatowicz@Sun.COM 
/*
 * dev_ops vector; non-static so the shell client code can reference it.
 */
struct dev_ops xdfs_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	xdfs_getinfo,		/* info */
	nulldev,		/* identify */
	xdfs_probe,		/* probe */
	xdfs_attach,		/* attach */
	xdfs_detach,		/* detach */
	nodev,			/* reset */
	&xdfs_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	xdfs_power,		/* power */
	ddi_quiesce_not_supported, /* devo_quiesce */
};
12118863SEdward.Pilatowicz@Sun.COM 
/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver. */
	NULL,			/* Module description.  Set by _init() */
	&xdfs_ops,		/* Driver ops. */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
12248863SEdward.Pilatowicz@Sun.COM 
/*
 * Module load entry point: resolve our major number, size and
 * initialize the per-instance soft state, then install the module.
 */
int
_init(void)
{
	int rval;

	/* The driver name is supplied by the shell client code. */
	xdfs_major = ddi_name_to_major((char *)xdfs_c_name);
	if (xdfs_major == (major_t)-1)
		return (EINVAL);

	/*
	 * Determine the size of our soft state structure.  The base
	 * size of the structure is the larger of the hvm clients state
	 * structure, or our shell state structure.  Then we'll align
	 * the end of the structure to a pointer boundary and append
	 * a xdfs_hvm_state_t structure.  This way the xdfs_hvm_state_t
	 * structure is always present and we can use it to determine the
	 * current device access mode (hvm or shell).
	 */
	xdfs_ss_size = MAX(xdfs_c_hvm_ss_size, sizeof (xdfs_state_t));
	xdfs_ss_size = P2ROUNDUP(xdfs_ss_size, sizeof (uintptr_t));
	xdfs_ss_size += sizeof (xdfs_hvm_state_t);

	/*
	 * In general ide usually supports 4 disk devices, this same
	 * limitation also applies to software emulating ide devices.
	 * so by default we pre-allocate 4 xdf shell soft state structures.
	 */
	if ((rval = ddi_soft_state_init(&xdfs_ssp,
	    xdfs_ss_size, XDFS_NODES)) != 0)
		return (rval);
	/* Publish the soft state handle to the hvm client code. */
	*xdfs_c_hvm_ss = xdfs_ssp;

	/* Install our module */
	if (modldrv.drv_linkinfo == NULL)
		modldrv.drv_linkinfo = (char *)xdfs_c_linkinfo;
	if ((rval = mod_install(&modlinkage)) != 0) {
		/* Undo the soft state setup if installation failed. */
		ddi_soft_state_fini(&xdfs_ssp);
		return (rval);
	}

	return (0);
}
12678863SEdward.Pilatowicz@Sun.COM 
/*
 * Module info entry point.  The linkinfo string is filled in lazily
 * (here and in _init()) from the shell client code.
 */
int
_info(struct modinfo *modinfop)
{
	if (modldrv.drv_linkinfo == NULL)
		modldrv.drv_linkinfo = (char *)xdfs_c_linkinfo;
	return (mod_info(&modlinkage, modinfop));
}
12758863SEdward.Pilatowicz@Sun.COM 
12768863SEdward.Pilatowicz@Sun.COM int
_fini(void)12778863SEdward.Pilatowicz@Sun.COM _fini(void)
12788863SEdward.Pilatowicz@Sun.COM {
12798863SEdward.Pilatowicz@Sun.COM 	int	rval;
12808863SEdward.Pilatowicz@Sun.COM 	if ((rval = mod_remove(&modlinkage)) != 0)
12818863SEdward.Pilatowicz@Sun.COM 		return (rval);
12828863SEdward.Pilatowicz@Sun.COM 	ddi_soft_state_fini(&xdfs_ssp);
12838863SEdward.Pilatowicz@Sun.COM 	return (0);
12848863SEdward.Pilatowicz@Sun.COM }
1285