/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

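/*
 * Shorthand for the target-disk (dadk) object linked into the soft
 * state: DKTP_DATA is its private data and DKTP_EXT its extension,
 * which carries the node type, controller type, and the removable
 * and read-only flags used throughout this driver.
 */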
#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * cmdk_indump is a single flag that needs no mutex: during a panic
 * dump the system cannot switch threads, so cmdk_dump is only ever
 * called single-threaded.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Function Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

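/*
 * Bad-block-handling ops vector handed to dadk; the init and free
 * slots are no-ops (nulldev), and remapping is driven through the
 * gethandle/htoc/freehandle entries.
 */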
static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen, 		/* open */
	cmdkclose, 		/* close */
	cmdkstrategy, 		/* strategy */
	nodev, 			/* print */
	cmdkdump, 		/* dump */
	cmdkread, 		/* read */
	cmdkwrite, 		/* write */
	cmdkioctl, 		/* ioctl */
	nodev, 			/* devmap */
	nodev, 			/* mmap */
	nodev, 			/* segmap */
	nochpoll, 		/* poll */
	cmdk_prop_op, 		/* cb_prop_op */
	0, 			/* streamtab  */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

struct dev_ops cmdk_ops = {
	DEVO_REV, 		/* devo_rev, */
	0, 			/* refcnt  */
	cmdkinfo,		/* info */
	nulldev, 		/* identify */
	cmdkprobe, 		/* probe */
	cmdkattach, 		/* attach */
	cmdkdetach,		/* detach */
	nodev, 			/* reset */
	&cmdk_cb_ops, 		/* driver operations */
	(struct bus_ops *)0	/* bus operations */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops, 	/* Type of module. This one is a driver */
	"Common Direct Access Disk %I%",
	&cmdk_ops, 				/* driver ops 		*/
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length);
static int cmdk_lb_getphygeom(dev_info_t *dip,  cmlb_geom_t *phygeomp);
static int cmdk_lb_getvirtgeom(dev_info_t *dip,  cmlb_geom_t *virtgeomp);
static int cmdk_lb_getcapacity(dev_info_t *dip, diskaddr_t *capp);
static int cmdk_lb_getattribute(dev_info_t *dip, tg_attribute_t *tgattribute);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_0,
	cmdk_lb_rdwr,
	cmdk_lb_getphygeom,
	cmdk_lb_getvirtgeom,
	cmdk_lb_getcapacity,
	cmdk_lb_getattribute
};

int
_init(void)
{
	int 	rval;

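	/*
	 * The last argument to ddi_soft_state_init() is a hint to
	 * preallocate state structures for that many instances.
	 */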
	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);

	/*
	 * The removal code below is disabled until cmdk is a truly
	 * unloadable module; right now x86 systems panic on a
	 * diskless reconfig boot if cmdk is unloaded.
	 */

#if 0 	/* bugid 1186679 */
	int	rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int 	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int 		instance;
	struct		cmdk *dkp;
	char 		*node_type;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for device
	 */

	/* open the target disk	 */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}


static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int 		instance;
	int		max_instance;

	if (cmd != DDI_DETACH) {
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of the driver is open */
	for (instance = 0; instance <= max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on
	 * detach for case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int 		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
				return (DDI_FAILURE);
			*result = (void *) dkp->dk_dip;
			break;
		case DDI_INFO_DEVT2INSTANCE:
			*result = (void *)(intptr_t)instance;
			break;
		default:
			return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op. Size depends on valid label.
	 */
	if ((dev != DDI_DEV_T_ANY) && (dkp != NULL)) {
		if (!cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    CMDKPART(dev),
		    &p_lblkcnt,
		    &p_lblksrt,
		    NULL,
		    NULL))
			return (ddi_prop_op_nblocks(dev, dip,
			    prop_op, mod_flags,
			    name, valuep, lengthp,
			    (uint64_t)p_lblkcnt));
	}

	return (ddi_prop_op(dev, dip,
	    prop_op, mod_flags,
	    name, valuep, lengthp));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int 		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model; a
 * 32-bit application on a 64-bit kernel (DDI_MODEL_ILP32) passes a
 * dadkio_rwcmd32, which is converted to the native form for our
 * internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			struct dadkio_rwcmd32 cmd32;

			if (ddi_copyin(inaddr, &cmd32,
			    sizeof (struct dadkio_rwcmd32), flag)) {
				return (EFAULT);
			}

			rwcmdp->cmd = cmd32.cmd;
			rwcmdp->flags = cmd32.flags;
			rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
			rwcmdp->buflen = cmd32.buflen;
			rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
			/*
			 * Note: we do not convert the 'status' field,
			 * as it should not contain valid data at this
			 * point.
			 */
			bzero(&rwcmdp->status, sizeof (rwcmdp->status));
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyin(inaddr, rwcmdp,
			    sizeof (struct dadkio_rwcmd), flag)) {
				return (EFAULT);
			}
		}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			struct dadkio_rwcmd32 cmd32;

			cmd32.cmd = rwcmdp->cmd;
			cmd32.flags = rwcmdp->flags;
			cmd32.blkaddr = rwcmdp->blkaddr;
			cmd32.buflen = rwcmdp->buflen;
			ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
			cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

			cmd32.status.status = rwcmdp->status.status;
			cmd32.status.resid = rwcmdp->status.resid;
			cmd32.status.failed_blk_is_valid =
			    rwcmdp->status.failed_blk_is_valid;
			cmd32.status.failed_blk = rwcmdp->status.failed_blk;
			cmd32.status.fru_code_is_valid =
			    rwcmdp->status.fru_code_is_valid;
			cmd32.status.fru_code = rwcmdp->status.fru_code;

			bcopy(rwcmdp->status.add_error_info,
			    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

			if (ddi_copyout(&cmd32, outaddr,
			    sizeof (struct dadkio_rwcmd32), flag))
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyout(rwcmdp, outaddr,
			    sizeof (struct dadkio_rwcmd), flag))
				return (EFAULT);
		}
	}
	return (0);
}

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int 		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char 		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct  tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		int rc;

		rc = cmlb_ioctl(
		    dkp->dk_cmlbhandle,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp);
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

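/*
 * Open/close accounting: dk_open_lyr[] counts layered (OTYP_LYR)
 * opens per partition, dk_open_reg[otyp] is a bitmask of partitions
 * open for each other open type, and dk_open_exl is a bitmask of
 * partitions held exclusively (FEXCL).
 */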
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int 		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else
		dkp->dk_open_reg[otyp] &= ~partbit;
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int 		part;
	ulong_t		partbit;
	int 		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));
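	/*
	 * Non-blocking (FNDELAY/FNONBLOCK) opens are allowed to
	 * proceed without a valid label so that utilities can still
	 * reach the raw device.
	 */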

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle) != 0) {

		/* fail unless this is a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail unless this is a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check if the partition is already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int 		instance;
	struct	cmdk 	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * cmlb_partinfo will only re-read the vtoc if necessary
	 */
	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(bp->b_edev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

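	/*
	 * Truncate transfers that extend past the end of the
	 * partition; the bytes not transferred are reported back
	 * in b_resid.
	 */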
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

static int
cmdk_lb_rdwr(
    dev_info_t *dip,
    uchar_t cmd,
    void *bufaddr,
    diskaddr_t start,
    size_t count)
{
	struct cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* round count up to a multiple of the sector size (NBPSCTR) */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

static int
cmdk_lb_getcapacity(
    dev_info_t *dip,
    diskaddr_t *capp)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	/* dadk_getphygeom always returns success */
	(void) dadk_getphygeom(DKTP_DATA, &phyg);

	*capp = phyg.g_cap;

	return (0);
}

static int
cmdk_lb_getvirtgeom(
    dev_info_t *dip,
    cmlb_geom_t *virtgeomp)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;
	diskaddr_t		capacity;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	(void) dadk_getgeom(DKTP_DATA, &phyg);
	capacity = phyg.g_cap;

	/*
	 * If the controller returned us something that doesn't
	 * really fit into an Int 13/function 8 geometry
	 * result, just fail the ioctl.  See PSARC 1998/313.
	 */
	if (capacity < 0 || capacity >= 63 * 254 * 1024)
		return (EINVAL);

	virtgeomp->g_capacity	= capacity;
	virtgeomp->g_nsect	= 63;
	virtgeomp->g_nhead	= 254;
	virtgeomp->g_ncyl	= capacity / (63 * 254);
	virtgeomp->g_acyl	= 0;
	virtgeomp->g_secsize	= 512;
	virtgeomp->g_intrlv	= 1;
	virtgeomp->g_rpm	= 3600;

	return (0);
}

static int
cmdk_lb_getphygeom(
    dev_info_t *dip,
    cmlb_geom_t *phygeomp)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	/* dadk_getphygeom always returns success */
	(void) dadk_getphygeom(DKTP_DATA, &phyg);

	phygeomp->g_capacity	= phyg.g_cap;
	phygeomp->g_nsect	= phyg.g_sec;
	phygeomp->g_nhead	= phyg.g_head;
	phygeomp->g_acyl	= phyg.g_acyl;
	phygeomp->g_ncyl	= phyg.g_cyl;
	phygeomp->g_secsize	= phyg.g_secsiz;
	phygeomp->g_intrlv	= 1;
	phygeomp->g_rpm		= 3600;

	return (0);
}

static int
cmdk_lb_getattribute(
    dev_info_t *dip,
    tg_attribute_t *tgattribute)
{
	struct cmdk *dkp;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if ((DKTP_EXT->tg_rdonly))
		tgattribute->media_is_writable = FALSE;
	else
		tgattribute->media_is_writable = TRUE;

	return (0);
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the deviceid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);

}

/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, (ddi_devid_t *)&dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * A valid model/serial string must contain at least one
	 * character that is not a space, NUL, or '0'; compute the
	 * trimmed length by dropping trailing spaces and NULs.
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;	/* must be NULL for the err path */
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

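	/*
	 * The checksum is the XOR of the sector's contents taken as
	 * 32-bit words, excluding the last word, which holds the
	 * stored checksum itself.
	 */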
1521*1709Smlf 	/* Calculate the checksum */
1522*1709Smlf 	chksum = 0;
1523*1709Smlf 	ip = (uint_t *)dkdevidp;
1524*1709Smlf 	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1525*1709Smlf 		chksum ^= ip[i];
1526*1709Smlf 	if (DKD_GETCHKSUM(dkdevidp) != chksum)
1527*1709Smlf 		goto err;
1528*1709Smlf 
1529*1709Smlf 	/* Validate the device id */
1530*1709Smlf 	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
1531*1709Smlf 		goto err;
1532*1709Smlf 
1533*1709Smlf 	/* keep a copy of the device id */
1534*1709Smlf 	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
1535*1709Smlf 	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
1536*1709Smlf 	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);
1537*1709Smlf 
1538*1709Smlf 	rc = DDI_SUCCESS;
1539*1709Smlf 
1540*1709Smlf err:
1541*1709Smlf 	if (handle != NULL)
1542*1709Smlf 		(void) dadk_iob_free(DKTP_DATA, handle);
1543*1709Smlf 	return (rc);
1544*1709Smlf }
1545*1709Smlf 
1546*1709Smlf /*
1547*1709Smlf  * Create a devid and write it on the first block of the last track of
1548*1709Smlf  * the last cylinder.
1549*1709Smlf  * Return DDI_SUCCESS or DDI_FAILURE.
1550*1709Smlf  */
1551*1709Smlf static int
1552*1709Smlf cmdk_devid_fabricate(struct cmdk *dkp)
1553*1709Smlf {
1554*1709Smlf 	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init  */
1555*1709Smlf 	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
1556*1709Smlf 	diskaddr_t	blk;
1557*1709Smlf 	tgdk_iob_handle	handle = NULL;
1558*1709Smlf 	uint_t		*ip, chksum;
1559*1709Smlf 	int		i;
1560*1709Smlf 	int		rc;
1561*1709Smlf 
1562*1709Smlf 	rc = ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid);
1563*1709Smlf 	if (rc != DDI_SUCCESS)
1564*1709Smlf 		goto err;
1565*1709Smlf 	rc = DDI_FAILURE;	/* assume failure until devid is written */
1566*1709Smlf 	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk)) {
1567*1709Smlf 		/* no devid block address; err path frees the devid */
1568*1709Smlf 		goto err;
1569*1709Smlf 	}
1570*1709Smlf 
1571*1709Smlf 	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
1572*1709Smlf 	if (!handle)
1573*1709Smlf 		goto err;
1574*1709Smlf 
1575*1709Smlf 	/* Locate the buffer */
1576*1709Smlf 	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);
1577*1709Smlf 
1578*1709Smlf 	/* Fill in the revision */
1579*1709Smlf 	bzero(dkdevidp, NBPSCTR);
1580*1709Smlf 	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
1581*1709Smlf 	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;
1582*1709Smlf 
1583*1709Smlf 	/* Copy in the device id */
1584*1709Smlf 	i = ddi_devid_sizeof(devid);
1585*1709Smlf 	if (i > DK_DEVID_SIZE)
1586*1709Smlf 		goto err;
1587*1709Smlf 	bcopy(devid, dkdevidp->dkd_devid, i);
1588*1709Smlf 
1589*1709Smlf 	/* Calculate the chksum */
1590*1709Smlf 	chksum = 0;
1591*1709Smlf 	ip = (uint_t *)dkdevidp;
1592*1709Smlf 	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1593*1709Smlf 		chksum ^= ip[i];
1594*1709Smlf 
1595*1709Smlf 	/* Fill in the checksum */
1596*1709Smlf 	DKD_FORMCHKSUM(chksum, dkdevidp);
1597*1709Smlf 
1598*1709Smlf 	/* write the devid */
1599*1709Smlf 	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1600*1709Smlf 
1601*1709Smlf 	dkp->dk_devid = devid;
1602*1709Smlf 
1603*1709Smlf 	rc = DDI_SUCCESS;
1604*1709Smlf 
1605*1709Smlf err:
1606*1709Smlf 	if (handle != NULL)
1607*1709Smlf 		(void) dadk_iob_free(DKTP_DATA, handle);
1608*1709Smlf 
1609*1709Smlf 	if (rc != DDI_SUCCESS && devid != NULL)
1610*1709Smlf 		ddi_devid_free(devid);
1611*1709Smlf 
1612*1709Smlf 	return (rc);
1613*1709Smlf }
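
#ifdef CMDK_DEBUG
/*
 * Illustrative sketch, not part of the original driver: validate a raw
 * dk_devid sector image with the same revision and XOR-checksum rules
 * used by cmdk_devid_read() and cmdk_devid_fabricate() above.  The
 * name cmdk_devid_sanity() is hypothetical and compiled only with
 * CMDK_DEBUG.
 */
static int
cmdk_devid_sanity(struct dk_devid *dkdevidp)
{
	uint_t	*ip = (uint_t *)dkdevidp;
	uint_t	chksum = 0;
	int	i;

	/* revision bytes must match the on-disk format written above */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		return (DDI_FAILURE);

	/* XOR every word of the sector except the trailing checksum */
	for (i = 0; i < ((NBPSCTR - sizeof (int)) / sizeof (int)); i++)
		chksum ^= ip[i];

	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		return (DDI_FAILURE);

	/* defer full content validation to ddi_devid_valid() */
	return (DDI_SUCCESS);
}
#endif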
1614*1709Smlf 
1615*1709Smlf static void
1616*1709Smlf cmdk_bbh_free_alts(struct cmdk *dkp)
1617*1709Smlf {
1618*1709Smlf 	if (dkp->dk_alts_hdl) {
1619*1709Smlf 		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1620*1709Smlf 		kmem_free(dkp->dk_slc_cnt, NDKMAP *
1621*1709Smlf 		    (sizeof (uint32_t) + sizeof (struct alts_ent *)));
1622*1709Smlf 		dkp->dk_slc_cnt = NULL;	/* avoid stale pointer on reopen */
1623*1709Smlf 		dkp->dk_alts_hdl = NULL;
1623*1709Smlf 	}
1624*1709Smlf }
1625*1709Smlf 
1626*1709Smlf static void
1627*1709Smlf cmdk_bbh_reopen(struct cmdk *dkp)
1628*1709Smlf {
1629*1709Smlf 	tgdk_iob_handle 	handle = NULL;
1630*1709Smlf 	diskaddr_t		slcb, slcn, slce;
1631*1709Smlf 	struct	alts_parttbl	*ap;
1632*1709Smlf 	struct	alts_ent	*enttblp;
1633*1709Smlf 	uint32_t		altused;
1634*1709Smlf 	uint32_t		altbase;
1635*1709Smlf 	uint32_t		altlast;
1636*1709Smlf 	int			alts;
1637*1709Smlf 	uint16_t		vtoctag;
1638*1709Smlf 	int			i, j;
1639*1709Smlf 
1640*1709Smlf 	/* find slice with V_ALTSCTR tag */
1641*1709Smlf 	for (alts = 0; alts < NDKMAP; alts++) {
1642*1709Smlf 		if (cmlb_partinfo(
1643*1709Smlf 		    dkp->dk_cmlbhandle,
1644*1709Smlf 		    alts,
1645*1709Smlf 		    &slcn,
1646*1709Smlf 		    &slcb,
1647*1709Smlf 		    NULL,
1648*1709Smlf 		    &vtoctag)) {
1649*1709Smlf 			goto empty;	/* no partition table exists */
1650*1709Smlf 		}
1651*1709Smlf 
1652*1709Smlf 		if (vtoctag == V_ALTSCTR && slcn > 1)
1653*1709Smlf 			break;
1654*1709Smlf 	}
1655*1709Smlf 	if (alts >= NDKMAP) {
1656*1709Smlf 		goto empty;	/* no V_ALTSCTR slice defined */
1657*1709Smlf 	}
1658*1709Smlf 
1659*1709Smlf 	/* read in ALTS label block */
1660*1709Smlf 	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
1661*1709Smlf 	if (!handle) {
1662*1709Smlf 		goto empty;
1663*1709Smlf 	}
1664*1709Smlf 
1665*1709Smlf 	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1666*1709Smlf 	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
1667*1709Smlf 		goto empty;
1668*1709Smlf 	}
1669*1709Smlf 
1670*1709Smlf 	altused = ap->alts_ent_used;	/* number of BB entries */
1671*1709Smlf 	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
1672*1709Smlf 	altlast = ap->alts_ent_end;	/* blk offset to last block */
1673*1709Smlf 	/* the remap table occupies blocks altbase..altlast of the slice */
1674*1709Smlf 
1675*1709Smlf 	if (altused == 0 ||
1676*1709Smlf 	    altbase < 1 ||
1677*1709Smlf 	    altbase > altlast ||
1678*1709Smlf 	    altlast >= slcn) {
1679*1709Smlf 		goto empty;
1680*1709Smlf 	}
1681*1709Smlf 	(void) dadk_iob_free(DKTP_DATA, handle);
1682*1709Smlf 
1683*1709Smlf 	/* read in ALTS remapping table */
1684*1709Smlf 	handle = dadk_iob_alloc(DKTP_DATA,
1685*1709Smlf 	    slcb + altbase,
1686*1709Smlf 	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
1687*1709Smlf 	if (!handle) {
1688*1709Smlf 		goto empty;
1689*1709Smlf 	}
1690*1709Smlf 
1691*1709Smlf 	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1692*1709Smlf 	if (!enttblp) {
1693*1709Smlf 		goto empty;
1694*1709Smlf 	}
1695*1709Smlf 
1696*1709Smlf 	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1697*1709Smlf 
1698*1709Smlf 	/* allocate dk_slc_cnt/dk_slc_ent; size must match cmdk_bbh_free_alts */
1699*1709Smlf 	if (dkp->dk_slc_cnt == NULL) {
1700*1709Smlf 		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
1701*1709Smlf 		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
1702*1709Smlf 	}
1703*1709Smlf 	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);
1704*1709Smlf 
1705*1709Smlf 	/* free previous BB table (if any) */
1706*1709Smlf 	if (dkp->dk_alts_hdl) {
1707*1709Smlf 		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1708*1709Smlf 		dkp->dk_alts_hdl = NULL;
1709*1709Smlf 		dkp->dk_altused = 0;
1710*1709Smlf 	}
1711*1709Smlf 
1712*1709Smlf 	/* save linkage to new BB table */
1713*1709Smlf 	dkp->dk_alts_hdl = handle;
1714*1709Smlf 	dkp->dk_altused = altused;
1715*1709Smlf 
1716*1709Smlf 	/*
1717*1709Smlf 	 * build indexes to BB table by slice
1718*1709Smlf 	 * effectively we have
1719*1709Smlf 	 *	struct alts_ent *enttblp[altused];
1720*1709Smlf 	 *
1721*1709Smlf 	 *	uint32_t	dk_slc_cnt[NDKMAP];
1722*1709Smlf 	 *	struct alts_ent *dk_slc_ent[NDKMAP];
1723*1709Smlf 	 */
1724*1709Smlf 	for (i = 0; i < NDKMAP; i++) {
1725*1709Smlf 		if (cmlb_partinfo(
1726*1709Smlf 		    dkp->dk_cmlbhandle,
1727*1709Smlf 		    i,
1728*1709Smlf 		    &slcn,
1729*1709Smlf 		    &slcb,
1730*1709Smlf 		    NULL,
1731*1709Smlf 		    NULL)) {
1732*1709Smlf 			goto empty1;
1733*1709Smlf 		}
1734*1709Smlf 
1735*1709Smlf 		dkp->dk_slc_cnt[i] = 0;
1736*1709Smlf 		if (slcn == 0)
1737*1709Smlf 			continue;	/* slice is not allocated */
1738*1709Smlf 
1739*1709Smlf 		/* last block in slice */
1740*1709Smlf 		slce = slcb + slcn - 1;
1741*1709Smlf 
1742*1709Smlf 		/* find the first remap entry at or after the start of slice */
1743*1709Smlf 		for (j = 0; j < altused; j++) {
1744*1709Smlf 			if (enttblp[j].bad_end >= slcb)
1745*1709Smlf 				break;
1746*1709Smlf 		}
1747*1709Smlf 		dkp->dk_slc_ent[i] = enttblp + j;
1748*1709Smlf 
1749*1709Smlf 		/* count remap entries until the end of the slice */
1750*1709Smlf 		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
1751*1709Smlf 			dkp->dk_slc_cnt[i] += 1;
1752*1709Smlf 		}
1753*1709Smlf 	}
1754*1709Smlf 
1755*1709Smlf 	rw_exit(&dkp->dk_bbh_mutex);
1756*1709Smlf 	return;
1757*1709Smlf 
1758*1709Smlf empty:
1759*1709Smlf 	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1760*1709Smlf empty1:
1761*1709Smlf 	if (handle && handle != dkp->dk_alts_hdl)
1762*1709Smlf 		(void) dadk_iob_free(DKTP_DATA, handle);
1763*1709Smlf 
1764*1709Smlf 	if (dkp->dk_alts_hdl) {
1765*1709Smlf 		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1766*1709Smlf 		dkp->dk_alts_hdl = NULL;
1767*1709Smlf 	}
1768*1709Smlf 
1769*1709Smlf 	rw_exit(&dkp->dk_bbh_mutex);
1770*1709Smlf }
1771*1709Smlf 
1772*1709Smlf /*ARGSUSED*/
1773*1709Smlf static bbh_cookie_t
1774*1709Smlf cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
1775*1709Smlf {
1776*1709Smlf 	struct	bbh_handle *hp;
1777*1709Smlf 	bbh_cookie_t ckp;
1778*1709Smlf 
1779*1709Smlf 	hp = (struct  bbh_handle *)handle;
1780*1709Smlf 	ckp = hp->h_cktab + hp->h_idx;
1781*1709Smlf 	hp->h_idx++;
1782*1709Smlf 	return (ckp);
1783*1709Smlf }
1784*1709Smlf 
1785*1709Smlf /*ARGSUSED*/
1786*1709Smlf static void
1787*1709Smlf cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
1788*1709Smlf {
1789*1709Smlf 	struct	bbh_handle *hp;
1790*1709Smlf 
1791*1709Smlf 	hp = (struct  bbh_handle *)handle;
1792*1709Smlf 	kmem_free(handle, (sizeof (struct bbh_handle) +
1793*1709Smlf 	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
1794*1709Smlf }
1795*1709Smlf 
1796*1709Smlf 
1797*1709Smlf /*
1798*1709Smlf  *	cmdk_bbh_gethandle remaps the bad sectors to alternates.
1799*1709Smlf  *	There are 7 different cases to consider when a bad sector
1800*1709Smlf  *	cluster is compared with a disk section (one I/O request).
1801*1709Smlf  *
1802*1709Smlf  *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
1803*1709Smlf  *	case 1:			   ddddd
1804*1709Smlf  *	case 2:				   -d-----
1805*1709Smlf  *	case 3:					     ddddd
1806*1709Smlf  *	case 4:			         dddddddddddd
1807*1709Smlf  *	case 5:			      ddddddd-----
1808*1709Smlf  *	case 6:			           ---ddddddd
1809*1709Smlf  *	case 7:			           ddddddd
1810*1709Smlf  *
1811*1709Smlf  *	where:  g = good sector,	b = bad sector
1812*1709Smlf  *		d = sector in disk section
1813*1709Smlf  *		- = the disk section may be extended to cover that disk area
1814*1709Smlf  */
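
/*
 * Worked example (added for illustration, CASE 4 above): a request for
 * sectors 100..119 (d_count = 20) that spans a bad cluster 105..107
 * remapped to alternates starting at sector 5000 is split into three
 * cookies:
 *
 *	cookie 0: ck_sector =  100, ck_seclen =  5  (good 100..104)
 *	cookie 1: ck_sector = 5000, ck_seclen =  3  (alternates, 105..107)
 *	cookie 2: ck_sector =  108, ck_seclen = 12  (good 108..119)
 *
 * The caller then consumes the cookies one at a time through
 * cmdk_bbh_htoc().
 */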
1815*1709Smlf 
1816*1709Smlf static opaque_t
1817*1709Smlf cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
1818*1709Smlf {
1819*1709Smlf 	struct cmdk		*dkp = (struct cmdk *)bbh_data;
1820*1709Smlf 	struct bbh_handle	*hp;
1821*1709Smlf 	struct bbh_cookie	*ckp;
1822*1709Smlf 	struct alts_ent		*altp;
1823*1709Smlf 	uint32_t		alts_used;
1824*1709Smlf 	uint32_t		part = CMDKPART(bp->b_edev);
1825*1709Smlf 	daddr32_t		lastsec;
1826*1709Smlf 	long			d_count;
1827*1709Smlf 	int			i;
1828*1709Smlf 	int			idx;
1829*1709Smlf 	int			cnt;
1830*1709Smlf 
1831*1709Smlf 	if (part >= V_NUMPAR)
1832*1709Smlf 		return (NULL);
1833*1709Smlf 
1834*1709Smlf 	/*
1835*1709Smlf 	 * Reading dk_alts_hdl is a single atomic load, and the test
1836*1709Smlf 	 * succeeds whenever there are no bad blocks (the common case),
1837*1709Smlf 	 *
1838*1709Smlf 	 * so the test is made outside of the rw_enter for speed
1839*1709Smlf 	 * and then repeated inside the rw_enter for safety
1840*1709Smlf 	 */
1841*1709Smlf 	if (!dkp->dk_alts_hdl) {
1842*1709Smlf 		return (NULL);
1843*1709Smlf 	}
1844*1709Smlf 
1845*1709Smlf 	rw_enter(&dkp->dk_bbh_mutex, RW_READER);
1846*1709Smlf 
1847*1709Smlf 	if (dkp->dk_alts_hdl == NULL) {
1848*1709Smlf 		rw_exit(&dkp->dk_bbh_mutex);
1849*1709Smlf 		return (NULL);
1850*1709Smlf 	}
1851*1709Smlf 
1852*1709Smlf 	alts_used = dkp->dk_slc_cnt[part];
1853*1709Smlf 	if (alts_used == 0) {
1854*1709Smlf 		rw_exit(&dkp->dk_bbh_mutex);
1855*1709Smlf 		return (NULL);
1856*1709Smlf 	}
1857*1709Smlf 	altp = dkp->dk_slc_ent[part];
1858*1709Smlf 
1859*1709Smlf 	/*
1860*1709Smlf 	 * binary search the alternate entry table for the first entry
1861*1709Smlf 	 * whose bad range contains or follows the request's first sector
1862*1709Smlf 	 */
1863*1709Smlf 	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
1864*1709Smlf 	/* if starting sector is > the largest bad sector, return */
1865*1709Smlf 	if (i == -1) {
1866*1709Smlf 		rw_exit(&dkp->dk_bbh_mutex);
1867*1709Smlf 		return (NULL);
1868*1709Smlf 	}
1869*1709Smlf 	/* i is the starting index.  Set altp to the starting entry addr */
1870*1709Smlf 	altp += i;
1871*1709Smlf 
1872*1709Smlf 	d_count = bp->b_bcount >> SCTRSHFT;
1873*1709Smlf 	lastsec = GET_BP_SEC(bp) + d_count - 1;
1874*1709Smlf 
1875*1709Smlf 	/* calculate the number of bad sectors */
1876*1709Smlf 	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
1877*1709Smlf 		if (lastsec < altp->bad_start)
1878*1709Smlf 			break;
1879*1709Smlf 	}
1880*1709Smlf 
1881*1709Smlf 	if (!cnt) {
1882*1709Smlf 		rw_exit(&dkp->dk_bbh_mutex);
1883*1709Smlf 		return (NULL);
1884*1709Smlf 	}
1885*1709Smlf 
1886*1709Smlf 	/* worst case: each overlapping entry splits the request (2n + 1) */
1887*1709Smlf 	cnt <<= 1;
1888*1709Smlf 	cnt++;
1889*1709Smlf 
1890*1709Smlf 	/* allocate the handle */
1891*1709Smlf 	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
1892*1709Smlf 	    (cnt * sizeof (*ckp))), KM_SLEEP);
1893*1709Smlf 
1894*1709Smlf 	hp->h_idx = 0;
1895*1709Smlf 	hp->h_totck = cnt;
1896*1709Smlf 	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
1897*1709Smlf 	ckp[0].ck_sector = GET_BP_SEC(bp);
1898*1709Smlf 	ckp[0].ck_seclen = d_count;
1899*1709Smlf 
1900*1709Smlf 	altp = dkp->dk_slc_ent[part];
1901*1709Smlf 	altp += i;
1902*1709Smlf 	for (idx = 0; i < alts_used; i++, altp++) {
1903*1709Smlf 		/* CASE 1: */
1904*1709Smlf 		if (lastsec < altp->bad_start)
1905*1709Smlf 			break;
1906*1709Smlf 
1907*1709Smlf 		/* CASE 3: */
1908*1709Smlf 		if (ckp[idx].ck_sector > altp->bad_end)
1909*1709Smlf 			continue;
1910*1709Smlf 
1911*1709Smlf 		/* CASE 2 and 7: */
1912*1709Smlf 		if ((ckp[idx].ck_sector >= altp->bad_start) &&
1913*1709Smlf 		    (lastsec <= altp->bad_end)) {
1914*1709Smlf 			ckp[idx].ck_sector = altp->good_start +
1915*1709Smlf 			    ckp[idx].ck_sector - altp->bad_start;
1916*1709Smlf 			break;
1917*1709Smlf 		}
1918*1709Smlf 
1919*1709Smlf 		/* at least one bad sector in our section: split the request */
1920*1709Smlf 		/* CASE 5: */
1921*1709Smlf 		if ((lastsec >= altp->bad_start) &&
1922*1709Smlf 		    (lastsec <= altp->bad_end)) {
1923*1709Smlf 			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
1924*1709Smlf 			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
1925*1709Smlf 			ckp[idx+1].ck_sector = altp->good_start;
1926*1709Smlf 			break;
1927*1709Smlf 		}
1928*1709Smlf 		/* CASE 6: */
1929*1709Smlf 		if ((ckp[idx].ck_sector <= altp->bad_end) &&
1930*1709Smlf 		    (ckp[idx].ck_sector >= altp->bad_start)) {
1931*1709Smlf 			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
1932*1709Smlf 			ckp[idx].ck_seclen = altp->bad_end -
1933*1709Smlf 			    ckp[idx].ck_sector + 1;
1934*1709Smlf 			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
1935*1709Smlf 			ckp[idx].ck_sector = altp->good_start +
1936*1709Smlf 			    ckp[idx].ck_sector - altp->bad_start;
1937*1709Smlf 			idx++;
1938*1709Smlf 			ckp[idx].ck_sector = altp->bad_end + 1;
1939*1709Smlf 			continue;	/* check rest of section */
1940*1709Smlf 		}
1941*1709Smlf 
1942*1709Smlf 		/* CASE 4: */
1943*1709Smlf 		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
1944*1709Smlf 		ckp[idx+1].ck_sector = altp->good_start;
1945*1709Smlf 		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
1946*1709Smlf 		idx += 2;
1947*1709Smlf 		ckp[idx].ck_sector = altp->bad_end + 1;
1948*1709Smlf 		ckp[idx].ck_seclen = lastsec - altp->bad_end;
1949*1709Smlf 	}
1950*1709Smlf 
1951*1709Smlf 	rw_exit(&dkp->dk_bbh_mutex);
1952*1709Smlf 	return ((opaque_t)hp);
1953*1709Smlf }
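
#ifdef CMDK_DEBUG
/*
 * Illustrative sketch, not part of the original driver: dump the cookie
 * table built by cmdk_bbh_gethandle() above.  Trailing unused cookies
 * are zero-filled because the handle is kmem_zalloc'ed for the worst
 * case of 2n + 1 cookies for n overlapping bad clusters.
 */
static void
cmdk_bbh_dumphandle(struct bbh_handle *hp)
{
	int	i;

	for (i = 0; i < hp->h_totck; i++) {
		cmn_err(CE_CONT, "?cookie %d: sector %lld seclen %lld\n",
		    i, (longlong_t)hp->h_cktab[i].ck_sector,
		    (longlong_t)hp->h_cktab[i].ck_seclen);
	}
}
#endif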
1954*1709Smlf 
1955*1709Smlf static int
1956*1709Smlf cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
1957*1709Smlf {
1958*1709Smlf 	int	i;
1959*1709Smlf 	int	ind;
1960*1709Smlf 	int	interval;
1961*1709Smlf 	int	mystatus = -1;
1962*1709Smlf 
1963*1709Smlf 	if (!cnt)
1964*1709Smlf 		return (mystatus);
1965*1709Smlf 
1966*1709Smlf 	ind = 1; /* compiler complains about possible uninitialized var	*/
1967*1709Smlf 	for (i = 1; i <= cnt; i <<= 1)
1968*1709Smlf 		ind = i;
1969*1709Smlf 
1970*1709Smlf 	for (interval = ind; interval; ) {
1971*1709Smlf 		if ((key >= buf[ind-1].bad_start) &&
1972*1709Smlf 		    (key <= buf[ind-1].bad_end)) {
1973*1709Smlf 			return (ind-1);
1974*1709Smlf 		} else {
1975*1709Smlf 			interval >>= 1;
1976*1709Smlf 			if (key < buf[ind-1].bad_start) {
1977*1709Smlf 				/* record the largest bad sector index */
1978*1709Smlf 				mystatus = ind-1;
1979*1709Smlf 				if (!interval)
1980*1709Smlf 					break;
1981*1709Smlf 				ind = ind - interval;
1982*1709Smlf 			} else {
1983*1709Smlf 				/*
1984*1709Smlf 				 * if key is larger than the last element
1985*1709Smlf 				 * then break
1986*1709Smlf 				 */
1987*1709Smlf 				if ((ind == cnt) || !interval)
1988*1709Smlf 					break;
1989*1709Smlf 				if ((ind+interval) <= cnt)
1990*1709Smlf 					ind += interval;
1991*1709Smlf 			}
1992*1709Smlf 		}
1993*1709Smlf 	}
1994*1709Smlf 	return (mystatus);
1995*1709Smlf }
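
#ifdef CMDK_DEBUG
/*
 * Illustrative sketch, not part of the original driver: the linear
 * equivalent of cmdk_bbh_bsearch() above.  Assuming the entries are
 * sorted by ascending, non-overlapping bad ranges, both functions
 * return the index of the first entry whose range contains or follows
 * key, or -1 when key lies beyond every bad range.
 */
static int
cmdk_bbh_lsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;

	for (i = 0; i < cnt; i++) {
		if (key <= buf[i].bad_end)
			return (i);	/* key inside or before entry i */
	}
	return (-1);			/* key beyond all bad ranges */
}
#endif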
1996