xref: /onnv-gate/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 1709:39a1331cb1e3)
1*1709Smlf /*
2*1709Smlf  * CDDL HEADER START
3*1709Smlf  *
4*1709Smlf  * The contents of this file are subject to the terms of the
5*1709Smlf  * Common Development and Distribution License (the "License").
6*1709Smlf  * You may not use this file except in compliance with the License.
7*1709Smlf  *
8*1709Smlf  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*1709Smlf  * or http://www.opensolaris.org/os/licensing.
10*1709Smlf  * See the License for the specific language governing permissions
11*1709Smlf  * and limitations under the License.
12*1709Smlf  *
13*1709Smlf  * When distributing Covered Code, include this CDDL HEADER in each
14*1709Smlf  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*1709Smlf  * If applicable, add the following below this CDDL HEADER, with the
16*1709Smlf  * fields enclosed by brackets "[]" replaced with your own identifying
17*1709Smlf  * information: Portions Copyright [yyyy] [name of copyright owner]
18*1709Smlf  *
19*1709Smlf  * CDDL HEADER END
20*1709Smlf  */
21*1709Smlf 
22*1709Smlf /*
23*1709Smlf  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*1709Smlf  * Use is subject to license terms.
25*1709Smlf  */
26*1709Smlf 
27*1709Smlf #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*1709Smlf 
29*1709Smlf /*
30*1709Smlf  * Direct Attached Disk
31*1709Smlf  */
32*1709Smlf 
33*1709Smlf #include <sys/file.h>
34*1709Smlf #include <sys/scsi/scsi.h>
35*1709Smlf #include <sys/var.h>
36*1709Smlf #include <sys/proc.h>
37*1709Smlf #include <sys/dktp/cm.h>
38*1709Smlf #include <sys/vtoc.h>
39*1709Smlf #include <sys/dkio.h>
40*1709Smlf 
41*1709Smlf #include <sys/dktp/dadev.h>
42*1709Smlf #include <sys/dktp/fctypes.h>
43*1709Smlf #include <sys/dktp/flowctrl.h>
44*1709Smlf #include <sys/dktp/tgcom.h>
45*1709Smlf #include <sys/dktp/tgdk.h>
46*1709Smlf #include <sys/dktp/bbh.h>
47*1709Smlf #include <sys/dktp/dadkio.h>
48*1709Smlf #include <sys/dktp/dadk.h>
49*1709Smlf #include <sys/cdio.h>
50*1709Smlf 
51*1709Smlf /*
52*1709Smlf  * Local Function Prototypes
53*1709Smlf  */
54*1709Smlf static void dadk_restart(void *pktp);
55*1709Smlf static void dadk_pktcb(struct cmpkt *pktp);
56*1709Smlf static void dadk_iodone(struct buf *bp);
57*1709Smlf static void dadk_polldone(struct buf *bp);
58*1709Smlf static void dadk_setcap(struct dadk *dadkp);
59*1709Smlf 
60*1709Smlf static int dadk_chkerr(struct cmpkt *pktp);
61*1709Smlf static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
62*1709Smlf static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
63*1709Smlf static int dadk_ioretry(struct cmpkt *pktp, int action);
64*1709Smlf 
65*1709Smlf static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
66*1709Smlf     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
67*1709Smlf     caddr_t arg);
68*1709Smlf 
69*1709Smlf static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
70*1709Smlf     caddr_t arg);
71*1709Smlf static void dadk_transport(opaque_t com_data, struct buf *bp);
72*1709Smlf 
73*1709Smlf struct tgcom_objops dadk_com_ops = {
74*1709Smlf 	nodev,
75*1709Smlf 	nodev,
76*1709Smlf 	dadk_pkt,
77*1709Smlf 	dadk_transport,
78*1709Smlf 	0, 0
79*1709Smlf };
80*1709Smlf 
81*1709Smlf /*
82*1709Smlf  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
83*1709Smlf  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
84*1709Smlf  * to dadk_sgl_size during _init().
85*1709Smlf  */
86*1709Smlf #if defined(__sparc)
87*1709Smlf static ddi_dma_attr_t dadk_alloc_attr = {
88*1709Smlf 	DMA_ATTR_V0,	/* version number */
89*1709Smlf 	0x0,		/* lowest usable address */
90*1709Smlf 	0xFFFFFFFFull,	/* high DMA address range */
91*1709Smlf 	0xFFFFFFFFull,	/* DMA counter register */
92*1709Smlf 	1,		/* DMA address alignment */
93*1709Smlf 	1,		/* DMA burstsizes */
94*1709Smlf 	1,		/* min effective DMA size */
95*1709Smlf 	0xFFFFFFFFull,	/* max DMA xfer size */
96*1709Smlf 	0xFFFFFFFFull,	/* segment boundary */
97*1709Smlf 	1,		/* s/g list length */
98*1709Smlf 	512,		/* granularity of device */
99*1709Smlf 	0,		/* DMA transfer flags */
100*1709Smlf };
101*1709Smlf #elif defined(__x86)
102*1709Smlf static ddi_dma_attr_t dadk_alloc_attr = {
103*1709Smlf 	DMA_ATTR_V0,	/* version number */
104*1709Smlf 	0x0,		/* lowest usable address */
105*1709Smlf 	0x0,		/* high DMA address range [set in _init()] */
106*1709Smlf 	0xFFFFull,	/* DMA counter register */
107*1709Smlf 	512,		/* DMA address alignment */
108*1709Smlf 	1,		/* DMA burstsizes */
109*1709Smlf 	1,		/* min effective DMA size */
110*1709Smlf 	0xFFFFFFFFull,	/* max DMA xfer size */
111*1709Smlf 	0xFFFFFFFFull,	/* segment boundary */
112*1709Smlf 	0,		/* s/g list length [set in _init()] */
113*1709Smlf 	512,		/* granularity of device */
114*1709Smlf 	0,		/* DMA transfer flags */
115*1709Smlf };
116*1709Smlf 
117*1709Smlf uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
118*1709Smlf int dadk_sgl_size = 0xFF;
119*1709Smlf #endif
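
/*
 * dadk_max_phys_addr and dadk_sgl_size (x86 only) are ordinary module
 * globals, so they can be tuned without rebuilding the driver; a sketch,
 * assuming the usual /etc/system "set module:variable" tunable syntax
 * for the dadk misc module:
 *
 *	set dadk:dadk_max_phys_addr = 0xFFFFFFF
 *	set dadk:dadk_sgl_size = 0x80
 *
 * The values are copied into dadk_alloc_attr when _init() runs at
 * module load time, so changes take effect on the next load.
 */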
120*1709Smlf 
121*1709Smlf static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
122*1709Smlf     int silent);
123*1709Smlf static void dadk_rmb_iodone(struct buf *bp);
124*1709Smlf 
125*1709Smlf static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
126*1709Smlf     dev_t dev, enum uio_seg dataspace, int rw);
127*1709Smlf static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
128*1709Smlf     struct buf *bp);
129*1709Smlf static void dadkmin(struct buf *bp);
130*1709Smlf static int dadk_dk_strategy(struct buf *bp);
131*1709Smlf static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
132*1709Smlf 
133*1709Smlf struct tgdk_objops dadk_ops = {
134*1709Smlf 	dadk_init,
135*1709Smlf 	dadk_free,
136*1709Smlf 	dadk_probe,
137*1709Smlf 	dadk_attach,
138*1709Smlf 	dadk_open,
139*1709Smlf 	dadk_close,
140*1709Smlf 	dadk_ioctl,
141*1709Smlf 	dadk_strategy,
142*1709Smlf 	dadk_setgeom,
143*1709Smlf 	dadk_getgeom,
144*1709Smlf 	dadk_iob_alloc,
145*1709Smlf 	dadk_iob_free,
146*1709Smlf 	dadk_iob_htoc,
147*1709Smlf 	dadk_iob_xfer,
148*1709Smlf 	dadk_dump,
149*1709Smlf 	dadk_getphygeom,
150*1709Smlf 	dadk_set_bbhobj,
151*1709Smlf 	dadk_check_media,
152*1709Smlf 	dadk_inquiry,
153*1709Smlf 	dadk_cleanup,
154*1709Smlf 	0
155*1709Smlf };
156*1709Smlf 
157*1709Smlf /*
158*1709Smlf  * Local static data
159*1709Smlf  */
160*1709Smlf 
161*1709Smlf #ifdef	DADK_DEBUG
162*1709Smlf #define	DENT	0x0001
163*1709Smlf #define	DERR	0x0002
164*1709Smlf #define	DIO	0x0004
165*1709Smlf #define	DGEOM	0x0010
166*1709Smlf #define	DSTATE  0x0020
167*1709Smlf static	int	dadk_debug = DGEOM;
168*1709Smlf 
169*1709Smlf #endif	/* DADK_DEBUG */
170*1709Smlf 
171*1709Smlf static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
172*1709Smlf static int dadk_dk_maxphys = 0x80000;
173*1709Smlf 
174*1709Smlf static char	*dadk_cmds[] = {
175*1709Smlf 	"\000Unknown",			/* unknown 		*/
176*1709Smlf 	"\001read sector",		/* DCMD_READ 1		*/
177*1709Smlf 	"\002write sector",		/* DCMD_WRITE 2		*/
178*1709Smlf 	"\003format track",		/* DCMD_FMTTRK 3	*/
179*1709Smlf 	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
180*1709Smlf 	"\005recalibrate",		/* DCMD_RECAL  5	*/
181*1709Smlf 	"\006seek sector",		/* DCMD_SEEK   6	*/
182*1709Smlf 	"\007read verify",		/* DCMD_RDVER  7	*/
183*1709Smlf 	"\010read defect list",		/* DCMD_GETDEF 8	*/
184*1709Smlf 	"\011lock door",		/* DCMD_LOCK   9	*/
185*1709Smlf 	"\012unlock door",		/* DCMD_UNLOCK 10	*/
186*1709Smlf 	"\013start motor",		/* DCMD_START_MOTOR 11	*/
187*1709Smlf 	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
188*1709Smlf 	"\015eject",			/* DCMD_EJECT  13	*/
189*1709Smlf 	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
190*1709Smlf 	"\017get state",		/* DCMD_GET_STATE  15	*/
191*1709Smlf 	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
192*1709Smlf 	"\021cdrom resume",		/* DCMD_RESUME  17	*/
193*1709Smlf 	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
194*1709Smlf 	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
195*1709Smlf 	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
196*1709Smlf 	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
197*1709Smlf 	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
198*1709Smlf 	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
199*1709Smlf 	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
200*1709Smlf 	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
201*1709Smlf 	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
202*1709Smlf 	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
203*1709Smlf 	NULL
204*1709Smlf };
205*1709Smlf 
206*1709Smlf static char *dadk_sense[] = {
207*1709Smlf 	"\000Success",			/* DERR_SUCCESS		*/
208*1709Smlf 	"\001address mark not found",	/* DERR_AMNF		*/
209*1709Smlf 	"\002track 0 not found",	/* DERR_TKONF		*/
210*1709Smlf 	"\003aborted command",		/* DERR_ABORT		*/
211*1709Smlf 	"\004write fault",		/* DERR_DWF		*/
212*1709Smlf 	"\005ID not found",		/* DERR_IDNF		*/
213*1709Smlf 	"\006drive busy",		/* DERR_BUSY		*/
214*1709Smlf 	"\007uncorrectable data error",	/* DERR_UNC		*/
215*1709Smlf 	"\010bad block detected",	/* DERR_BBK		*/
216*1709Smlf 	"\011invalid command",		/* DERR_INVCDB		*/
217*1709Smlf 	"\012device hard error",	/* DERR_HARD		*/
218*1709Smlf 	"\013illegal length indicated", /* DERR_ILI		*/
219*1709Smlf 	"\014end of media",		/* DERR_EOM		*/
220*1709Smlf 	"\015media change requested",	/* DERR_MCR		*/
221*1709Smlf 	"\016recovered from error",	/* DERR_RECOVER		*/
222*1709Smlf 	"\017device not ready",		/* DERR_NOTREADY	*/
223*1709Smlf 	"\020medium error",		/* DERR_MEDIUM		*/
224*1709Smlf 	"\021hardware error",		/* DERR_HW		*/
225*1709Smlf 	"\022illegal request",		/* DERR_ILL		*/
226*1709Smlf 	"\023unit attention",		/* DERR_UNIT_ATTN	*/
227*1709Smlf 	"\024data protection",		/* DERR_DATA_PROT	*/
228*1709Smlf 	"\025miscompare",		/* DERR_MISCOMPARE	*/
229*1709Smlf 	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
230*1709Smlf 	"\027reserved",			/* DERR_RESV		*/
231*1709Smlf 	NULL
232*1709Smlf };
233*1709Smlf 
234*1709Smlf static char *dadk_name = "Disk";
235*1709Smlf 
236*1709Smlf /*
237*1709Smlf  *	This is the loadable module wrapper
238*1709Smlf  */
239*1709Smlf #include <sys/modctl.h>
240*1709Smlf 
241*1709Smlf extern struct mod_ops mod_miscops;
242*1709Smlf 
243*1709Smlf static struct modlmisc modlmisc = {
244*1709Smlf 	&mod_miscops,	/* Type of module */
245*1709Smlf 	"Direct Attached Disk %I%"
246*1709Smlf };
247*1709Smlf 
248*1709Smlf static struct modlinkage modlinkage = {
249*1709Smlf 	MODREV_1, (void *)&modlmisc, NULL
250*1709Smlf };
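
/*
 * dadk is delivered as a miscellaneous module (mod_miscops): it exports
 * no dev_ops of its own and is loaded on behalf of disk target drivers
 * such as cmdk, which hands DIOCTL_RWCMD requests to dadk_ioctl() below.
 */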
251*1709Smlf 
252*1709Smlf int
253*1709Smlf _init(void)
254*1709Smlf {
255*1709Smlf #ifdef DADK_DEBUG
256*1709Smlf 	if (dadk_debug & DENT)
257*1709Smlf 		PRF("dadk_init: call\n");
258*1709Smlf #endif
259*1709Smlf 
260*1709Smlf #if defined(__x86)
261*1709Smlf 	/* set the max physical address for iob allocs on x86 */
262*1709Smlf 	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;
263*1709Smlf 
264*1709Smlf 	/*
265*1709Smlf 	 * set the sgllen for iob allocs on x86. If this is set to less than
266*1709Smlf 	 * the number of pages the buffer will take (taking into account
267*1709Smlf 	 * alignment), it would force the allocator to try to allocate
268*1709Smlf 	 * contiguous pages.
269*1709Smlf 	 */
270*1709Smlf 	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
271*1709Smlf #endif
272*1709Smlf 
273*1709Smlf 	return (mod_install(&modlinkage));
274*1709Smlf }
275*1709Smlf 
276*1709Smlf int
277*1709Smlf _fini(void)
278*1709Smlf {
279*1709Smlf #ifdef DADK_DEBUG
280*1709Smlf 	if (dadk_debug & DENT)
281*1709Smlf 		PRF("dadk_fini: call\n");
282*1709Smlf #endif
283*1709Smlf 
284*1709Smlf 	return (mod_remove(&modlinkage));
285*1709Smlf }
286*1709Smlf 
287*1709Smlf int
288*1709Smlf _info(struct modinfo *modinfop)
289*1709Smlf {
290*1709Smlf 	return (mod_info(&modlinkage, modinfop));
291*1709Smlf }
292*1709Smlf 
293*1709Smlf struct tgdk_obj *
294*1709Smlf dadk_create()
295*1709Smlf {
296*1709Smlf 	struct tgdk_obj *dkobjp;
297*1709Smlf 	struct dadk *dadkp;
298*1709Smlf 
299*1709Smlf 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
300*1709Smlf 	if (!dkobjp)
301*1709Smlf 		return (NULL);
302*1709Smlf 	dadkp = (struct dadk *)(dkobjp+1);
303*1709Smlf 
304*1709Smlf 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
305*1709Smlf 	dkobjp->tg_data = (opaque_t)dadkp;
306*1709Smlf 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
307*1709Smlf 	dadkp->dad_extp = &(dkobjp->tg_extblk);
308*1709Smlf 
309*1709Smlf #ifdef DADK_DEBUG
310*1709Smlf 	if (dadk_debug & DENT)
311*1709Smlf 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
312*1709Smlf #endif
313*1709Smlf 	return (dkobjp);
314*1709Smlf }
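
/*
 * Sketch of how a target driver obtains and initializes a dadk instance
 * (assuming the TGDK_INIT() wrapper from <sys/dktp/tgdk.h>; error
 * handling omitted):
 *
 *	struct tgdk_obj *dkobjp = dadk_create();
 *
 *	if (dkobjp == NULL ||
 *	    TGDK_INIT(dkobjp, devp, flcobjp, queobjp, bbhobjp, lkarg) !=
 *	    DDI_SUCCESS)
 *		... fail ...
 *
 * dadk_create() carves both the tgdk_obj and the struct dadk out of a
 * single kmem allocation, which dadk_free() later releases as a whole.
 */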
315*1709Smlf 
316*1709Smlf int
317*1709Smlf dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
318*1709Smlf 	opaque_t bbhobjp, void *lkarg)
319*1709Smlf {
320*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
321*1709Smlf 	struct scsi_device *sdevp = (struct scsi_device *)devp;
322*1709Smlf 
323*1709Smlf 	dadkp->dad_sd = devp;
324*1709Smlf 	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
325*1709Smlf 	sdevp->sd_private = (caddr_t)dadkp;
326*1709Smlf 
327*1709Smlf 	/* initialize the communication object */
328*1709Smlf 	dadkp->dad_com.com_data = (opaque_t)dadkp;
329*1709Smlf 	dadkp->dad_com.com_ops  = &dadk_com_ops;
330*1709Smlf 
331*1709Smlf 	dadkp->dad_bbhobjp = bbhobjp;
332*1709Smlf 	BBH_INIT(bbhobjp);
333*1709Smlf 
334*1709Smlf 	dadkp->dad_flcobjp = flcobjp;
335*1709Smlf 	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
336*1709Smlf }
337*1709Smlf 
338*1709Smlf int
339*1709Smlf dadk_free(struct tgdk_obj *dkobjp)
340*1709Smlf {
341*1709Smlf 	TGDK_CLEANUP(dkobjp);
342*1709Smlf 	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
343*1709Smlf 
344*1709Smlf 	return (DDI_SUCCESS);
345*1709Smlf }
346*1709Smlf 
347*1709Smlf void
348*1709Smlf dadk_cleanup(struct tgdk_obj *dkobjp)
349*1709Smlf {
350*1709Smlf 	struct dadk *dadkp;
351*1709Smlf 
352*1709Smlf 	dadkp = (struct dadk *)(dkobjp->tg_data);
353*1709Smlf 	if (dadkp->dad_sd)
354*1709Smlf 		dadkp->dad_sd->sd_private = NULL;
355*1709Smlf 	if (dadkp->dad_bbhobjp) {
356*1709Smlf 		BBH_FREE(dadkp->dad_bbhobjp);
357*1709Smlf 		dadkp->dad_bbhobjp = NULL;
358*1709Smlf 	}
359*1709Smlf 	if (dadkp->dad_flcobjp) {
360*1709Smlf 		FLC_FREE(dadkp->dad_flcobjp);
361*1709Smlf 		dadkp->dad_flcobjp = NULL;
362*1709Smlf 	}
363*1709Smlf }
364*1709Smlf 
365*1709Smlf /* ARGSUSED */
366*1709Smlf int
367*1709Smlf dadk_probe(opaque_t objp, int kmsflg)
368*1709Smlf {
369*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
370*1709Smlf 	struct scsi_device *devp;
371*1709Smlf 	char   name[80];
372*1709Smlf 
373*1709Smlf 	devp = dadkp->dad_sd;
374*1709Smlf 	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
375*1709Smlf 		(devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
376*1709Smlf 		return (DDI_PROBE_FAILURE);
377*1709Smlf 	}
378*1709Smlf 
379*1709Smlf 	switch (devp->sd_inq->inq_dtype) {
380*1709Smlf 		case DTYPE_DIRECT:
381*1709Smlf 			dadkp->dad_ctype = DKC_DIRECT;
382*1709Smlf 			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
383*1709Smlf 			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
384*1709Smlf 			break;
385*1709Smlf 		case DTYPE_RODIRECT: /* eg cdrom */
386*1709Smlf 			dadkp->dad_ctype = DKC_CDROM;
387*1709Smlf 			dadkp->dad_extp->tg_rdonly = 1;
388*1709Smlf 			dadkp->dad_rdonly = 1;
389*1709Smlf 			dadkp->dad_cdrom = 1;
390*1709Smlf 			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
391*1709Smlf 			dadkp->dad_extp->tg_ctype = DKC_CDROM;
392*1709Smlf 			break;
393*1709Smlf 		case DTYPE_WORM:
394*1709Smlf 		case DTYPE_OPTICAL:
395*1709Smlf 		default:
396*1709Smlf 			return (DDI_PROBE_FAILURE);
397*1709Smlf 	}
398*1709Smlf 
399*1709Smlf 	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;
400*1709Smlf 
401*1709Smlf 	dadkp->dad_secshf = SCTRSHFT;
402*1709Smlf 	dadkp->dad_blkshf = 0;
403*1709Smlf 
404*1709Smlf 	/* display the device name */
405*1709Smlf 	(void) strcpy(name, "Vendor '");
406*1709Smlf 	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
407*1709Smlf 	(void) strcat(name, "' Product '");
408*1709Smlf 	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
409*1709Smlf 	(void) strcat(name, "'");
410*1709Smlf 	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);
411*1709Smlf 
412*1709Smlf 	return (DDI_PROBE_SUCCESS);
413*1709Smlf }
414*1709Smlf 
415*1709Smlf 
416*1709Smlf /* ARGSUSED */
417*1709Smlf int
418*1709Smlf dadk_attach(opaque_t objp)
419*1709Smlf {
420*1709Smlf 	return (DDI_SUCCESS);
421*1709Smlf }
422*1709Smlf 
423*1709Smlf int
424*1709Smlf dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
425*1709Smlf {
426*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
427*1709Smlf 	/* free the old bbh object */
428*1709Smlf 	if (dadkp->dad_bbhobjp)
429*1709Smlf 		BBH_FREE(dadkp->dad_bbhobjp);
430*1709Smlf 
431*1709Smlf 	/* initialize the new bbh object */
432*1709Smlf 	dadkp->dad_bbhobjp = bbhobjp;
433*1709Smlf 	BBH_INIT(bbhobjp);
434*1709Smlf 
435*1709Smlf 	return (DDI_SUCCESS);
436*1709Smlf }
437*1709Smlf 
438*1709Smlf /* ARGSUSED */
439*1709Smlf int
440*1709Smlf dadk_open(opaque_t objp, int flag)
441*1709Smlf {
442*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
443*1709Smlf 	int error;
444*1709Smlf 	int wce;
445*1709Smlf 
446*1709Smlf 	if (!dadkp->dad_rmb) {
447*1709Smlf 		if (dadkp->dad_phyg.g_cap) {
448*1709Smlf 			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
449*1709Smlf 			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
450*1709Smlf 			return (DDI_SUCCESS);
451*1709Smlf 		}
452*1709Smlf 	} else {
453*1709Smlf 	    mutex_enter(&dadkp->dad_mutex);
454*1709Smlf 	    dadkp->dad_iostate = DKIO_NONE;
455*1709Smlf 	    cv_broadcast(&dadkp->dad_state_cv);
456*1709Smlf 	    mutex_exit(&dadkp->dad_mutex);
457*1709Smlf 
458*1709Smlf 	    if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0, DADK_SILENT) ||
459*1709Smlf 		dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
460*1709Smlf 		dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0, DADK_SILENT)) {
461*1709Smlf 		    return (DDI_FAILURE);
462*1709Smlf 	    }
463*1709Smlf 
464*1709Smlf 	    mutex_enter(&dadkp->dad_mutex);
465*1709Smlf 	    dadkp->dad_iostate = DKIO_INSERTED;
466*1709Smlf 	    cv_broadcast(&dadkp->dad_state_cv);
467*1709Smlf 	    mutex_exit(&dadkp->dad_mutex);
468*1709Smlf 	}
469*1709Smlf 
470*1709Smlf 	/*
471*1709Smlf 	 * get write cache enable state
472*1709Smlf 	 * If there is an error, we must assume that write cache
473*1709Smlf 	 * is enabled.
474*1709Smlf 	 * NOTE: Since there is currently no Solaris mechanism to
475*1709Smlf 	 * change the state of the Write Cache Enable feature,
476*1709Smlf 	 * this code just checks the value of the WCE bit
477*1709Smlf 	 * obtained at device init time.  If a mechanism
478*1709Smlf 	 * is added to the driver to change WCE, dad_wce
479*1709Smlf 	 * must be updated appropriately.
480*1709Smlf 	 */
481*1709Smlf 	error = CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETWCE,
482*1709Smlf 	    (uintptr_t)&wce, 0);
483*1709Smlf 	mutex_enter(&dadkp->dad_mutex);
484*1709Smlf 	dadkp->dad_wce = (error != 0) || (wce != 0);
485*1709Smlf 	mutex_exit(&dadkp->dad_mutex);
486*1709Smlf 
487*1709Smlf 	/* logical disk geometry */
488*1709Smlf 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETGEOM,
489*1709Smlf 	    (uintptr_t)&dadkp->dad_logg, 0);
490*1709Smlf 	if (dadkp->dad_logg.g_cap == 0)
491*1709Smlf 		return (DDI_FAILURE);
492*1709Smlf 
493*1709Smlf 	/* get physical disk geometry */
494*1709Smlf 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETPHYGEOM,
495*1709Smlf 	    (uintptr_t)&dadkp->dad_phyg, 0);
496*1709Smlf 	if (dadkp->dad_phyg.g_cap == 0)
497*1709Smlf 		return (DDI_FAILURE);
498*1709Smlf 
499*1709Smlf 	dadk_setcap(dadkp);
500*1709Smlf 
501*1709Smlf 	/* start profiling */
502*1709Smlf 	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
503*1709Smlf 		ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
504*1709Smlf 
505*1709Smlf 	return (DDI_SUCCESS);
506*1709Smlf }
507*1709Smlf 
508*1709Smlf static void
509*1709Smlf dadk_setcap(struct dadk *dadkp)
510*1709Smlf {
511*1709Smlf 	int	 totsize;
512*1709Smlf 	int	 i;
513*1709Smlf 
514*1709Smlf 	totsize = dadkp->dad_phyg.g_secsiz;
515*1709Smlf 
516*1709Smlf 	if (totsize == 0) {
517*1709Smlf 		if (dadkp->dad_cdrom) {
518*1709Smlf 			totsize = 2048;
519*1709Smlf 		} else {
520*1709Smlf 			totsize = NBPSCTR;
521*1709Smlf 		}
522*1709Smlf 	} else {
523*1709Smlf 		/* Round down sector size to multiple of 512B */
524*1709Smlf 		totsize &= ~(NBPSCTR-1);
525*1709Smlf 	}
526*1709Smlf 	dadkp->dad_phyg.g_secsiz = totsize;
527*1709Smlf 
528*1709Smlf 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
529*1709Smlf 	totsize >>= SCTRSHFT;
530*1709Smlf 	for (i = 0; totsize != 1; i++, totsize >>= 1);
531*1709Smlf 	dadkp->dad_blkshf = i;
532*1709Smlf 	dadkp->dad_secshf = i + SCTRSHFT;
533*1709Smlf }
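
/*
 * Worked example of the shift computation above: a 2048-byte CD-ROM
 * sector gives totsize >> SCTRSHFT == 4, the loop exits with i == 2, so
 * dad_blkshf = 2 (four 512-byte logical blocks per physical sector) and
 * dad_secshf = 2 + SCTRSHFT = 11 (2^11 == 2048).
 */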
534*1709Smlf 
535*1709Smlf 
536*1709Smlf int
537*1709Smlf dadk_close(opaque_t objp)
538*1709Smlf {
539*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
540*1709Smlf 
541*1709Smlf 	if (dadkp->dad_rmb) {
542*1709Smlf 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
543*1709Smlf 		    DADK_SILENT);
544*1709Smlf 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
545*1709Smlf 	}
546*1709Smlf 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
547*1709Smlf 	return (DDI_SUCCESS);
548*1709Smlf }
549*1709Smlf 
550*1709Smlf int
551*1709Smlf dadk_strategy(opaque_t objp, struct buf *bp)
552*1709Smlf {
553*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
554*1709Smlf 
555*1709Smlf 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
556*1709Smlf 		bioerror(bp, EROFS);
557*1709Smlf 		return (DDI_FAILURE);
558*1709Smlf 	}
559*1709Smlf 
560*1709Smlf 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
561*1709Smlf 		bioerror(bp, ENXIO);
562*1709Smlf 		return (DDI_FAILURE);
563*1709Smlf 	}
564*1709Smlf 
565*1709Smlf 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
566*1709Smlf 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
567*1709Smlf 
568*1709Smlf 	return (DDI_SUCCESS);
569*1709Smlf }
570*1709Smlf 
571*1709Smlf int
572*1709Smlf dadk_dump(opaque_t objp, struct buf *bp)
573*1709Smlf {
574*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
575*1709Smlf 	struct cmpkt *pktp;
576*1709Smlf 
577*1709Smlf 	if (dadkp->dad_rdonly) {
578*1709Smlf 		bioerror(bp, EROFS);
579*1709Smlf 		return (DDI_FAILURE);
580*1709Smlf 	}
581*1709Smlf 
582*1709Smlf 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
583*1709Smlf 		bioerror(bp, ENXIO);
584*1709Smlf 		return (DDI_FAILURE);
585*1709Smlf 	}
586*1709Smlf 
587*1709Smlf 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
588*1709Smlf 
589*1709Smlf 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
590*1709Smlf 	if (!pktp) {
591*1709Smlf 		cmn_err(CE_WARN, "no resources for dumping");
592*1709Smlf 		bioerror(bp, EIO);
593*1709Smlf 		return (DDI_FAILURE);
594*1709Smlf 	}
595*1709Smlf 	pktp->cp_flags |= CPF_NOINTR;
596*1709Smlf 
597*1709Smlf 	(void) dadk_ioprep(dadkp, pktp);
598*1709Smlf 	dadk_transport(dadkp, bp);
599*1709Smlf 	pktp->cp_byteleft -= pktp->cp_bytexfer;
600*1709Smlf 
601*1709Smlf 	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
602*1709Smlf 		(void) dadk_iosetup(dadkp, pktp);
603*1709Smlf 		dadk_transport(dadkp, bp);
604*1709Smlf 		pktp->cp_byteleft -= pktp->cp_bytexfer;
605*1709Smlf 	}
606*1709Smlf 
607*1709Smlf 	if (pktp->cp_private)
608*1709Smlf 		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
609*1709Smlf 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
610*1709Smlf 	return (DDI_SUCCESS);
611*1709Smlf }
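
/*
 * dadk_dump() performs polled I/O: the packet is marked CPF_NOINTR and
 * completion goes to the empty dadk_polldone(), so the transfer is
 * driven synchronously by the loop above, bypassing both interrupts and
 * the flow-control queue.  This is what allows it to be used from the
 * crash dump path, where normal interrupt-driven I/O is unavailable.
 */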
612*1709Smlf 
613*1709Smlf /* ARGSUSED  */
614*1709Smlf int
615*1709Smlf dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
616*1709Smlf 	cred_t *cred_p, int *rval_p)
617*1709Smlf {
618*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
619*1709Smlf 
620*1709Smlf 	switch (cmd) {
621*1709Smlf 	case DKIOCGETDEF:
622*1709Smlf 	    {
623*1709Smlf 		struct buf	*bp;
624*1709Smlf 		int		err, head;
625*1709Smlf 		unsigned char	*secbuf;
626*1709Smlf 		STRUCT_DECL(defect_header, adh);
627*1709Smlf 
628*1709Smlf 		STRUCT_INIT(adh, flag & FMODELS);
629*1709Smlf 
630*1709Smlf 		/*
631*1709Smlf 		 * copyin header ....
632*1709Smlf 		 * yields head number and buffer address
633*1709Smlf 		 */
634*1709Smlf 		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
635*1709Smlf 		    flag))
636*1709Smlf 			return (EFAULT);
637*1709Smlf 		head = STRUCT_FGET(adh, head);
638*1709Smlf 		if (head < 0 || head >= dadkp->dad_phyg.g_head)
639*1709Smlf 			return (ENXIO);
640*1709Smlf 		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
641*1709Smlf 		if (!secbuf)
642*1709Smlf 			return (ENOMEM);
643*1709Smlf 		bp = getrbuf(KM_SLEEP);
644*1709Smlf 		if (!bp) {
645*1709Smlf 			kmem_free(secbuf, NBPSCTR);
646*1709Smlf 			return (ENOMEM);
647*1709Smlf 		}
648*1709Smlf 
649*1709Smlf 		bp->b_edev = dev;
650*1709Smlf 		bp->b_dev  = cmpdev(dev);
651*1709Smlf 		bp->b_flags = B_BUSY;
652*1709Smlf 		bp->b_resid = 0;
653*1709Smlf 		bp->b_bcount = NBPSCTR;
654*1709Smlf 		bp->b_un.b_addr = (caddr_t)secbuf;
655*1709Smlf 		bp->b_blkno = head; /* I had to put it somewhere! */
656*1709Smlf 		bp->b_forw = (struct buf *)dadkp;
657*1709Smlf 		bp->b_back = (struct buf *)DCMD_GETDEF;
658*1709Smlf 
659*1709Smlf 		FLC_ENQUE(dadkp->dad_flcobjp, bp);
660*1709Smlf 		err = biowait(bp);
661*1709Smlf 		if (!err) {
662*1709Smlf 			if (ddi_copyout((caddr_t)secbuf,
663*1709Smlf 			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
664*1709Smlf 				err = ENXIO;
665*1709Smlf 		}
666*1709Smlf 		kmem_free(secbuf, NBPSCTR);
667*1709Smlf 		freerbuf(bp);
668*1709Smlf 		return (err);
669*1709Smlf 	    }
670*1709Smlf 	case DIOCTL_RWCMD:
671*1709Smlf 	    {
672*1709Smlf 		struct dadkio_rwcmd *rwcmdp;
673*1709Smlf 		int status, rw;
674*1709Smlf 
675*1709Smlf 		/*
676*1709Smlf 		 * copied in by cmdk and, if necessary, converted to the
677*1709Smlf 		 * correct datamodel
678*1709Smlf 		 */
679*1709Smlf 		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;
680*1709Smlf 
681*1709Smlf 		/*
682*1709Smlf 		 * handle the complex cases here; we pass these
683*1709Smlf 		 * through to the driver, which will queue them and
684*1709Smlf 		 * handle the requests asynchronously.  The simpler
685*1709Smlf 		 * cases, which can return immediately, fail here, and
686*1709Smlf 		 * the request reverts to the dadk_ioctl routine, which
687*1709Smlf 		 * will reroute them directly to the ata driver.
688*1709Smlf 		 */
689*1709Smlf 		switch (rwcmdp->cmd) {
690*1709Smlf 			case DADKIO_RWCMD_READ :
691*1709Smlf 				/*FALLTHROUGH*/
692*1709Smlf 			case DADKIO_RWCMD_WRITE:
693*1709Smlf 				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
694*1709Smlf 				    B_WRITE : B_READ);
695*1709Smlf 				status = dadk_dk_buf_setup(dadkp,
696*1709Smlf 				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
697*1709Smlf 				    UIO_SYSSPACE : UIO_USERSPACE), rw);
698*1709Smlf 				return (status);
699*1709Smlf 			default:
700*1709Smlf 				return (EINVAL);
701*1709Smlf 		}
702*1709Smlf 	    }
703*1709Smlf 	case DKIOCFLUSHWRITECACHE:
704*1709Smlf 		{
705*1709Smlf 			struct buf *bp;
706*1709Smlf 			int err = 0;
707*1709Smlf 			struct dk_callback *dkc = (struct dk_callback *)arg;
708*1709Smlf 			struct cmpkt *pktp;
709*1709Smlf 			int is_sync = 1;
710*1709Smlf 
711*1709Smlf 			mutex_enter(&dadkp->dad_mutex);
712*1709Smlf 			if (dadkp->dad_noflush || !dadkp->dad_wce) {
713*1709Smlf 				err = dadkp->dad_noflush ? ENOTSUP : 0;
714*1709Smlf 				mutex_exit(&dadkp->dad_mutex);
715*1709Smlf 				/*
716*1709Smlf 				 * If a callback was requested: a
717*1709Smlf 				 * callback will always be done if the
718*1709Smlf 				 * caller saw the DKIOCFLUSHWRITECACHE
719*1709Smlf 				 * ioctl return 0, and never done if the
720*1709Smlf 				 * caller saw the ioctl return an error.
721*1709Smlf 				 */
722*1709Smlf 				if ((flag & FKIOCTL) && dkc != NULL &&
723*1709Smlf 				    dkc->dkc_callback != NULL) {
724*1709Smlf 					(*dkc->dkc_callback)(dkc->dkc_cookie,
725*1709Smlf 					    err);
726*1709Smlf 					/*
727*1709Smlf 					 * Did callback and reported error.
728*1709Smlf 					 * Since we did a callback, ioctl
729*1709Smlf 					 * should return 0.
730*1709Smlf 					 */
731*1709Smlf 					err = 0;
732*1709Smlf 				}
733*1709Smlf 				return (err);
734*1709Smlf 			}
735*1709Smlf 			mutex_exit(&dadkp->dad_mutex);
736*1709Smlf 
737*1709Smlf 			bp = getrbuf(KM_SLEEP);
738*1709Smlf 
739*1709Smlf 			bp->b_edev = dev;
740*1709Smlf 			bp->b_dev  = cmpdev(dev);
741*1709Smlf 			bp->b_flags = B_BUSY;
742*1709Smlf 			bp->b_resid = 0;
743*1709Smlf 			bp->b_bcount = 0;
744*1709Smlf 			SET_BP_SEC(bp, 0);
745*1709Smlf 
746*1709Smlf 			if ((flag & FKIOCTL) && dkc != NULL &&
747*1709Smlf 			    dkc->dkc_callback != NULL) {
748*1709Smlf 				struct dk_callback *dkc2 =
749*1709Smlf 				    (struct dk_callback *)kmem_zalloc(
750*1709Smlf 				    sizeof (struct dk_callback), KM_SLEEP);
751*1709Smlf 
752*1709Smlf 				bcopy(dkc, dkc2, sizeof (*dkc2));
753*1709Smlf 				/*
754*1709Smlf 				 * Borrow b_list to carry private data
755*1709Smlf 				 * to the b_iodone func.
756*1709Smlf 				 */
757*1709Smlf 				bp->b_list = (struct buf *)dkc2;
758*1709Smlf 				bp->b_iodone = dadk_flushdone;
759*1709Smlf 				is_sync = 0;
760*1709Smlf 			}
761*1709Smlf 
762*1709Smlf 			/*
763*1709Smlf 			 * Setup command pkt
764*1709Smlf 			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP is set
765*1709Smlf 			 */
766*1709Smlf 			pktp = dadk_pktprep(dadkp, NULL, bp,
767*1709Smlf 			    dadk_iodone, DDI_DMA_SLEEP, NULL);
768*1709Smlf 
769*1709Smlf 			pktp->cp_time = DADK_FLUSH_CACHE_TIME;
770*1709Smlf 
771*1709Smlf 			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
772*1709Smlf 			pktp->cp_byteleft = 0;
773*1709Smlf 			pktp->cp_private = NULL;
774*1709Smlf 			pktp->cp_secleft = 0;
775*1709Smlf 			pktp->cp_srtsec = -1;
776*1709Smlf 			pktp->cp_bytexfer = 0;
777*1709Smlf 
778*1709Smlf 			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
779*1709Smlf 
780*1709Smlf 			FLC_ENQUE(dadkp->dad_flcobjp, bp);
781*1709Smlf 
782*1709Smlf 			if (is_sync) {
783*1709Smlf 				err = biowait(bp);
784*1709Smlf 				freerbuf(bp);
785*1709Smlf 			}
786*1709Smlf 			return (err);
787*1709Smlf 		}
788*1709Smlf 	default:
789*1709Smlf 		if (!dadkp->dad_rmb)
790*1709Smlf 			return (CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag));
791*1709Smlf 	}
792*1709Smlf 
793*1709Smlf 	switch (cmd) {
794*1709Smlf 	case CDROMSTOP:
795*1709Smlf 		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
796*1709Smlf 			0, DADK_SILENT));
797*1709Smlf 	case CDROMSTART:
798*1709Smlf 		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
799*1709Smlf 			0, DADK_SILENT));
800*1709Smlf 	case DKIOCLOCK:
801*1709Smlf 		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
802*1709Smlf 	case DKIOCUNLOCK:
803*1709Smlf 		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
804*1709Smlf 	case DKIOCEJECT:
805*1709Smlf 	case CDROMEJECT:
806*1709Smlf 		{
807*1709Smlf 			int ret;
808*1709Smlf 
809*1709Smlf 			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
810*1709Smlf 				DADK_SILENT)) {
811*1709Smlf 				return (ret);
812*1709Smlf 			}
813*1709Smlf 			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
814*1709Smlf 				DADK_SILENT)) {
815*1709Smlf 				return (ret);
816*1709Smlf 			}
817*1709Smlf 			mutex_enter(&dadkp->dad_mutex);
818*1709Smlf 			dadkp->dad_iostate = DKIO_EJECTED;
819*1709Smlf 			cv_broadcast(&dadkp->dad_state_cv);
820*1709Smlf 			mutex_exit(&dadkp->dad_mutex);
821*1709Smlf 
822*1709Smlf 			return (0);
823*1709Smlf 
824*1709Smlf 		}
825*1709Smlf 	default:
826*1709Smlf 		return (ENOTTY);
827*1709Smlf 	/*
828*1709Smlf 	 * cdrom audio commands, mapped to DCMD_* codes for dadk_rmb_ioctl()
829*1709Smlf 	 */
830*1709Smlf 	case CDROMPAUSE:
831*1709Smlf 		cmd = DCMD_PAUSE;
832*1709Smlf 		break;
833*1709Smlf 	case CDROMRESUME:
834*1709Smlf 		cmd = DCMD_RESUME;
835*1709Smlf 		break;
836*1709Smlf 	case CDROMPLAYMSF:
837*1709Smlf 		cmd = DCMD_PLAYMSF;
838*1709Smlf 		break;
839*1709Smlf 	case CDROMPLAYTRKIND:
840*1709Smlf 		cmd = DCMD_PLAYTRKIND;
841*1709Smlf 		break;
842*1709Smlf 	case CDROMREADTOCHDR:
843*1709Smlf 		cmd = DCMD_READTOCHDR;
844*1709Smlf 		break;
845*1709Smlf 	case CDROMREADTOCENTRY:
846*1709Smlf 		cmd = DCMD_READTOCENT;
847*1709Smlf 		break;
848*1709Smlf 	case CDROMVOLCTRL:
849*1709Smlf 		cmd = DCMD_VOLCTRL;
850*1709Smlf 		break;
851*1709Smlf 	case CDROMSUBCHNL:
852*1709Smlf 		cmd = DCMD_SUBCHNL;
853*1709Smlf 		break;
854*1709Smlf 	case CDROMREADMODE2:
855*1709Smlf 		cmd = DCMD_READMODE2;
856*1709Smlf 		break;
857*1709Smlf 	case CDROMREADMODE1:
858*1709Smlf 		cmd = DCMD_READMODE1;
859*1709Smlf 		break;
860*1709Smlf 	case CDROMREADOFFSET:
861*1709Smlf 		cmd = DCMD_READOFFSET;
862*1709Smlf 		break;
863*1709Smlf 	}
864*1709Smlf 	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
865*1709Smlf }
866*1709Smlf 
867*1709Smlf int
868*1709Smlf dadk_flushdone(struct buf *bp)
869*1709Smlf {
870*1709Smlf 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
871*1709Smlf 
872*1709Smlf 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
873*1709Smlf 
874*1709Smlf 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
875*1709Smlf 
876*1709Smlf 	kmem_free(dkc, sizeof (*dkc));
877*1709Smlf 	freerbuf(bp);
878*1709Smlf 	return (0);
879*1709Smlf }
880*1709Smlf 
881*1709Smlf int
882*1709Smlf dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
883*1709Smlf {
884*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
885*1709Smlf 
886*1709Smlf 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
887*1709Smlf 	    sizeof (struct tgdk_geom));
888*1709Smlf 	return (DDI_SUCCESS);
889*1709Smlf }
890*1709Smlf 
891*1709Smlf int
892*1709Smlf dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
893*1709Smlf {
894*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
895*1709Smlf 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
896*1709Smlf 	    sizeof (struct tgdk_geom));
897*1709Smlf 	return (DDI_SUCCESS);
898*1709Smlf }
899*1709Smlf 
900*1709Smlf int
901*1709Smlf dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
902*1709Smlf {
903*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
904*1709Smlf 
905*1709Smlf 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
906*1709Smlf 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
907*1709Smlf 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
908*1709Smlf 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
909*1709Smlf 	return (DDI_SUCCESS);
910*1709Smlf }
911*1709Smlf 
912*1709Smlf 
913*1709Smlf tgdk_iob_handle
914*1709Smlf dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
915*1709Smlf {
916*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
917*1709Smlf 	struct buf *bp;
918*1709Smlf 	struct tgdk_iob *iobp;
919*1709Smlf 	size_t rlen;
920*1709Smlf 
921*1709Smlf 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
922*1709Smlf 	if (iobp == NULL)
923*1709Smlf 		return (NULL);
924*1709Smlf 	if ((bp = getrbuf(kmsflg)) == NULL) {
925*1709Smlf 		kmem_free(iobp, sizeof (*iobp));
926*1709Smlf 		return (NULL);
927*1709Smlf 	}
928*1709Smlf 
929*1709Smlf 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
930*1709Smlf 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
931*1709Smlf 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
932*1709Smlf 				>> dadkp->dad_secshf) << dadkp->dad_secshf;
933*1709Smlf 
934*1709Smlf 	bp->b_un.b_addr = 0;
935*1709Smlf 	/*
936*1709Smlf 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
937*1709Smlf 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
938*1709Smlf 	 * is obsolete and we want more flexibility in controlling the DMA
939*1709Smlf 	 * address constraints.
940*1709Smlf 	 */
941*1709Smlf 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
942*1709Smlf 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
943*1709Smlf 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
944*1709Smlf 		freerbuf(bp);
945*1709Smlf 		kmem_free(iobp, sizeof (*iobp));
946*1709Smlf 		return (NULL);
947*1709Smlf 	}
948*1709Smlf 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
949*1709Smlf 	iobp->b_bp = bp;
950*1709Smlf 	iobp->b_lblk = blkno;
951*1709Smlf 	iobp->b_xfer = xfer;
954*1709Smlf 	return (iobp);
955*1709Smlf }
956*1709Smlf 
957*1709Smlf /* ARGSUSED */
958*1709Smlf int
959*1709Smlf dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
960*1709Smlf {
961*1709Smlf 	struct buf *bp;
962*1709Smlf 
963*1709Smlf 	if (iobp) {
964*1709Smlf 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
965*1709Smlf 			bp = iobp->b_bp;
966*1709Smlf 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
967*1709Smlf 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, 0);
968*1709Smlf 			freerbuf(bp);
969*1709Smlf 		}
970*1709Smlf 		kmem_free(iobp, sizeof (*iobp));
971*1709Smlf 	}
972*1709Smlf 	return (DDI_SUCCESS);
973*1709Smlf }
974*1709Smlf 
975*1709Smlf /* ARGSUSED */
976*1709Smlf caddr_t
977*1709Smlf dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
978*1709Smlf {
979*1709Smlf 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
980*1709Smlf }
981*1709Smlf 
982*1709Smlf 
983*1709Smlf caddr_t
984*1709Smlf dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
985*1709Smlf {
986*1709Smlf 	struct dadk	*dadkp = (struct dadk *)objp;
987*1709Smlf 	struct buf	*bp;
988*1709Smlf 	int		err;
989*1709Smlf 
990*1709Smlf 	bp = iobp->b_bp;
991*1709Smlf 	if (dadkp->dad_rdonly && !(rw & B_READ)) {
992*1709Smlf 		bioerror(bp, EROFS);
993*1709Smlf 		return (NULL);
994*1709Smlf 	}
995*1709Smlf 
996*1709Smlf 	bp->b_flags |= (B_BUSY | rw);
997*1709Smlf 	bp->b_bcount = iobp->b_pbytecnt;
998*1709Smlf 	SET_BP_SEC(bp, iobp->b_psec);
999*1709Smlf 	bp->av_back = (struct buf *)0;
1000*1709Smlf 	bp->b_resid = 0;
1001*1709Smlf 
1002*1709Smlf 	/* call flow control */
1003*1709Smlf 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1004*1709Smlf 	err = biowait(bp);
1005*1709Smlf 
1006*1709Smlf 	bp->b_bcount = iobp->b_xfer;
1007*1709Smlf 	bp->b_flags &= ~(B_DONE|B_BUSY);
1008*1709Smlf 
1009*1709Smlf 	if (err)
1010*1709Smlf 		return (NULL);
1011*1709Smlf 
1012*1709Smlf 	return (bp->b_un.b_addr+iobp->b_pbyteoff);
1013*1709Smlf }
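
/*
 * The iob routines above are typically used together to read or write a
 * small driver-private buffer (a label, for example) through the normal
 * I/O path.  A sketch of the pattern, assuming the TGDK_IOB_* wrappers
 * from <sys/dktp/tgdk.h>:
 *
 *	tgdk_iob_handle hdl;
 *	caddr_t addr;
 *
 *	hdl = TGDK_IOB_ALLOC(dkobjp, blkno, count, KM_SLEEP);
 *	addr = TGDK_IOB_XFER(dkobjp, hdl, B_READ);
 *	if (addr != NULL) {
 *		... examine or copy the data at addr ...
 *	}
 *	(void) TGDK_IOB_FREE(dkobjp, hdl);
 *
 * Both dadk_iob_htoc() and dadk_iob_xfer() return the buffer address
 * adjusted by b_pbyteoff, so the caller sees the logical block it asked
 * for even when blkno is not aligned to the physical sector size.
 */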
1014*1709Smlf 
1015*1709Smlf static void
1016*1709Smlf dadk_transport(opaque_t com_data, struct buf *bp)
1017*1709Smlf {
1018*1709Smlf 	struct dadk *dadkp = (struct dadk *)com_data;
1019*1709Smlf 
1020*1709Smlf 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1021*1709Smlf 	    CTL_SEND_SUCCESS)
1022*1709Smlf 		return;
1023*1709Smlf 	dadk_restart((void*)GDA_BP_PKT(bp));
1024*1709Smlf }
1025*1709Smlf 
1026*1709Smlf static int
1027*1709Smlf dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1028*1709Smlf {
1029*1709Smlf 	struct cmpkt *pktp;
1030*1709Smlf 	struct dadk *dadkp = (struct dadk *)com_data;
1031*1709Smlf 
1032*1709Smlf 	if (GDA_BP_PKT(bp))
1033*1709Smlf 		return (DDI_SUCCESS);
1034*1709Smlf 
1035*1709Smlf 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1036*1709Smlf 	if (!pktp)
1037*1709Smlf 		return (DDI_FAILURE);
1038*1709Smlf 
1039*1709Smlf 	return (dadk_ioprep(dadkp, pktp));
1040*1709Smlf }
1041*1709Smlf 
1042*1709Smlf /*
1043*1709Smlf  * Read, Write preparation
1044*1709Smlf  */
1045*1709Smlf static int
1046*1709Smlf dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1047*1709Smlf {
1048*1709Smlf 	struct buf *bp;
1049*1709Smlf 
1050*1709Smlf 	bp = pktp->cp_bp;
1051*1709Smlf 	if (bp->b_forw == (struct buf *)dadkp)
1052*1709Smlf 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1053*1709Smlf 
1054*1709Smlf 	else if (bp->b_flags & B_READ)
1055*1709Smlf 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1056*1709Smlf 	else
1057*1709Smlf 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1058*1709Smlf 	pktp->cp_byteleft = bp->b_bcount;
1059*1709Smlf 
1060*1709Smlf 	/* setup the bad block list handle */
1061*1709Smlf 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1062*1709Smlf 	return (dadk_iosetup(dadkp, pktp));
1063*1709Smlf }
1064*1709Smlf 
1065*1709Smlf static int
1066*1709Smlf dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1067*1709Smlf {
1068*1709Smlf 	struct buf	*bp;
1069*1709Smlf 	bbh_cookie_t	bbhckp;
1070*1709Smlf 	int		seccnt;
1071*1709Smlf 
1072*1709Smlf 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1073*1709Smlf 	pktp->cp_secleft -= seccnt;
1074*1709Smlf 
1075*1709Smlf 	if (pktp->cp_secleft) {
1076*1709Smlf 		pktp->cp_srtsec += seccnt;
1077*1709Smlf 	} else {
1078*1709Smlf 		/* get the first cookie from the bad block list */
1079*1709Smlf 		if (!pktp->cp_private) {
1080*1709Smlf 			bp = pktp->cp_bp;
1081*1709Smlf 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1082*1709Smlf 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1083*1709Smlf 		} else {
1084*1709Smlf 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1085*1709Smlf 			    pktp->cp_private);
1086*1709Smlf 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1087*1709Smlf 			    bbhckp);
1088*1709Smlf 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1089*1709Smlf 			    bbhckp);
1090*1709Smlf 		}
1091*1709Smlf 	}
1092*1709Smlf 
1093*1709Smlf 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1094*1709Smlf 
1095*1709Smlf 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1096*1709Smlf 		return (DDI_SUCCESS);
1097*1709Smlf 	} else {
1098*1709Smlf 		return (DDI_FAILURE);
1099*1709Smlf 	}
1100*1709Smlf 
1101*1709Smlf 
1102*1709Smlf 
1103*1709Smlf 
1104*1709Smlf }
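
/*
 * dadk_iosetup() programs the next chunk of a request: it advances past
 * the sectors already transferred, refills cp_srtsec/cp_secleft from the
 * next bad-block-handling cookie (or from the buf itself when there is
 * no bbh handle) once the current extent is exhausted, and sets
 * cp_bytexfer to the remaining length; CTL_IOSETUP() may then trim
 * cp_bytexfer to what the controller can move in one transfer.
 * dadk_iodone() keeps calling this routine until cp_byteleft reaches
 * zero.
 */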
1105*1709Smlf 
1106*1709Smlf static struct cmpkt *
1107*1709Smlf dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1108*1709Smlf     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1109*1709Smlf {
1110*1709Smlf 	struct cmpkt *pktp;
1111*1709Smlf 
1112*1709Smlf 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1113*1709Smlf 	    arg);
1114*1709Smlf 
1115*1709Smlf 	if (pktp) {
1116*1709Smlf 		pktp->cp_callback = dadk_pktcb;
1117*1709Smlf 		pktp->cp_time = DADK_IO_TIME;
1118*1709Smlf 		pktp->cp_flags = 0;
1119*1709Smlf 		pktp->cp_iodone = cb_func;
1120*1709Smlf 		pktp->cp_dev_private = (opaque_t)dadkp;
1121*1709Smlf 
1122*1709Smlf 	}
1123*1709Smlf 
1124*1709Smlf 	return (pktp);
1125*1709Smlf }
1126*1709Smlf 
1127*1709Smlf 
1128*1709Smlf static void
1129*1709Smlf dadk_restart(void *vpktp)
1130*1709Smlf {
1131*1709Smlf 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1132*1709Smlf 
1133*1709Smlf 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1134*1709Smlf 		return;
1135*1709Smlf 	pktp->cp_iodone(pktp->cp_bp);
1136*1709Smlf }
1137*1709Smlf 
1138*1709Smlf static int
1139*1709Smlf dadk_ioretry(struct cmpkt *pktp, int action)
1140*1709Smlf {
1141*1709Smlf 	struct buf *bp;
1142*1709Smlf 	struct dadk *dadkp = PKT2DADK(pktp);
1143*1709Smlf 
1144*1709Smlf 	switch (action) {
1145*1709Smlf 	case QUE_COMMAND:
1146*1709Smlf 		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
1147*1709Smlf 			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
1148*1709Smlf 			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
1149*1709Smlf 				CTL_SEND_SUCCESS) {
1150*1709Smlf 				return (JUST_RETURN);
1151*1709Smlf 			}
1152*1709Smlf 			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
1153*1709Smlf 				CE_WARN,
1154*1709Smlf 				"transport of command fails\n");
1155*1709Smlf 		} else
1156*1709Smlf 			gda_log(dadkp->dad_sd->sd_dev,
1157*1709Smlf 				dadk_name, CE_WARN,
1158*1709Smlf 				"exceeds maximum number of retries\n");
1159*1709Smlf 		bioerror(pktp->cp_bp, ENXIO);
1160*1709Smlf 		/*FALLTHROUGH*/
1161*1709Smlf 	case COMMAND_DONE_ERROR:
1162*1709Smlf 		bp = pktp->cp_bp;
1163*1709Smlf 		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
1164*1709Smlf 		    pktp->cp_resid;
1165*1709Smlf 		if (geterror(bp) == 0) {
1166*1709Smlf 			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
1167*1709Smlf 			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
1168*1709Smlf 			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
1169*1709Smlf 				/*
1170*1709Smlf 				 * Flag "unimplemented" responses for
1171*1709Smlf 				 * DCMD_FLUSH_CACHE as ENOTSUP
1172*1709Smlf 				 */
1173*1709Smlf 				bioerror(bp, ENOTSUP);
1174*1709Smlf 				mutex_enter(&dadkp->dad_mutex);
1175*1709Smlf 				dadkp->dad_noflush = 1;
1176*1709Smlf 				mutex_exit(&dadkp->dad_mutex);
1177*1709Smlf 			} else {
1178*1709Smlf 				bioerror(bp, EIO);
1179*1709Smlf 			}
1180*1709Smlf 		}
1181*1709Smlf 		/*FALLTHROUGH*/
1182*1709Smlf 	case COMMAND_DONE:
1183*1709Smlf 	default:
1184*1709Smlf 		return (COMMAND_DONE);
1185*1709Smlf 	}
1186*1709Smlf }
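
/*
 * Retry policy: QUE_COMMAND re-issues the packet until cp_retry reaches
 * DADK_RETRY_COUNT; if retries are exhausted, or the transport refuses
 * the packet, the request is failed with ENXIO.  An aborted
 * DCMD_FLUSH_CACHE is treated specially: it is reported as ENOTSUP and
 * dad_noflush is set so that later DKIOCFLUSHWRITECACHE requests are
 * rejected up front without touching the hardware.
 */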
1187*1709Smlf 
1188*1709Smlf 
1189*1709Smlf static void
1190*1709Smlf dadk_pktcb(struct cmpkt *pktp)
1191*1709Smlf {
1192*1709Smlf 	int action;
1193*1709Smlf 	struct dadkio_rwcmd *rwcmdp;
1194*1709Smlf 
1195*1709Smlf 	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */
1196*1709Smlf 
1197*1709Smlf 	if (pktp->cp_reason == CPS_SUCCESS) {
1198*1709Smlf 		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
1199*1709Smlf 			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
1200*1709Smlf 		pktp->cp_iodone(pktp->cp_bp);
1201*1709Smlf 		return;
1202*1709Smlf 	}
1203*1709Smlf 
1204*1709Smlf 	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
1205*1709Smlf 		if (pktp->cp_reason == CPS_CHKERR)
1206*1709Smlf 			dadk_recorderr(pktp, rwcmdp);
1207*1709Smlf 		dadk_iodone(pktp->cp_bp);
1208*1709Smlf 		return;
1209*1709Smlf 	}
1210*1709Smlf 
1211*1709Smlf 	if (pktp->cp_reason == CPS_CHKERR)
1212*1709Smlf 		action = dadk_chkerr(pktp);
1213*1709Smlf 	else
1214*1709Smlf 		action = COMMAND_DONE_ERROR;
1215*1709Smlf 
1216*1709Smlf 	if (action == JUST_RETURN)
1217*1709Smlf 		return;
1218*1709Smlf 
1219*1709Smlf 	if (action != COMMAND_DONE) {
1220*1709Smlf 		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
1221*1709Smlf 			return;
1222*1709Smlf 	}
1223*1709Smlf 	pktp->cp_iodone(pktp->cp_bp);
1224*1709Smlf }
1225*1709Smlf 
1226*1709Smlf 
1227*1709Smlf 
1228*1709Smlf static struct dadkio_derr dadk_errtab[] = {
1229*1709Smlf 	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
1230*1709Smlf 	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
1231*1709Smlf 	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
1232*1709Smlf 	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
1233*1709Smlf 	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
1234*1709Smlf 	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
1235*1709Smlf 	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
1236*1709Smlf 	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
1237*1709Smlf 	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
1238*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
1239*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
1240*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
1241*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
1242*1709Smlf 	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
1243*1709Smlf 	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
1244*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
1245*1709Smlf 	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
1246*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
1247*1709Smlf 	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
1248*1709Smlf 	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
1249*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
1250*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
1251*1709Smlf 	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
1252*1709Smlf 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
1253*1709Smlf };
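
/*
 * dadk_chkerr() and dadk_recorderr() index dadk_errtab[] directly with
 * the DERR_* status byte from the packet, so this table must stay in
 * step with the dadk_sense[] strings above and cover every DERR_* value
 * the controller can return.
 */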
1254*1709Smlf 
1255*1709Smlf static int
1256*1709Smlf dadk_chkerr(struct cmpkt *pktp)
1257*1709Smlf {
1258*1709Smlf 	int err_blkno;
1259*1709Smlf 	struct dadk *dadkp;
1260*1709Smlf 	int scb;
1261*1709Smlf 
1262*1709Smlf 	if (*(char *)pktp->cp_scbp == DERR_SUCCESS)
1263*1709Smlf 		return (COMMAND_DONE);
1264*1709Smlf 
1265*1709Smlf 	/* check error code table */
1266*1709Smlf 	dadkp = PKT2DADK(pktp);
1267*1709Smlf 	scb = (int)(*(char *)pktp->cp_scbp);
1268*1709Smlf 	if (pktp->cp_retry) {
1269*1709Smlf 		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
1270*1709Smlf 			pktp->cp_resid) >> dadkp->dad_secshf);
1271*1709Smlf 	} else
1272*1709Smlf 		err_blkno = -1;
1273*1709Smlf 
1274*1709Smlf 	/* if attempting to read a sector from a cdrom audio disk */
1275*1709Smlf 	if ((dadkp->dad_cdrom) &&
1276*1709Smlf 	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
1277*1709Smlf 	    (scb == DERR_ILL)) {
1278*1709Smlf 		return (COMMAND_DONE);
1279*1709Smlf 	}
1280*1709Smlf 	if (pktp->cp_passthru == NULL) {
1281*1709Smlf 		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
1282*1709Smlf 		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
1283*1709Smlf 		    err_blkno, dadk_cmds, dadk_sense);
1284*1709Smlf 	}
1285*1709Smlf 
1286*1709Smlf 	if (scb == DERR_BUSY) {
1287*1709Smlf 		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
1288*1709Smlf 	}
1289*1709Smlf 
1290*1709Smlf 	return (dadk_errtab[scb].d_action);
1291*1709Smlf }
1292*1709Smlf 
1293*1709Smlf static void
1294*1709Smlf dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1295*1709Smlf {
1296*1709Smlf 	struct dadk *dadkp;
1297*1709Smlf 	int scb;
1298*1709Smlf 
1299*1709Smlf 	dadkp = PKT2DADK(pktp);
1300*1709Smlf 	scb = (int)(*(char *)pktp->cp_scbp);
1301*1709Smlf 
1302*1709Smlf 
1303*1709Smlf 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1304*1709Smlf 		((pktp->cp_bytexfer -
1305*1709Smlf 		pktp->cp_resid) >> dadkp->dad_secshf);
1306*1709Smlf 
1307*1709Smlf 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1308*1709Smlf 		pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1309*1709Smlf 	switch (scb) {
1310*1709Smlf 	case DERR_AMNF:
1311*1709Smlf 	case DERR_ABORT:
1312*1709Smlf 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1313*1709Smlf 		break;
1314*1709Smlf 	case DERR_DWF:
1315*1709Smlf 	case DERR_IDNF:
1316*1709Smlf 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1317*1709Smlf 		break;
1318*1709Smlf 	case DERR_TKONF:
1319*1709Smlf 	case DERR_UNC:
1320*1709Smlf 	case DERR_BBK:
1321*1709Smlf 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1322*1709Smlf 		rwcmdp->status.failed_blk_is_valid = 1;
1323*1709Smlf 		rwcmdp->status.resid = 0;
1324*1709Smlf 		break;
1325*1709Smlf 	case DERR_BUSY:
1326*1709Smlf 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1327*1709Smlf 		break;
1328*1709Smlf 	case DERR_INVCDB:
1329*1709Smlf 	case DERR_HARD:
1330*1709Smlf 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1331*1709Smlf 		break;
1332*1709Smlf 	case DERR_ICRC:
1333*1709Smlf 	default:
1334*1709Smlf 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1335*1709Smlf 	}
1336*1709Smlf 
1337*1709Smlf 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1338*1709Smlf 		return;
1339*1709Smlf 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1340*1709Smlf 		rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1341*1709Smlf 		dadk_cmds, dadk_sense);
1342*1709Smlf }
1343*1709Smlf 
1344*1709Smlf /*ARGSUSED*/
1345*1709Smlf static void
1346*1709Smlf dadk_polldone(struct buf *bp)
1347*1709Smlf {
1348*1709Smlf }
1349*1709Smlf 
1350*1709Smlf static void
1351*1709Smlf dadk_iodone(struct buf *bp)
1352*1709Smlf {
1353*1709Smlf 	struct cmpkt *pktp;
1354*1709Smlf 	struct dadk *dadkp;
1355*1709Smlf 
1356*1709Smlf 	pktp  = GDA_BP_PKT(bp);
1357*1709Smlf 	dadkp = PKT2DADK(pktp);
1358*1709Smlf 
1359*1709Smlf 	/* check for all iodone */
1360*1709Smlf 	pktp->cp_byteleft -= pktp->cp_bytexfer;
1361*1709Smlf 	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
1362*1709Smlf 		pktp->cp_retry = 0;
1363*1709Smlf 		(void) dadk_iosetup(dadkp, pktp);
1364*1709Smlf 
1365*1709Smlf 
1366*1709Smlf 	/* 	transport the next one */
1367*1709Smlf 		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
1368*1709Smlf 			return;
1369*1709Smlf 		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
1370*1709Smlf 			return;
1371*1709Smlf 	}
1372*1709Smlf 
1373*1709Smlf 	/* start next one */
1374*1709Smlf 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1375*1709Smlf 
1376*1709Smlf 	/* free pkt */
1377*1709Smlf 	if (pktp->cp_private)
1378*1709Smlf 		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
1379*1709Smlf 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1380*1709Smlf 	biodone(bp);
1381*1709Smlf }
1382*1709Smlf 
1383*1709Smlf int
1384*1709Smlf dadk_check_media(opaque_t objp, int *state)
1385*1709Smlf {
1386*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
1387*1709Smlf 
1388*1709Smlf 	if (!dadkp->dad_rmb) {
1389*1709Smlf 		return (ENXIO);
1390*1709Smlf 	}
1391*1709Smlf #ifdef DADK_DEBUG
1392*1709Smlf 	if (dadk_debug & DSTATE)
1393*1709Smlf 		PRF("dadk_check_media: user state %x disk state %x\n",
1394*1709Smlf 			*state, dadkp->dad_iostate);
1395*1709Smlf #endif
1396*1709Smlf 	/*
1397*1709Smlf 	 * If state already changed just return
1398*1709Smlf 	 */
1399*1709Smlf 	if (*state != dadkp->dad_iostate) {
1400*1709Smlf 		*state = dadkp->dad_iostate;
1401*1709Smlf 		return (0);
1402*1709Smlf 	}
1403*1709Smlf 
1404*1709Smlf 	/*
1405*1709Smlf 	 * Startup polling on thread state
1406*1709Smlf 	 */
1407*1709Smlf 	mutex_enter(&dadkp->dad_mutex);
1408*1709Smlf 	if (dadkp->dad_thread_cnt == 0) {
1409*1709Smlf 		/*
1410*1709Smlf 		 * One thread per removable dadk device
1411*1709Smlf 		 */
1412*1709Smlf 		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
1413*1709Smlf 		    TS_RUN, v.v_maxsyspri - 2);
1414*1709Smlf 	}
1415*1709Smlf 	dadkp->dad_thread_cnt++;
1416*1709Smlf 
1417*1709Smlf 	/*
1418*1709Smlf 	 * Wait for state to change
1419*1709Smlf 	 */
1420*1709Smlf 	do {
1421*1709Smlf 		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
1422*1709Smlf 			dadkp->dad_thread_cnt--;
1423*1709Smlf 			mutex_exit(&dadkp->dad_mutex);
1424*1709Smlf 			return (EINTR);
1425*1709Smlf 		}
1426*1709Smlf 	} while (*state == dadkp->dad_iostate);
1427*1709Smlf 	*state = dadkp->dad_iostate;
1428*1709Smlf 	dadkp->dad_thread_cnt--;
1429*1709Smlf 	mutex_exit(&dadkp->dad_mutex);
1430*1709Smlf 	return (0);
1431*1709Smlf }
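
/*
 * dadk_check_media() implements media-change polling for removable
 * devices (typically backing a DKIOCSTATE-style ioctl): the caller
 * passes in the state it last observed and the call returns once
 * dad_iostate differs from it, or EINTR if the wait is interrupted.
 * The watch thread below refreshes dad_iostate by issuing
 * DCMD_GET_STATE every dadk_check_media_time microseconds while any
 * caller is waiting.
 */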
1432*1709Smlf 
1433*1709Smlf 
1434*1709Smlf #define	MEDIA_ACCESS_DELAY 2000000
1435*1709Smlf 
1436*1709Smlf static void
1437*1709Smlf dadk_watch_thread(struct dadk *dadkp)
1438*1709Smlf {
1439*1709Smlf 	enum dkio_state state;
1440*1709Smlf 	int interval;
1441*1709Smlf 
1442*1709Smlf 	interval = drv_usectohz(dadk_check_media_time);
1443*1709Smlf 
1444*1709Smlf 	do {
1445*1709Smlf 		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
1446*1709Smlf 		    DADK_SILENT)) {
1447*1709Smlf 			/*
1448*1709Smlf 			 * Assume state remained the same
1449*1709Smlf 			 */
1450*1709Smlf 			state = dadkp->dad_iostate;
1451*1709Smlf 		}
1452*1709Smlf 
1453*1709Smlf 		/*
1454*1709Smlf 		 * now signal the waiting thread if this is *not* the
1455*1709Smlf 		 * specified state;
1456*1709Smlf 		 * delay the signal if the state is DKIO_INSERTED
1457*1709Smlf 		 * to allow the target to recover
1458*1709Smlf 		 */
1459*1709Smlf 		if (state != dadkp->dad_iostate) {
1460*1709Smlf 
1461*1709Smlf 			dadkp->dad_iostate = state;
1462*1709Smlf 			if (state == DKIO_INSERTED) {
1463*1709Smlf 				/*
1464*1709Smlf 				 * delay the signal to give the drive a chance
1465*1709Smlf 				 * to do what it apparently needs to do
1466*1709Smlf 				 */
1467*1709Smlf 				(void) timeout((void(*)(void *))cv_broadcast,
1468*1709Smlf 				    (void *)&dadkp->dad_state_cv,
1469*1709Smlf 				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
1470*1709Smlf 			} else {
1471*1709Smlf 				cv_broadcast(&dadkp->dad_state_cv);
1472*1709Smlf 			}
1473*1709Smlf 		}
1474*1709Smlf 		delay(interval);
1475*1709Smlf 	} while (dadkp->dad_thread_cnt);
1476*1709Smlf }
1477*1709Smlf 
1478*1709Smlf int
1479*1709Smlf dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1480*1709Smlf {
1481*1709Smlf 	struct dadk *dadkp = (struct dadk *)objp;
1482*1709Smlf 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1483*1709Smlf 
1484*1709Smlf 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1485*1709Smlf 		*sinqpp = dadkp->dad_sd->sd_inq;
1486*1709Smlf 		return (DDI_SUCCESS);
1487*1709Smlf 	}
1488*1709Smlf 
1489*1709Smlf 	return (DDI_FAILURE);
1490*1709Smlf }
1491*1709Smlf 
1492*1709Smlf static int
1493*1709Smlf dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1494*1709Smlf 
1495*1709Smlf {
1496*1709Smlf 	struct buf *bp;
1497*1709Smlf 	int err;
1498*1709Smlf 	struct cmpkt *pktp;
1499*1709Smlf 
1500*1709Smlf 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1501*1709Smlf 		return (ENOMEM);
1502*1709Smlf 	}
1503*1709Smlf 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1504*1709Smlf 	if (!pktp) {
1505*1709Smlf 		freerbuf(bp);
1506*1709Smlf 		return (ENOMEM);
1507*1709Smlf 	}
1508*1709Smlf 	bp->b_back  = (struct buf *)arg;
1509*1709Smlf 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1510*1709Smlf 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1511*1709Smlf 
1512*1709Smlf 	err = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, (uintptr_t)pktp, flags);
1513*1709Smlf 	freerbuf(bp);
1514*1709Smlf 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1515*1709Smlf 	return (err);
1516*1709Smlf 
1517*1709Smlf 
1518*1709Smlf }
1519*1709Smlf 
1520*1709Smlf static void
1521*1709Smlf dadk_rmb_iodone(struct buf *bp)
1522*1709Smlf {
1523*1709Smlf 	struct cmpkt *pktp;
1524*1709Smlf 	struct dadk *dadkp;
1525*1709Smlf 
1526*1709Smlf 	pktp  = GDA_BP_PKT(bp);
1527*1709Smlf 	dadkp = PKT2DADK(pktp);
1528*1709Smlf 
1529*1709Smlf 	bp->b_flags &= ~(B_DONE|B_BUSY);
1530*1709Smlf 
1531*1709Smlf 	/* Start next one */
1532*1709Smlf 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1533*1709Smlf 
1534*1709Smlf 	biodone(bp);
1535*1709Smlf }
1536*1709Smlf 
1537*1709Smlf static int
1538*1709Smlf dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1539*1709Smlf 	enum uio_seg dataspace, int rw)
1540*1709Smlf {
1541*1709Smlf 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1542*1709Smlf 	struct buf	*bp;
1543*1709Smlf 	struct iovec	aiov;
1544*1709Smlf 	struct uio	auio;
1545*1709Smlf 	struct uio	*uio = &auio;
1546*1709Smlf 	int		status;
1547*1709Smlf 
1548*1709Smlf 	bp = getrbuf(KM_SLEEP);
1549*1709Smlf 
1550*1709Smlf 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1551*1709Smlf 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1552*1709Smlf 
1553*1709Smlf 	bzero((caddr_t)&auio, sizeof (struct uio));
1554*1709Smlf 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1555*1709Smlf 	aiov.iov_base = rwcmdp->bufaddr;
1556*1709Smlf 	aiov.iov_len = rwcmdp->buflen;
1557*1709Smlf 	uio->uio_iov = &aiov;
1558*1709Smlf 
1559*1709Smlf 	uio->uio_iovcnt = 1;
1560*1709Smlf 	uio->uio_resid = rwcmdp->buflen;
1561*1709Smlf 	uio->uio_segflg = dataspace;
1562*1709Smlf 
1563*1709Smlf 	/* Let physio do the rest... */
1564*1709Smlf 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1565*1709Smlf 
1566*1709Smlf 	freerbuf(bp);
1567*1709Smlf 	return (status);
1568*1709Smlf 
1569*1709Smlf }
1570*1709Smlf 
1571*1709Smlf /* Do not let a user gendisk request get too big or */
1572*1709Smlf /* else we could use too many resources.	    */
1573*1709Smlf 
1574*1709Smlf static void
1575*1709Smlf dadkmin(struct buf *bp)
1576*1709Smlf {
1577*1709Smlf 	if (bp->b_bcount > dadk_dk_maxphys)
1578*1709Smlf 		bp->b_bcount = dadk_dk_maxphys;
1579*1709Smlf }
1580*1709Smlf 
1581*1709Smlf static int
1582*1709Smlf dadk_dk_strategy(struct buf *bp)
1583*1709Smlf {
1584*1709Smlf 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1585*1709Smlf 	    bp);
1586*1709Smlf 	return (0);
1587*1709Smlf }
1588*1709Smlf 
1589*1709Smlf static void
1590*1709Smlf dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1591*1709Smlf {
1592*1709Smlf 	struct  cmpkt *pktp;
1593*1709Smlf 
1594*1709Smlf 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1595*1709Smlf 	if (!pktp) {
1596*1709Smlf 		bioerror(bp, ENOMEM);
1597*1709Smlf 		biodone(bp);
1598*1709Smlf 		return;
1599*1709Smlf 	}
1600*1709Smlf 
1601*1709Smlf 	pktp->cp_passthru = rwcmdp;
1602*1709Smlf 
1603*1709Smlf 	(void) dadk_ioprep(dadkp, pktp);
1604*1709Smlf 
1605*1709Smlf 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1606*1709Smlf }
1607