xref: /onnv-gate/usr/src/uts/common/io/emul64_bsd.c (revision 85:1bca8d4eab66)
1*85Scth /*
2*85Scth  * CDDL HEADER START
3*85Scth  *
4*85Scth  * The contents of this file are subject to the terms of the
5*85Scth  * Common Development and Distribution License, Version 1.0 only
6*85Scth  * (the "License").  You may not use this file except in compliance
7*85Scth  * with the License.
8*85Scth  *
9*85Scth  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*85Scth  * or http://www.opensolaris.org/os/licensing.
11*85Scth  * See the License for the specific language governing permissions
12*85Scth  * and limitations under the License.
13*85Scth  *
14*85Scth  * When distributing Covered Code, include this CDDL HEADER in each
15*85Scth  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*85Scth  * If applicable, add the following below this CDDL HEADER, with the
17*85Scth  * fields enclosed by brackets "[]" replaced with your own identifying
18*85Scth  * information: Portions Copyright [yyyy] [name of copyright owner]
19*85Scth  *
20*85Scth  * CDDL HEADER END
21*85Scth  */
22*85Scth /*
23*85Scth  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*85Scth  * Use is subject to license terms.
25*85Scth  */
26*85Scth 
27*85Scth #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*85Scth 
29*85Scth /*
30*85Scth  * pseudo scsi disk driver
31*85Scth  */
32*85Scth 
33*85Scth #include <sys/scsi/scsi.h>
34*85Scth #include <sys/ddi.h>
35*85Scth #include <sys/sunddi.h>
36*85Scth #include <sys/kmem.h>
37*85Scth #include <sys/taskq.h>
38*85Scth #include <sys/disp.h>
39*85Scth #include <sys/types.h>
40*85Scth #include <sys/buf.h>
41*85Scth 
42*85Scth #include <sys/emul64.h>
43*85Scth #include <sys/emul64cmd.h>
44*85Scth #include <sys/emul64var.h>
45*85Scth 
/*
 * Mode sense/select page control
 *
 * Values of the PC (page control) field in byte 2 of a MODE SENSE CDB.
 * They select which flavor of a mode page the initiator is asking for:
 * the current, changeable-mask, default, or saved values.
 */
#define	MODE_SENSE_PC_CURRENT		0
#define	MODE_SENSE_PC_CHANGEABLE	1
#define	MODE_SENSE_PC_DEFAULT		2
#define	MODE_SENSE_PC_SAVED		3
53*85Scth 
54*85Scth /*
55*85Scth  * Byte conversion macros
56*85Scth  */
57*85Scth #if	defined(_BIG_ENDIAN)
58*85Scth #define	ushort_to_scsi_ushort(n)	(n)
59*85Scth #define	uint32_to_scsi_uint32(n)	(n)
60*85Scth #define	uint64_to_scsi_uint64(n)	(n)
61*85Scth #elif	defined(_LITTLE_ENDIAN)
62*85Scth 
63*85Scth #define	ushort_to_scsi_ushort(n)			\
64*85Scth 		((((n) & 0x00ff) << 8) |		\
65*85Scth 		(((n)  & 0xff00) >> 8))
66*85Scth 
67*85Scth #define	uint32_to_scsi_uint32(n)			\
68*85Scth 		((((n) & 0x000000ff) << 24) |		\
69*85Scth 		(((n)  & 0x0000ff00) << 8) |		\
70*85Scth 		(((n)  & 0x00ff0000) >> 8) |		\
71*85Scth 		(((n)  & 0xff000000) >> 24))
72*85Scth #define	uint64_to_scsi_uint64(n)				\
73*85Scth 		((((n) & 0x00000000000000ff) << 56) |           \
74*85Scth 		(((n)  & 0x000000000000ff00) << 40) |           \
75*85Scth 		(((n)  & 0x0000000000ff0000) << 24) |           \
76*85Scth 		(((n)  & 0x00000000ff000000) << 8) |            \
77*85Scth 		(((n)  & 0x000000ff00000000) >> 8) |            \
78*85Scth 		(((n)  & 0x0000ff0000000000) >> 24) |           \
79*85Scth 		(((n)  & 0x00ff000000000000) >> 40) |           \
80*85Scth 		(((n)  & 0xff00000000000000) >> 56))
81*85Scth #else
82*85Scth error no _BIG_ENDIAN or _LITTLE_ENDIAN
83*85Scth #endif
84*85Scth #define	uint_to_byte0(n)		((n) & 0xff)
85*85Scth #define	uint_to_byte1(n)		(((n)>>8) & 0xff)
86*85Scth #define	uint_to_byte2(n)		(((n)>>16) & 0xff)
87*85Scth #define	uint_to_byte3(n)		(((n)>>24) & 0xff)
88*85Scth 
/*
 * struct prop_map
 *
 * This structure maps a property name to the place to store its value.
 * An array of these (emul64_properties, below) drives the property
 * lookup loop in emul64_bsd_get_props().
 */
struct prop_map {
	char 		*pm_name;	/* Name of the property. */
	int		*pm_value;	/* Place to store the value. */
};

/*
 * NOTE(review): not referenced in this part of the file; presumably a
 * debug switch for the per-target non-zero block lists — verify usage
 * elsewhere before relying on it.
 */
static int emul64_debug_blklist = 0;
100*85Scth 
/*
 * Some interesting statistics.  These are protected by the
 * emul64_stats_mutex.  It would be nice to have an ioctl to print them out,
 * but we don't have the development time for that now.  You can at least
 * look at them with adb.
 */

int		emul64_collect_stats = 1; /* Collect stats if non-zero */
kmutex_t	emul64_stats_mutex;	/* Protect these variables */
long		emul64_nowrite_count = 0; /* # active nowrite ranges */
static uint64_t	emul64_skipped_io = 0;	/* Skipped I/O operations, because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_skipped_blk = 0;	/* Skipped blocks because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_io_ops = 0;	/* Total number of I/O operations */
					/* including skipped and actual. */
static uint64_t	emul64_io_blocks = 0;	/* Total number of blocks involved */
					/* in I/O operations. */
static uint64_t	emul64_nonzero = 0;	/* Number of non-zero data blocks */
					/* currently held in memory */
static uint64_t	emul64_max_list_length = 0; /* Maximum size of a linked */
					    /* list of non-zero blocks. */
uint64_t emul64_taskq_max = 0;		/* emul64_scsi_start uses the taskq */
					/* mechanism to dispatch work.  If */
					/* the number of entries in the */
					/* queue exceeds the maximum for */
					/* the queue, a 1 second delay is */
					/* encountered in taskq_ent_alloc. */
					/* This counter counts the number */
					/* of times that this happens. */
131*85Scth 
/*
 * Since emul64 does no physical I/O, operations that would normally be I/O
 * intensive become CPU bound.  An example of this is RAID 5
 * initialization.  When the kernel becomes CPU bound, it looks as if the
 * machine is hung.
 *
 * To avoid this problem, we provide a function, emul64_yield_check, that does a
 * delay from time to time to yield up the CPU.  The following variables
 * are tunables for this algorithm.
 *
 *	emul64_num_delay_called	Number of times we called delay.  This is
 *				not really a tunable.  Rather it is a
 *				counter that provides useful information
 *				for adjusting the tunables.
 *	emul64_yield_length	Number of microseconds to yield the CPU.
 *	emul64_yield_period	Number of I/O operations between yields.
 *	emul64_yield_enable	emul64 will yield the CPU, only if this
 *				variable contains a non-zero value.  This
 *				allows the yield functionality to be turned
 *				off for experimentation purposes.
 *
 * The value of 1000 for emul64_yield_period has been determined by
 * experience with running the tests.
 */
static uint64_t		emul64_num_delay_called = 0;
static int		emul64_yield_length = 1000;	/* microseconds */
static int		emul64_yield_period = 1000;	/* I/O ops per yield */
static int		emul64_yield_enable = 1;	/* non-zero enables */
static kmutex_t		emul64_yield_mutex;
static kcondvar_t 	emul64_yield_cv;
162*85Scth 
163*85Scth /*
164*85Scth  * This array establishes a set of tunable variables that can be set by
165*85Scth  * defining properties in the emul64.conf file.
166*85Scth  */
167*85Scth struct prop_map emul64_properties[] = {
168*85Scth 	"emul64_collect_stats",		&emul64_collect_stats,
169*85Scth 	"emul64_yield_length",		&emul64_yield_length,
170*85Scth 	"emul64_yield_period",		&emul64_yield_period,
171*85Scth 	"emul64_yield_enable",		&emul64_yield_enable,
172*85Scth 	"emul64_max_task",		&emul64_max_task,
173*85Scth 	"emul64_task_nthreads",		&emul64_task_nthreads
174*85Scth };
175*85Scth 
static unsigned char *emul64_zeros = NULL; /* Block of 0s for comparison */

extern void emul64_check_cond(struct scsi_pkt *pkt, uchar_t key,
				uchar_t asc, uchar_t ascq);
/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
uint_t dkg_rpm = 3600;	/* rotational speed reported in mode page 4 */

/* Forward declarations for the static helpers defined below. */
static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
static int bsd_readblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
				int, unsigned char *);
static int bsd_writeblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
				int, unsigned char *);
emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
static blklist_t *bsd_findblk(emul64_tgt_t *, diskaddr_t, avl_index_t *);
static void bsd_allocblk(emul64_tgt_t *, diskaddr_t, caddr_t, avl_index_t);
static void bsd_freeblk(emul64_tgt_t *, blklist_t *);
static void emul64_yield_check();
static emul64_rng_overlap_t bsd_tgt_overlap(emul64_tgt_t *, diskaddr_t, int);

char *emul64_name = "emul64";	/* prefix used in all console messages */
200*85Scth 
201*85Scth 
/* XXX replace with FORMG0COUNT */
/*
 * Extract the transfer length from group 0/1/4 CDBs.  The multi-byte
 * forms assemble the count from the individual CDB bytes, most
 * significant first.  Each expansion is fully parenthesized; the
 * previous definitions of GETG1COUNT/GETG4COUNT ended in a bare `+'
 * chain, so an expression such as `x * GETG1COUNT(cdb)' would have
 * multiplied only the first term.
 */
#define	GETG0COUNT(cdb)		((cdb)->g0_count0)

#define	GETG1COUNT(cdb)		((((cdb)->g1_count1 << 8) | \
				(cdb)->g1_count0))

#define	GETG4COUNT(cdb)	\
			((((uint64_t)(cdb)->g4_count3 << 24) | \
			((uint64_t)(cdb)->g4_count2 << 16) | \
			((uint64_t)(cdb)->g4_count1 << 8) | \
			((uint64_t)(cdb)->g4_count0)))
213*85Scth 
214*85Scth 
215*85Scth /*
216*85Scth  * Initialize globals in this file.
217*85Scth  */
218*85Scth void
219*85Scth emul64_bsd_init()
220*85Scth {
221*85Scth 	emul64_zeros = (unsigned char *) kmem_zalloc(DEV_BSIZE, KM_SLEEP);
222*85Scth 	mutex_init(&emul64_stats_mutex, NULL, MUTEX_DRIVER, NULL);
223*85Scth 	mutex_init(&emul64_yield_mutex, NULL, MUTEX_DRIVER, NULL);
224*85Scth 	cv_init(&emul64_yield_cv, NULL, CV_DRIVER, NULL);
225*85Scth }
226*85Scth 
227*85Scth /*
228*85Scth  * Clean up globals in this file.
229*85Scth  */
230*85Scth void
231*85Scth emul64_bsd_fini()
232*85Scth {
233*85Scth 	cv_destroy(&emul64_yield_cv);
234*85Scth 	mutex_destroy(&emul64_yield_mutex);
235*85Scth 	mutex_destroy(&emul64_stats_mutex);
236*85Scth 	if (emul64_zeros != NULL) {
237*85Scth 		kmem_free(emul64_zeros, DEV_BSIZE);
238*85Scth 		emul64_zeros = NULL;
239*85Scth 	}
240*85Scth }
241*85Scth 
242*85Scth /*
243*85Scth  * Attempt to get the values of the properties that are specified in the
244*85Scth  * emul64_properties array.  If the property exists, copy its value to the
245*85Scth  * specified location.  All the properties have been assigned default
246*85Scth  * values in this driver, so if we cannot get the property that is not a
247*85Scth  * problem.
248*85Scth  */
249*85Scth void
250*85Scth emul64_bsd_get_props(dev_info_t *dip)
251*85Scth {
252*85Scth 	uint_t		count;
253*85Scth 	uint_t		i;
254*85Scth 	struct prop_map	*pmp;
255*85Scth 	int		*properties;
256*85Scth 
257*85Scth 	for (pmp = emul64_properties, i = 0;
258*85Scth 		i < sizeof (emul64_properties) / sizeof (struct prop_map);
259*85Scth 		i++, pmp++) {
260*85Scth 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
261*85Scth 				DDI_PROP_DONTPASS,
262*85Scth 				pmp->pm_name, &properties,
263*85Scth 				&count) == DDI_PROP_SUCCESS) {
264*85Scth 			if (count >= 1) {
265*85Scth 				*pmp->pm_value = *properties;
266*85Scth 			}
267*85Scth 			ddi_prop_free((void *) properties);
268*85Scth 		}
269*85Scth 	}
270*85Scth }
271*85Scth 
272*85Scth int
273*85Scth emul64_bsd_blkcompare(const void *a1, const void *b1)
274*85Scth {
275*85Scth 	blklist_t	*a = (blklist_t *)a1;
276*85Scth 	blklist_t	*b = (blklist_t *)b1;
277*85Scth 
278*85Scth 	if (a->bl_blkno < b->bl_blkno)
279*85Scth 		return (-1);
280*85Scth 	if (a->bl_blkno == b->bl_blkno)
281*85Scth 		return (0);
282*85Scth 	return (1);
283*85Scth }
284*85Scth 
/* ARGSUSED 0 */
/*
 * START STOP UNIT: a no-op for this in-memory disk; always succeeds.
 */
int
bsd_scsi_start_stop_unit(struct scsi_pkt *pkt)
{
	return (0);
}
291*85Scth 
/* ARGSUSED 0 */
/*
 * TEST UNIT READY: the emulated disk is always ready; always succeeds.
 */
int
bsd_scsi_test_unit_ready(struct scsi_pkt *pkt)
{
	return (0);
}
298*85Scth 
/* ARGSUSED 0 */
/*
 * REQUEST SENSE: no sense data is returned here; always succeeds.
 * (Check conditions are reported via emul64_check_cond() instead.)
 */
int
bsd_scsi_request_sense(struct scsi_pkt *pkt)
{
	return (0);
}
305*85Scth 
306*85Scth int
307*85Scth bsd_scsi_inq_page0(struct scsi_pkt *pkt, uchar_t pqdtype)
308*85Scth {
309*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
310*85Scth 
311*85Scth 	if (sp->cmd_count < 6) {
312*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page0: size %d required\n",
313*85Scth 		    emul64_name, 6);
314*85Scth 		return (EIO);
315*85Scth 	}
316*85Scth 
317*85Scth 	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
318*85Scth 	sp->cmd_addr[1] = 0;		/* page code */
319*85Scth 	sp->cmd_addr[2] = 0;		/* reserved */
320*85Scth 	sp->cmd_addr[3] = 6 - 3;	/* length */
321*85Scth 	sp->cmd_addr[4] = 0;		/* 1st page */
322*85Scth 	sp->cmd_addr[5] = 0x83;		/* 2nd page */
323*85Scth 
324*85Scth 	pkt->pkt_resid = sp->cmd_count - 6;
325*85Scth 	return (0);
326*85Scth }
327*85Scth 
328*85Scth int
329*85Scth bsd_scsi_inq_page83(struct scsi_pkt *pkt, uchar_t pqdtype)
330*85Scth {
331*85Scth 	struct emul64		*emul64 = PKT2EMUL64(pkt);
332*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
333*85Scth 	int			instance = ddi_get_instance(emul64->emul64_dip);
334*85Scth 
335*85Scth 	if (sp->cmd_count < 22) {
336*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page83: size %d required\n",
337*85Scth 		    emul64_name, 22);
338*85Scth 		return (EIO);
339*85Scth 	}
340*85Scth 
341*85Scth 	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
342*85Scth 	sp->cmd_addr[1] = 0x83;		/* page code */
343*85Scth 	sp->cmd_addr[2] = 0;		/* reserved */
344*85Scth 	sp->cmd_addr[3] = (22 - 8) + 4;	/* length */
345*85Scth 
346*85Scth 	sp->cmd_addr[4] = 1;		/* code set - binary */
347*85Scth 	sp->cmd_addr[5] = 3;		/* association and device ID type 3 */
348*85Scth 	sp->cmd_addr[6] = 0;		/* reserved */
349*85Scth 	sp->cmd_addr[7] = 22 - 8;	/* ID length */
350*85Scth 
351*85Scth 	sp->cmd_addr[8] = 0xde;		/* @8: identifier, byte 0 */
352*85Scth 	sp->cmd_addr[9] = 0xca;
353*85Scth 	sp->cmd_addr[10] = 0xde;
354*85Scth 	sp->cmd_addr[11] = 0x80;
355*85Scth 
356*85Scth 	sp->cmd_addr[12] = 0xba;
357*85Scth 	sp->cmd_addr[13] = 0xbe;
358*85Scth 	sp->cmd_addr[14] = 0xab;
359*85Scth 	sp->cmd_addr[15] = 0xba;
360*85Scth 					/* @22: */
361*85Scth 
362*85Scth 	/*
363*85Scth 	 * Instances seem to be assigned sequentially, so it unlikely that we
364*85Scth 	 * will have more than 65535 of them.
365*85Scth 	 */
366*85Scth 	sp->cmd_addr[16] = uint_to_byte1(instance);
367*85Scth 	sp->cmd_addr[17] = uint_to_byte0(instance);
368*85Scth 	sp->cmd_addr[18] = uint_to_byte1(TGT(sp));
369*85Scth 	sp->cmd_addr[19] = uint_to_byte0(TGT(sp));
370*85Scth 	sp->cmd_addr[20] = uint_to_byte1(LUN(sp));
371*85Scth 	sp->cmd_addr[21] = uint_to_byte0(LUN(sp));
372*85Scth 
373*85Scth 	pkt->pkt_resid = sp->cmd_count - 22;
374*85Scth 	return (0);
375*85Scth }
376*85Scth 
377*85Scth int
378*85Scth bsd_scsi_inquiry(struct scsi_pkt *pkt)
379*85Scth {
380*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
381*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
382*85Scth 	emul64_tgt_t		*tgt;
383*85Scth 	uchar_t			pqdtype;
384*85Scth 	struct scsi_inquiry	inq;
385*85Scth 
386*85Scth 	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
387*85Scth 	tgt = find_tgt(sp->cmd_emul64,
388*85Scth 	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
389*85Scth 	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
390*85Scth 
391*85Scth 	if (sp->cmd_count < sizeof (inq)) {
392*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_inquiry: size %d required\n",
393*85Scth 		    emul64_name, (int)sizeof (inq));
394*85Scth 		return (EIO);
395*85Scth 	}
396*85Scth 
397*85Scth 	if (cdb->cdb_opaque[1] & 0xfc) {
398*85Scth 		cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: 0x%x",
399*85Scth 		    emul64_name, cdb->cdb_opaque[1]);
400*85Scth 		emul64_check_cond(pkt, 0x5, 0x24, 0x0);	/* inv. fld in cdb */
401*85Scth 		return (0);
402*85Scth 	}
403*85Scth 
404*85Scth 	pqdtype = tgt->emul64_tgt_dtype;
405*85Scth 	if (cdb->cdb_opaque[1] & 0x1) {
406*85Scth 		switch (cdb->cdb_opaque[2]) {
407*85Scth 			case 0x00:
408*85Scth 				return (bsd_scsi_inq_page0(pkt, pqdtype));
409*85Scth 			case 0x83:
410*85Scth 				return (bsd_scsi_inq_page83(pkt, pqdtype));
411*85Scth 			default:
412*85Scth 				cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: "
413*85Scth 				    "unsupported 0x%x",
414*85Scth 				    emul64_name, cdb->cdb_opaque[2]);
415*85Scth 				return (0);
416*85Scth 		}
417*85Scth 	}
418*85Scth 
419*85Scth 	/* set up the inquiry data we return */
420*85Scth 	(void) bzero((void *)&inq, sizeof (inq));
421*85Scth 
422*85Scth 	inq.inq_dtype = pqdtype;
423*85Scth 	inq.inq_ansi = 2;
424*85Scth 	inq.inq_rdf = 2;
425*85Scth 	inq.inq_len = sizeof (inq) - 4;
426*85Scth 	inq.inq_wbus16 = 1;
427*85Scth 	inq.inq_cmdque = 1;
428*85Scth 
429*85Scth 	(void) bcopy(tgt->emul64_tgt_inq, inq.inq_vid,
430*85Scth 	    sizeof (tgt->emul64_tgt_inq));
431*85Scth 	(void) bcopy("1", inq.inq_revision, 2);
432*85Scth 	(void) bcopy((void *)&inq, sp->cmd_addr, sizeof (inq));
433*85Scth 
434*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (inq);
435*85Scth 	return (0);
436*85Scth }
437*85Scth 
/* ARGSUSED 0 */
/*
 * FORMAT UNIT: nothing to format on an in-memory disk; always succeeds.
 */
int
bsd_scsi_format(struct scsi_pkt *pkt)
{
	return (0);
}
444*85Scth 
445*85Scth int
446*85Scth bsd_scsi_io(struct scsi_pkt *pkt)
447*85Scth {
448*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
449*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
450*85Scth 	diskaddr_t		lblkno;
451*85Scth 	int			nblks;
452*85Scth 
453*85Scth 	switch (cdb->scc_cmd) {
454*85Scth 	case SCMD_READ:
455*85Scth 			lblkno = (uint32_t)GETG0ADDR(cdb);
456*85Scth 			nblks = GETG0COUNT(cdb);
457*85Scth 			pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
458*85Scth 					pkt->pkt_address.a_target,
459*85Scth 					pkt->pkt_address.a_lun,
460*85Scth 					lblkno, nblks, sp->cmd_addr);
461*85Scth 			if (emul64debug) {
462*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
463*85Scth 				    "read g0 blk=%lld (0x%llx) nblks=%d\n",
464*85Scth 				    emul64_name, lblkno, lblkno, nblks);
465*85Scth 			}
466*85Scth 		break;
467*85Scth 	case SCMD_WRITE:
468*85Scth 			lblkno = (uint32_t)GETG0ADDR(cdb);
469*85Scth 			nblks = GETG0COUNT(cdb);
470*85Scth 			pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
471*85Scth 					pkt->pkt_address.a_target,
472*85Scth 					pkt->pkt_address.a_lun,
473*85Scth 					lblkno, nblks, sp->cmd_addr);
474*85Scth 			if (emul64debug) {
475*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
476*85Scth 				    "write g0 blk=%lld (0x%llx) nblks=%d\n",
477*85Scth 				    emul64_name, lblkno, lblkno, nblks);
478*85Scth 			}
479*85Scth 		break;
480*85Scth 	case SCMD_READ_G1:
481*85Scth 			lblkno = (uint32_t)GETG1ADDR(cdb);
482*85Scth 			nblks = GETG1COUNT(cdb);
483*85Scth 			pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
484*85Scth 					pkt->pkt_address.a_target,
485*85Scth 					pkt->pkt_address.a_lun,
486*85Scth 					lblkno, nblks, sp->cmd_addr);
487*85Scth 			if (emul64debug) {
488*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
489*85Scth 				    "read g1 blk=%lld (0x%llx) nblks=%d\n",
490*85Scth 				    emul64_name, lblkno, lblkno, nblks);
491*85Scth 			}
492*85Scth 		break;
493*85Scth 	case SCMD_WRITE_G1:
494*85Scth 			lblkno = (uint32_t)GETG1ADDR(cdb);
495*85Scth 			nblks = GETG1COUNT(cdb);
496*85Scth 			pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
497*85Scth 					pkt->pkt_address.a_target,
498*85Scth 					pkt->pkt_address.a_lun,
499*85Scth 					lblkno, nblks, sp->cmd_addr);
500*85Scth 			if (emul64debug) {
501*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
502*85Scth 				    "write g1 blk=%lld (0x%llx) nblks=%d\n",
503*85Scth 				    emul64_name, lblkno, lblkno, nblks);
504*85Scth 			}
505*85Scth 		break;
506*85Scth 	case SCMD_READ_G4:
507*85Scth 			lblkno = GETG4ADDR(cdb);
508*85Scth 			lblkno <<= 32;
509*85Scth 			lblkno |= (uint32_t)GETG4ADDRTL(cdb);
510*85Scth 			nblks = GETG4COUNT(cdb);
511*85Scth 			pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
512*85Scth 					pkt->pkt_address.a_target,
513*85Scth 					pkt->pkt_address.a_lun,
514*85Scth 					lblkno, nblks, sp->cmd_addr);
515*85Scth 			if (emul64debug) {
516*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
517*85Scth 				    "read g4 blk=%lld (0x%llx) nblks=%d\n",
518*85Scth 				    emul64_name, lblkno, lblkno, nblks);
519*85Scth 			}
520*85Scth 		break;
521*85Scth 	case SCMD_WRITE_G4:
522*85Scth 			lblkno = GETG4ADDR(cdb);
523*85Scth 			lblkno <<= 32;
524*85Scth 			lblkno |= (uint32_t)GETG4ADDRTL(cdb);
525*85Scth 			nblks = GETG4COUNT(cdb);
526*85Scth 			pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
527*85Scth 					pkt->pkt_address.a_target,
528*85Scth 					pkt->pkt_address.a_lun,
529*85Scth 					lblkno, nblks, sp->cmd_addr);
530*85Scth 			if (emul64debug) {
531*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
532*85Scth 				    "write g4 blk=%lld (0x%llx) nblks=%d\n",
533*85Scth 				    emul64_name, lblkno, lblkno, nblks);
534*85Scth 			}
535*85Scth 		break;
536*85Scth 	default:
537*85Scth 		cmn_err(CE_WARN, "%s: bsd_scsi_io: unhandled I/O: 0x%x",
538*85Scth 		    emul64_name, cdb->scc_cmd);
539*85Scth 		break;
540*85Scth 	}
541*85Scth 
542*85Scth 	if (pkt->pkt_resid != 0)
543*85Scth 		cmn_err(CE_WARN, "%s: bsd_scsi_io: "
544*85Scth 		    "pkt_resid: 0x%lx, lblkno %lld, nblks %d",
545*85Scth 		    emul64_name, pkt->pkt_resid, lblkno, nblks);
546*85Scth 
547*85Scth 	return (0);
548*85Scth }
549*85Scth 
550*85Scth int
551*85Scth bsd_scsi_log_sense(struct scsi_pkt *pkt)
552*85Scth {
553*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
554*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
555*85Scth 	int			page_code;
556*85Scth 
557*85Scth 	if (sp->cmd_count < 9) {
558*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense size %d required\n",
559*85Scth 		    emul64_name, 9);
560*85Scth 		return (EIO);
561*85Scth 	}
562*85Scth 
563*85Scth 	page_code = cdb->cdb_opaque[2] & 0x3f;
564*85Scth 	if (page_code) {
565*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: "
566*85Scth 		    "page 0x%x not supported\n", emul64_name, page_code);
567*85Scth 		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
568*85Scth 		return (0);
569*85Scth 	}
570*85Scth 
571*85Scth 	sp->cmd_addr[0] = 0;		/* page code */
572*85Scth 	sp->cmd_addr[1] = 0;		/* reserved */
573*85Scth 	sp->cmd_addr[2] = 0;		/* MSB of page length */
574*85Scth 	sp->cmd_addr[3] = 8 - 3;	/* LSB of page length */
575*85Scth 
576*85Scth 	sp->cmd_addr[4] = 0;		/* MSB of parameter code */
577*85Scth 	sp->cmd_addr[5] = 0;		/* LSB of parameter code */
578*85Scth 	sp->cmd_addr[6] = 0;		/* parameter control byte */
579*85Scth 	sp->cmd_addr[7] = 4 - 3;	/* parameter length */
580*85Scth 	sp->cmd_addr[8] = 0x0;		/* parameter value */
581*85Scth 
582*85Scth 	pkt->pkt_resid = sp->cmd_count - 9;
583*85Scth 	return (0);
584*85Scth }
585*85Scth 
586*85Scth int
587*85Scth bsd_scsi_mode_sense(struct scsi_pkt *pkt)
588*85Scth {
589*85Scth 	union scsi_cdb	*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
590*85Scth 	int		page_control;
591*85Scth 	int		page_code;
592*85Scth 	int		rval = 0;
593*85Scth 
594*85Scth 	switch (cdb->scc_cmd) {
595*85Scth 	case SCMD_MODE_SENSE:
596*85Scth 			page_code = cdb->cdb_opaque[2] & 0x3f;
597*85Scth 			page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
598*85Scth 			if (emul64debug) {
599*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
600*85Scth 				    "page=0x%x control=0x%x nbytes=%d\n",
601*85Scth 				    emul64_name, page_code, page_control,
602*85Scth 				    GETG0COUNT(cdb));
603*85Scth 			}
604*85Scth 		break;
605*85Scth 	case SCMD_MODE_SENSE_G1:
606*85Scth 			page_code = cdb->cdb_opaque[2] & 0x3f;
607*85Scth 			page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
608*85Scth 			if (emul64debug) {
609*85Scth 				cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
610*85Scth 				    "page=0x%x control=0x%x nbytes=%d\n",
611*85Scth 				    emul64_name, page_code, page_control,
612*85Scth 				    GETG1COUNT(cdb));
613*85Scth 			}
614*85Scth 		break;
615*85Scth 	default:
616*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
617*85Scth 		    "cmd 0x%x not supported\n", emul64_name, cdb->scc_cmd);
618*85Scth 		return (EIO);
619*85Scth 	}
620*85Scth 
621*85Scth 	switch (page_code) {
622*85Scth 	case DAD_MODE_GEOMETRY:
623*85Scth 		rval = bsd_mode_sense_dad_mode_geometry(pkt);
624*85Scth 		break;
625*85Scth 	case DAD_MODE_ERR_RECOV:
626*85Scth 		rval = bsd_mode_sense_dad_mode_err_recov(pkt);
627*85Scth 		break;
628*85Scth 	case MODEPAGE_DISCO_RECO:
629*85Scth 		rval = bsd_mode_sense_modepage_disco_reco(pkt);
630*85Scth 		break;
631*85Scth 	case DAD_MODE_FORMAT:
632*85Scth 		rval = bsd_mode_sense_dad_mode_format(pkt);
633*85Scth 		break;
634*85Scth 	case DAD_MODE_CACHE:
635*85Scth 		rval = bsd_mode_sense_dad_mode_cache(pkt);
636*85Scth 		break;
637*85Scth 	default:
638*85Scth 		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
639*85Scth 		    "page 0x%x not supported\n", emul64_name, page_code);
640*85Scth 		rval = EIO;
641*85Scth 		break;
642*85Scth 	}
643*85Scth 
644*85Scth 	return (rval);
645*85Scth }
646*85Scth 
647*85Scth 
648*85Scth static int
649*85Scth bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *pkt)
650*85Scth {
651*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
652*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
653*85Scth 	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
654*85Scth 	emul64_tgt_t		*tgt;
655*85Scth 	int			page_control;
656*85Scth 	struct mode_header	header;
657*85Scth 	struct mode_geometry	page4;
658*85Scth 	int			ncyl;
659*85Scth 	int			rval = 0;
660*85Scth 
661*85Scth 	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
662*85Scth 
663*85Scth 	if (emul64debug) {
664*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
665*85Scth 		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
666*85Scth 	}
667*85Scth 
668*85Scth 	if (sp->cmd_count < (sizeof (header) + sizeof (page4))) {
669*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
670*85Scth 		    "size %d required\n",
671*85Scth 		    emul64_name, (int)(sizeof (header) + sizeof (page4)));
672*85Scth 		return (EIO);
673*85Scth 	}
674*85Scth 
675*85Scth 	(void) bzero(&header, sizeof (header));
676*85Scth 	(void) bzero(&page4, sizeof (page4));
677*85Scth 
678*85Scth 	header.length = sizeof (header) + sizeof (page4) - 1;
679*85Scth 	header.bdesc_length = 0;
680*85Scth 
681*85Scth 	page4.mode_page.code = DAD_MODE_GEOMETRY;
682*85Scth 	page4.mode_page.ps = 1;
683*85Scth 	page4.mode_page.length = sizeof (page4) - sizeof (struct mode_page);
684*85Scth 
685*85Scth 	switch (page_control) {
686*85Scth 	case MODE_SENSE_PC_CURRENT:
687*85Scth 	case MODE_SENSE_PC_DEFAULT:
688*85Scth 	case MODE_SENSE_PC_SAVED:
689*85Scth 		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
690*85Scth 		tgt = find_tgt(sp->cmd_emul64,
691*85Scth 		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
692*85Scth 		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
693*85Scth 		ncyl = tgt->emul64_tgt_ncyls;
694*85Scth 		page4.cyl_ub = uint_to_byte2(ncyl);
695*85Scth 		page4.cyl_mb = uint_to_byte1(ncyl);
696*85Scth 		page4.cyl_lb = uint_to_byte0(ncyl);
697*85Scth 		page4.heads = uint_to_byte0(tgt->emul64_tgt_nheads);
698*85Scth 		page4.rpm = ushort_to_scsi_ushort(dkg_rpm);
699*85Scth 		break;
700*85Scth 	case MODE_SENSE_PC_CHANGEABLE:
701*85Scth 		page4.cyl_ub = 0xff;
702*85Scth 		page4.cyl_mb = 0xff;
703*85Scth 		page4.cyl_lb = 0xff;
704*85Scth 		page4.heads = 0xff;
705*85Scth 		page4.rpm = 0xffff;
706*85Scth 		break;
707*85Scth 	}
708*85Scth 
709*85Scth 	(void) bcopy(&header, addr, sizeof (header));
710*85Scth 	(void) bcopy(&page4, addr + sizeof (header), sizeof (page4));
711*85Scth 
712*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (page4) - sizeof (header);
713*85Scth 	rval = 0;
714*85Scth 
715*85Scth 	return (rval);
716*85Scth }
717*85Scth 
718*85Scth static int
719*85Scth bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *pkt)
720*85Scth {
721*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
722*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
723*85Scth 	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
724*85Scth 	int			page_control;
725*85Scth 	struct mode_header	header;
726*85Scth 	struct mode_err_recov	page1;
727*85Scth 	int			rval = 0;
728*85Scth 
729*85Scth 	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
730*85Scth 
731*85Scth 	if (emul64debug) {
732*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
733*85Scth 		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
734*85Scth 	}
735*85Scth 
736*85Scth 	if (sp->cmd_count < (sizeof (header) + sizeof (page1))) {
737*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
738*85Scth 		    "size %d required\n",
739*85Scth 		    emul64_name, (int)(sizeof (header) + sizeof (page1)));
740*85Scth 		return (EIO);
741*85Scth 	}
742*85Scth 
743*85Scth 	(void) bzero(&header, sizeof (header));
744*85Scth 	(void) bzero(&page1, sizeof (page1));
745*85Scth 
746*85Scth 	header.length = sizeof (header) + sizeof (page1) - 1;
747*85Scth 	header.bdesc_length = 0;
748*85Scth 
749*85Scth 	page1.mode_page.code = DAD_MODE_ERR_RECOV;
750*85Scth 	page1.mode_page.ps = 1;
751*85Scth 	page1.mode_page.length = sizeof (page1) - sizeof (struct mode_page);
752*85Scth 
753*85Scth 	switch (page_control) {
754*85Scth 	case MODE_SENSE_PC_CURRENT:
755*85Scth 	case MODE_SENSE_PC_DEFAULT:
756*85Scth 	case MODE_SENSE_PC_SAVED:
757*85Scth 		break;
758*85Scth 	case MODE_SENSE_PC_CHANGEABLE:
759*85Scth 		break;
760*85Scth 	}
761*85Scth 
762*85Scth 	(void) bcopy(&header, addr, sizeof (header));
763*85Scth 	(void) bcopy(&page1, addr + sizeof (header), sizeof (page1));
764*85Scth 
765*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (page1) - sizeof (header);
766*85Scth 	rval = 0;
767*85Scth 
768*85Scth 	return (rval);
769*85Scth }
770*85Scth 
771*85Scth static int
772*85Scth bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *pkt)
773*85Scth {
774*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
775*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
776*85Scth 	int			rval = 0;
777*85Scth 	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
778*85Scth 	int			page_control;
779*85Scth 	struct mode_header	header;
780*85Scth 	struct mode_disco_reco	page2;
781*85Scth 
782*85Scth 	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
783*85Scth 
784*85Scth 	if (emul64debug) {
785*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
786*85Scth 		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
787*85Scth 	}
788*85Scth 
789*85Scth 	if (sp->cmd_count < (sizeof (header) + sizeof (page2))) {
790*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
791*85Scth 		    "size %d required\n",
792*85Scth 		    emul64_name, (int)(sizeof (header) + sizeof (page2)));
793*85Scth 		return (EIO);
794*85Scth 	}
795*85Scth 
796*85Scth 	(void) bzero(&header, sizeof (header));
797*85Scth 	(void) bzero(&page2, sizeof (page2));
798*85Scth 
799*85Scth 	header.length = sizeof (header) + sizeof (page2) - 1;
800*85Scth 	header.bdesc_length = 0;
801*85Scth 
802*85Scth 	page2.mode_page.code = MODEPAGE_DISCO_RECO;
803*85Scth 	page2.mode_page.ps = 1;
804*85Scth 	page2.mode_page.length = sizeof (page2) - sizeof (struct mode_page);
805*85Scth 
806*85Scth 	switch (page_control) {
807*85Scth 	case MODE_SENSE_PC_CURRENT:
808*85Scth 	case MODE_SENSE_PC_DEFAULT:
809*85Scth 	case MODE_SENSE_PC_SAVED:
810*85Scth 		break;
811*85Scth 	case MODE_SENSE_PC_CHANGEABLE:
812*85Scth 		break;
813*85Scth 	}
814*85Scth 
815*85Scth 	(void) bcopy(&header, addr, sizeof (header));
816*85Scth 	(void) bcopy(&page2, addr + sizeof (header), sizeof (page2));
817*85Scth 
818*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (page2) - sizeof (header);
819*85Scth 	rval = 0;
820*85Scth 
821*85Scth 	return (rval);
822*85Scth }
823*85Scth 
824*85Scth static int
825*85Scth bsd_mode_sense_dad_mode_format(struct scsi_pkt *pkt)
826*85Scth {
827*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
828*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
829*85Scth 	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
830*85Scth 	emul64_tgt_t		*tgt;
831*85Scth 	int			page_control;
832*85Scth 	struct mode_header	header;
833*85Scth 	struct mode_format	page3;
834*85Scth 	int			rval = 0;
835*85Scth 
836*85Scth 	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
837*85Scth 
838*85Scth 	if (emul64debug) {
839*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
840*85Scth 		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
841*85Scth 	}
842*85Scth 
843*85Scth 	if (sp->cmd_count < (sizeof (header) + sizeof (page3))) {
844*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
845*85Scth 		    "size %d required\n",
846*85Scth 		    emul64_name, (int)(sizeof (header) + sizeof (page3)));
847*85Scth 		return (EIO);
848*85Scth 	}
849*85Scth 
850*85Scth 	(void) bzero(&header, sizeof (header));
851*85Scth 	(void) bzero(&page3, sizeof (page3));
852*85Scth 
853*85Scth 	header.length = sizeof (header) + sizeof (page3) - 1;
854*85Scth 	header.bdesc_length = 0;
855*85Scth 
856*85Scth 	page3.mode_page.code = DAD_MODE_FORMAT;
857*85Scth 	page3.mode_page.ps = 1;
858*85Scth 	page3.mode_page.length = sizeof (page3) - sizeof (struct mode_page);
859*85Scth 
860*85Scth 	switch (page_control) {
861*85Scth 	case MODE_SENSE_PC_CURRENT:
862*85Scth 	case MODE_SENSE_PC_DEFAULT:
863*85Scth 	case MODE_SENSE_PC_SAVED:
864*85Scth 		page3.data_bytes_sect = ushort_to_scsi_ushort(DEV_BSIZE);
865*85Scth 		page3.interleave = ushort_to_scsi_ushort(1);
866*85Scth 		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
867*85Scth 		tgt = find_tgt(sp->cmd_emul64,
868*85Scth 			pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
869*85Scth 		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
870*85Scth 		page3.sect_track = ushort_to_scsi_ushort(tgt->emul64_tgt_nsect);
871*85Scth 		break;
872*85Scth 	case MODE_SENSE_PC_CHANGEABLE:
873*85Scth 		break;
874*85Scth 	}
875*85Scth 
876*85Scth 	(void) bcopy(&header, addr, sizeof (header));
877*85Scth 	(void) bcopy(&page3, addr + sizeof (header), sizeof (page3));
878*85Scth 
879*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (page3) - sizeof (header);
880*85Scth 	rval = 0;
881*85Scth 
882*85Scth 	return (rval);
883*85Scth }
884*85Scth 
885*85Scth static int
886*85Scth bsd_mode_sense_dad_mode_cache(struct scsi_pkt *pkt)
887*85Scth {
888*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
889*85Scth 	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
890*85Scth 	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
891*85Scth 	int			page_control;
892*85Scth 	struct mode_header	header;
893*85Scth 	struct mode_cache	page8;
894*85Scth 	int			rval = 0;
895*85Scth 
896*85Scth 	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
897*85Scth 
898*85Scth 	if (emul64debug) {
899*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
900*85Scth 		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
901*85Scth 	}
902*85Scth 
903*85Scth 	if (sp->cmd_count < (sizeof (header) + sizeof (page8))) {
904*85Scth 		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
905*85Scth 		    "size %d required\n",
906*85Scth 		    emul64_name, (int)(sizeof (header) + sizeof (page8)));
907*85Scth 		return (EIO);
908*85Scth 	}
909*85Scth 
910*85Scth 	(void) bzero(&header, sizeof (header));
911*85Scth 	(void) bzero(&page8, sizeof (page8));
912*85Scth 
913*85Scth 	header.length = sizeof (header) + sizeof (page8) - 1;
914*85Scth 	header.bdesc_length = 0;
915*85Scth 
916*85Scth 	page8.mode_page.code = DAD_MODE_CACHE;
917*85Scth 	page8.mode_page.ps = 1;
918*85Scth 	page8.mode_page.length = sizeof (page8) - sizeof (struct mode_page);
919*85Scth 
920*85Scth 	switch (page_control) {
921*85Scth 	case MODE_SENSE_PC_CURRENT:
922*85Scth 	case MODE_SENSE_PC_DEFAULT:
923*85Scth 	case MODE_SENSE_PC_SAVED:
924*85Scth 		break;
925*85Scth 	case MODE_SENSE_PC_CHANGEABLE:
926*85Scth 		break;
927*85Scth 	}
928*85Scth 
929*85Scth 	(void) bcopy(&header, addr, sizeof (header));
930*85Scth 	(void) bcopy(&page8, addr + sizeof (header), sizeof (page8));
931*85Scth 
932*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (page8) - sizeof (header);
933*85Scth 	rval = 0;
934*85Scth 
935*85Scth 	return (rval);
936*85Scth }
937*85Scth 
938*85Scth /* ARGSUSED 0 */
939*85Scth int
940*85Scth bsd_scsi_mode_select(struct scsi_pkt *pkt)
941*85Scth {
942*85Scth 	return (0);
943*85Scth }
944*85Scth 
945*85Scth int
946*85Scth bsd_scsi_read_capacity_8(struct scsi_pkt *pkt)
947*85Scth {
948*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
949*85Scth 	emul64_tgt_t		*tgt;
950*85Scth 	struct scsi_capacity	cap;
951*85Scth 	int			rval = 0;
952*85Scth 
953*85Scth 	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
954*85Scth 	tgt = find_tgt(sp->cmd_emul64,
955*85Scth 		pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
956*85Scth 	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
957*85Scth 	if (tgt->emul64_tgt_sectors > 0xffffffff)
958*85Scth 		cap.capacity = 0xffffffff;
959*85Scth 	else
960*85Scth 		cap.capacity =
961*85Scth 		    uint32_to_scsi_uint32(tgt->emul64_tgt_sectors);
962*85Scth 	cap.lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
963*85Scth 
964*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity);
965*85Scth 
966*85Scth 	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
967*85Scth 		    sizeof (struct scsi_capacity));
968*85Scth 	return (rval);
969*85Scth }
970*85Scth 
971*85Scth int
972*85Scth bsd_scsi_read_capacity_16(struct scsi_pkt *pkt)
973*85Scth {
974*85Scth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
975*85Scth 	emul64_tgt_t		*tgt;
976*85Scth 	struct scsi_capacity_16 cap;
977*85Scth 	int			rval = 0;
978*85Scth 
979*85Scth 	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
980*85Scth 	tgt = find_tgt(sp->cmd_emul64,
981*85Scth 		pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
982*85Scth 	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
983*85Scth 
984*85Scth 	cap.sc_capacity = uint64_to_scsi_uint64(tgt->emul64_tgt_sectors);
985*85Scth 	cap.sc_lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
986*85Scth 	cap.sc_rto_en = 0;
987*85Scth 	cap.sc_prot_en = 0;
988*85Scth 	cap.sc_rsvd0 = 0;
989*85Scth 	bzero(&cap.sc_rsvd1[0], sizeof (cap.sc_rsvd1));
990*85Scth 
991*85Scth 	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity_16);
992*85Scth 
993*85Scth 	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
994*85Scth 			sizeof (struct scsi_capacity_16));
995*85Scth 	return (rval);
996*85Scth }
997*85Scth int
998*85Scth bsd_scsi_read_capacity(struct scsi_pkt *pkt)
999*85Scth {
1000*85Scth 	return (bsd_scsi_read_capacity_8(pkt));
1001*85Scth }
1002*85Scth 
1003*85Scth 
1004*85Scth /* ARGSUSED 0 */
1005*85Scth int
1006*85Scth bsd_scsi_reserve(struct scsi_pkt *pkt)
1007*85Scth {
1008*85Scth 	return (0);
1009*85Scth }
1010*85Scth 
1011*85Scth /* ARGSUSED 0 */
1012*85Scth int
1013*85Scth bsd_scsi_release(struct scsi_pkt *pkt)
1014*85Scth {
1015*85Scth 	return (0);
1016*85Scth }
1017*85Scth 
1018*85Scth 
1019*85Scth int
1020*85Scth bsd_scsi_read_defect_list(struct scsi_pkt *pkt)
1021*85Scth {
1022*85Scth 	pkt->pkt_resid = 0;
1023*85Scth 	return (0);
1024*85Scth }
1025*85Scth 
1026*85Scth 
1027*85Scth /* ARGSUSED 0 */
1028*85Scth int
1029*85Scth bsd_scsi_reassign_block(struct scsi_pkt *pkt)
1030*85Scth {
1031*85Scth 	return (0);
1032*85Scth }
1033*85Scth 
1034*85Scth 
1035*85Scth static int
1036*85Scth bsd_readblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
1037*85Scth 		diskaddr_t blkno, int nblks, unsigned char *bufaddr)
1038*85Scth {
1039*85Scth 	emul64_tgt_t	*tgt;
1040*85Scth 	blklist_t	*blk;
1041*85Scth 	emul64_rng_overlap_t overlap;
1042*85Scth 	int		i = 0;
1043*85Scth 
1044*85Scth 	if (emul64debug) {
1045*85Scth 		cmn_err(CE_CONT, "%s: bsd_readblks: "
1046*85Scth 		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
1047*85Scth 		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
1048*85Scth 	}
1049*85Scth 
1050*85Scth 	emul64_yield_check();
1051*85Scth 
1052*85Scth 	EMUL64_MUTEX_ENTER(emul64);
1053*85Scth 	tgt = find_tgt(emul64, a_target, a_lun);
1054*85Scth 	EMUL64_MUTEX_EXIT(emul64);
1055*85Scth 	if (tgt == NULL) {
1056*85Scth 		cmn_err(CE_WARN, "%s: bsd_readblks: no target for %d,%d\n",
1057*85Scth 		    emul64_name, a_target, a_lun);
1058*85Scth 		goto unlocked_out;
1059*85Scth 	}
1060*85Scth 
1061*85Scth 	if (emul64_collect_stats) {
1062*85Scth 		mutex_enter(&emul64_stats_mutex);
1063*85Scth 		emul64_io_ops++;
1064*85Scth 		emul64_io_blocks += nblks;
1065*85Scth 		mutex_exit(&emul64_stats_mutex);
1066*85Scth 	}
1067*85Scth 	mutex_enter(&tgt->emul64_tgt_blk_lock);
1068*85Scth 
1069*85Scth 	/*
1070*85Scth 	 * Keep the ioctls from changing the nowrite list for the duration
1071*85Scth 	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
1072*85Scth 	 * results from our call to bsd_tgt_overlap from changing while we
1073*85Scth 	 * do the I/O.
1074*85Scth 	 */
1075*85Scth 	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
1076*85Scth 
1077*85Scth 	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
1078*85Scth 	switch (overlap) {
1079*85Scth 	case O_SAME:
1080*85Scth 	case O_SUBSET:
1081*85Scth 	case O_OVERLAP:
1082*85Scth 		cmn_err(CE_WARN, "%s: bsd_readblks: "
1083*85Scth 		    "read to blocked area %lld,%d\n",
1084*85Scth 		    emul64_name, blkno, nblks);
1085*85Scth 		rw_exit(&tgt->emul64_tgt_nw_lock);
1086*85Scth 		goto errout;
1087*85Scth 	case O_NONE:
1088*85Scth 		break;
1089*85Scth 	}
1090*85Scth 	for (i = 0; i < nblks; i++) {
1091*85Scth 		if (emul64_debug_blklist)
1092*85Scth 			cmn_err(CE_CONT, "%s: bsd_readblks: "
1093*85Scth 			    "%d of %d: blkno %lld\n",
1094*85Scth 			    emul64_name, i+1, nblks, blkno);
1095*85Scth 		if (blkno > tgt->emul64_tgt_sectors)
1096*85Scth 			break;
1097*85Scth 		blk = bsd_findblk(tgt, blkno, NULL);
1098*85Scth 		if (blk) {
1099*85Scth 			(void) bcopy(blk->bl_data, bufaddr, DEV_BSIZE);
1100*85Scth 		} else {
1101*85Scth 			(void) bzero(bufaddr, DEV_BSIZE);
1102*85Scth 		}
1103*85Scth 		blkno++;
1104*85Scth 		bufaddr += DEV_BSIZE;
1105*85Scth 	}
1106*85Scth 	rw_exit(&tgt->emul64_tgt_nw_lock);
1107*85Scth 
1108*85Scth errout:
1109*85Scth 	mutex_exit(&tgt->emul64_tgt_blk_lock);
1110*85Scth 
1111*85Scth unlocked_out:
1112*85Scth 	return ((nblks - i) * DEV_BSIZE);
1113*85Scth }
1114*85Scth 
1115*85Scth 
1116*85Scth static int
1117*85Scth bsd_writeblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
1118*85Scth 		diskaddr_t blkno, int nblks, unsigned char *bufaddr)
1119*85Scth {
1120*85Scth 	emul64_tgt_t	*tgt;
1121*85Scth 	blklist_t	*blk;
1122*85Scth 	emul64_rng_overlap_t overlap;
1123*85Scth 	avl_index_t	where;
1124*85Scth 	int		i = 0;
1125*85Scth 
1126*85Scth 	if (emul64debug) {
1127*85Scth 		cmn_err(CE_CONT, "%s: bsd_writeblks: "
1128*85Scth 		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
1129*85Scth 		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
1130*85Scth 	}
1131*85Scth 
1132*85Scth 	emul64_yield_check();
1133*85Scth 
1134*85Scth 	EMUL64_MUTEX_ENTER(emul64);
1135*85Scth 	tgt = find_tgt(emul64, a_target, a_lun);
1136*85Scth 	EMUL64_MUTEX_EXIT(emul64);
1137*85Scth 	if (tgt == NULL) {
1138*85Scth 		cmn_err(CE_WARN, "%s: bsd_writeblks: no target for %d,%d\n",
1139*85Scth 		    emul64_name, a_target, a_lun);
1140*85Scth 		goto unlocked_out;
1141*85Scth 	}
1142*85Scth 
1143*85Scth 	if (emul64_collect_stats) {
1144*85Scth 		mutex_enter(&emul64_stats_mutex);
1145*85Scth 		emul64_io_ops++;
1146*85Scth 		emul64_io_blocks += nblks;
1147*85Scth 		mutex_exit(&emul64_stats_mutex);
1148*85Scth 	}
1149*85Scth 	mutex_enter(&tgt->emul64_tgt_blk_lock);
1150*85Scth 
1151*85Scth 	/*
1152*85Scth 	 * Keep the ioctls from changing the nowrite list for the duration
1153*85Scth 	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
1154*85Scth 	 * results from our call to bsd_tgt_overlap from changing while we
1155*85Scth 	 * do the I/O.
1156*85Scth 	 */
1157*85Scth 	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
1158*85Scth 	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
1159*85Scth 	switch (overlap) {
1160*85Scth 	case O_SAME:
1161*85Scth 	case O_SUBSET:
1162*85Scth 		if (emul64_collect_stats) {
1163*85Scth 			mutex_enter(&emul64_stats_mutex);
1164*85Scth 			emul64_skipped_io++;
1165*85Scth 			emul64_skipped_blk += nblks;
1166*85Scth 			mutex_exit(&emul64_stats_mutex);
1167*85Scth 		}
1168*85Scth 		rw_exit(&tgt->emul64_tgt_nw_lock);
1169*85Scth 		mutex_exit(&tgt->emul64_tgt_blk_lock);
1170*85Scth 		return (0);
1171*85Scth 	case O_OVERLAP:
1172*85Scth 	case O_NONE:
1173*85Scth 		break;
1174*85Scth 	}
1175*85Scth 	for (i = 0; i < nblks; i++) {
1176*85Scth 		if ((overlap == O_NONE) ||
1177*85Scth 		    (bsd_tgt_overlap(tgt, blkno, 1) == O_NONE)) {
1178*85Scth 			/*
1179*85Scth 			 * If there was no overlap for the entire I/O range
1180*85Scth 			 * or if there is no overlap for this particular
1181*85Scth 			 * block, then we need to do the write.
1182*85Scth 			 */
1183*85Scth 			if (emul64_debug_blklist)
1184*85Scth 				cmn_err(CE_CONT, "%s: bsd_writeblks: "
1185*85Scth 				    "%d of %d: blkno %lld\n",
1186*85Scth 				    emul64_name, i+1, nblks, blkno);
1187*85Scth 			if (blkno > tgt->emul64_tgt_sectors) {
1188*85Scth 				cmn_err(CE_WARN, "%s: bsd_writeblks: "
1189*85Scth 				    "blkno %lld, tgt_sectors %lld\n",
1190*85Scth 				    emul64_name, blkno,
1191*85Scth 				    tgt->emul64_tgt_sectors);
1192*85Scth 				break;
1193*85Scth 			}
1194*85Scth 
1195*85Scth 			blk = bsd_findblk(tgt, blkno, &where);
1196*85Scth 			if (bcmp(bufaddr, emul64_zeros, DEV_BSIZE) == 0) {
1197*85Scth 				if (blk) {
1198*85Scth 					bsd_freeblk(tgt, blk);
1199*85Scth 				}
1200*85Scth 			} else {
1201*85Scth 				if (blk) {
1202*85Scth 					(void) bcopy(bufaddr, blk->bl_data,
1203*85Scth 							DEV_BSIZE);
1204*85Scth 				} else {
1205*85Scth 					bsd_allocblk(tgt,
1206*85Scth 							blkno,
1207*85Scth 							(caddr_t)bufaddr,
1208*85Scth 							where);
1209*85Scth 				}
1210*85Scth 			}
1211*85Scth 		}
1212*85Scth 		blkno++;
1213*85Scth 		bufaddr += DEV_BSIZE;
1214*85Scth 	}
1215*85Scth 
1216*85Scth 	/*
1217*85Scth 	 * Now that we're done with our I/O, allow the ioctls to change the
1218*85Scth 	 * nowrite list.
1219*85Scth 	 */
1220*85Scth 	rw_exit(&tgt->emul64_tgt_nw_lock);
1221*85Scth 
1222*85Scth errout:
1223*85Scth 	mutex_exit(&tgt->emul64_tgt_blk_lock);
1224*85Scth 
1225*85Scth unlocked_out:
1226*85Scth 	return ((nblks - i) * DEV_BSIZE);
1227*85Scth }
1228*85Scth 
1229*85Scth emul64_tgt_t *
1230*85Scth find_tgt(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun)
1231*85Scth {
1232*85Scth 	emul64_tgt_t	*tgt;
1233*85Scth 
1234*85Scth 	tgt = emul64->emul64_tgt;
1235*85Scth 	while (tgt) {
1236*85Scth 		if (tgt->emul64_tgt_saddr.a_target == a_target &&
1237*85Scth 		    tgt->emul64_tgt_saddr.a_lun == a_lun) {
1238*85Scth 			break;
1239*85Scth 		}
1240*85Scth 		tgt = tgt->emul64_tgt_next;
1241*85Scth 	}
1242*85Scth 	return (tgt);
1243*85Scth 
1244*85Scth }
1245*85Scth 
1246*85Scth /*
1247*85Scth  * Free all blocks that are part of the specified range.
1248*85Scth  */
1249*85Scth int
1250*85Scth bsd_freeblkrange(emul64_tgt_t *tgt, emul64_range_t *range)
1251*85Scth {
1252*85Scth 	blklist_t	*blk;
1253*85Scth 	blklist_t	*nextblk;
1254*85Scth 
1255*85Scth 	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
1256*85Scth 	for (blk = (blklist_t *)avl_first(&tgt->emul64_tgt_data);
1257*85Scth 		blk != NULL;
1258*85Scth 		blk = nextblk) {
1259*85Scth 		/*
1260*85Scth 		 * We need to get the next block pointer now, because blk
1261*85Scth 		 * will be freed inside the if statement.
1262*85Scth 		 */
1263*85Scth 		nextblk = AVL_NEXT(&tgt->emul64_tgt_data, blk);
1264*85Scth 
1265*85Scth 		if (emul64_overlap(range, blk->bl_blkno, (size_t)1) != O_NONE) {
1266*85Scth 			bsd_freeblk(tgt, blk);
1267*85Scth 		}
1268*85Scth 	}
1269*85Scth 	return (0);
1270*85Scth }
1271*85Scth 
1272*85Scth static blklist_t *
1273*85Scth bsd_findblk(emul64_tgt_t *tgt, diskaddr_t blkno, avl_index_t *where)
1274*85Scth {
1275*85Scth 	blklist_t	*blk;
1276*85Scth 	blklist_t	search;
1277*85Scth 
1278*85Scth 	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
1279*85Scth 
1280*85Scth 	search.bl_blkno = blkno;
1281*85Scth 	blk = (blklist_t *)avl_find(&tgt->emul64_tgt_data, &search, where);
1282*85Scth 	return (blk);
1283*85Scth }
1284*85Scth 
1285*85Scth 
1286*85Scth static void
1287*85Scth bsd_allocblk(emul64_tgt_t *tgt,
1288*85Scth 		diskaddr_t blkno,
1289*85Scth 		caddr_t data,
1290*85Scth 		avl_index_t where)
1291*85Scth {
1292*85Scth 	blklist_t	*blk;
1293*85Scth 
1294*85Scth 	if (emul64_debug_blklist)
1295*85Scth 		cmn_err(CE_CONT, "%s: bsd_allocblk: %llu\n",
1296*85Scth 		    emul64_name, blkno);
1297*85Scth 
1298*85Scth 	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
1299*85Scth 
1300*85Scth 	blk = (blklist_t *)kmem_zalloc(sizeof (blklist_t), KM_SLEEP);
1301*85Scth 	blk->bl_data = (uchar_t *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
1302*85Scth 	blk->bl_blkno = blkno;
1303*85Scth 	(void) bcopy(data, blk->bl_data, DEV_BSIZE);
1304*85Scth 	avl_insert(&tgt->emul64_tgt_data, (void *) blk, where);
1305*85Scth 
1306*85Scth 	if (emul64_collect_stats) {
1307*85Scth 		mutex_enter(&emul64_stats_mutex);
1308*85Scth 		emul64_nonzero++;
1309*85Scth 		tgt->emul64_list_length++;
1310*85Scth 		if (tgt->emul64_list_length > emul64_max_list_length) {
1311*85Scth 			emul64_max_list_length = tgt->emul64_list_length;
1312*85Scth 		}
1313*85Scth 		mutex_exit(&emul64_stats_mutex);
1314*85Scth 	}
1315*85Scth }
1316*85Scth 
1317*85Scth static void
1318*85Scth bsd_freeblk(emul64_tgt_t *tgt, blklist_t *blk)
1319*85Scth {
1320*85Scth 	if (emul64_debug_blklist)
1321*85Scth 		cmn_err(CE_CONT, "%s: bsd_freeblk: <%d,%d> blk=%lld\n",
1322*85Scth 		    emul64_name, tgt->emul64_tgt_saddr.a_target,
1323*85Scth 		    tgt->emul64_tgt_saddr.a_lun, blk->bl_blkno);
1324*85Scth 
1325*85Scth 	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
1326*85Scth 
1327*85Scth 	avl_remove(&tgt->emul64_tgt_data, (void *) blk);
1328*85Scth 	if (emul64_collect_stats) {
1329*85Scth 		mutex_enter(&emul64_stats_mutex);
1330*85Scth 		emul64_nonzero--;
1331*85Scth 		tgt->emul64_list_length--;
1332*85Scth 		mutex_exit(&emul64_stats_mutex);
1333*85Scth 	}
1334*85Scth 	kmem_free(blk->bl_data, DEV_BSIZE);
1335*85Scth 	kmem_free(blk, sizeof (blklist_t));
1336*85Scth }
1337*85Scth 
1338*85Scth /*
1339*85Scth  * Look for overlap between a nowrite range and a block range.
1340*85Scth  *
1341*85Scth  * NOTE:  Callers of this function must hold the tgt->emul64_tgt_nw_lock
1342*85Scth  *	  lock.  For the purposes of this function, a reader lock is
1343*85Scth  *	  sufficient.
1344*85Scth  */
1345*85Scth static emul64_rng_overlap_t
1346*85Scth bsd_tgt_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int count)
1347*85Scth {
1348*85Scth 	emul64_nowrite_t	*nw;
1349*85Scth 	emul64_rng_overlap_t	rv = O_NONE;
1350*85Scth 
1351*85Scth 	for (nw = tgt->emul64_tgt_nowrite;
1352*85Scth 		(nw != NULL) && (rv == O_NONE);
1353*85Scth 		nw = nw->emul64_nwnext) {
1354*85Scth 		rv = emul64_overlap(&nw->emul64_blocked,
1355*85Scth 				    blkno,
1356*85Scth 				    (size_t)count);
1357*85Scth 	}
1358*85Scth 	return (rv);
1359*85Scth }
1360*85Scth 
1361*85Scth /*
1362*85Scth  * Operations that do a lot of I/O, such as RAID 5 initializations, result
1363*85Scth  * in a CPU bound kernel when the device is an emul64 device.  This makes
1364*85Scth  * the machine look hung.  To avoid this problem, give up the CPU from time
1365*85Scth  * to time.
1366*85Scth  */
1367*85Scth 
1368*85Scth static void
1369*85Scth emul64_yield_check()
1370*85Scth {
1371*85Scth 	static uint_t	emul64_io_count = 0;	/* # I/Os since last wait */
1372*85Scth 	static uint_t	emul64_waiting = FALSE;	/* TRUE -> a thread is in */
1373*85Scth 						/*   cv_timed wait. */
1374*85Scth 	clock_t		ticks;
1375*85Scth 
1376*85Scth 	if (emul64_yield_enable == 0)
1377*85Scth 		return;
1378*85Scth 
1379*85Scth 	mutex_enter(&emul64_yield_mutex);
1380*85Scth 
1381*85Scth 	if (emul64_waiting == TRUE) {
1382*85Scth 		/*
1383*85Scth 		 * Another thread has already started the timer.  We'll
1384*85Scth 		 * just wait here until their time expires, and they
1385*85Scth 		 * broadcast to us.  When they do that, we'll return and
1386*85Scth 		 * let our caller do more I/O.
1387*85Scth 		 */
1388*85Scth 		cv_wait(&emul64_yield_cv, &emul64_yield_mutex);
1389*85Scth 	} else if (emul64_io_count++ > emul64_yield_period) {
1390*85Scth 		/*
1391*85Scth 		 * Set emul64_waiting to let other threads know that we
1392*85Scth 		 * have started the timer.
1393*85Scth 		 */
1394*85Scth 		emul64_waiting = TRUE;
1395*85Scth 		emul64_num_delay_called++;
1396*85Scth 		ticks = drv_usectohz(emul64_yield_length);
1397*85Scth 		if (ticks == 0)
1398*85Scth 			ticks = 1;
1399*85Scth 		(void) cv_timedwait(&emul64_yield_cv,
1400*85Scth 		    &emul64_yield_mutex, ddi_get_lbolt() + ticks);
1401*85Scth 		emul64_io_count = 0;
1402*85Scth 		emul64_waiting = FALSE;
1403*85Scth 
1404*85Scth 		/* Broadcast in case others are waiting. */
1405*85Scth 		cv_broadcast(&emul64_yield_cv);
1406*85Scth 	}
1407*85Scth 
1408*85Scth 	mutex_exit(&emul64_yield_mutex);
1409*85Scth }
1410