1 /*	$NetBSD: sd.c,v 1.305 2014/03/16 05:20:29 dholland Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Originally written by Julian Elischer (julian@dialix.oz.au)
34  * for TRW Financial Systems for use under the MACH(2.5) operating system.
35  *
36  * TRW Financial Systems, in accordance with their agreement with Carnegie
37  * Mellon University, makes this software available to CMU to distribute
38  * or use in any manner that they see fit as long as this message is kept with
39  * the software. For this reason TFS also grants any other persons or
40  * organisations permission to use or modify this software.
41  *
42  * TFS supplies this software to be publicly redistributed
43  * on the understanding that TFS is not responsible for the correct
44  * functioning of this software in any circumstances.
45  *
46  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
47  */
48 
49 #include <sys/cdefs.h>
50 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.305 2014/03/16 05:20:29 dholland Exp $");
51 
52 #include "opt_scsi.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/kernel.h>
57 #include <sys/file.h>
58 #include <sys/stat.h>
59 #include <sys/ioctl.h>
60 #include <sys/scsiio.h>
61 #include <sys/buf.h>
62 #include <sys/bufq.h>
63 #include <sys/uio.h>
64 #include <sys/malloc.h>
65 #include <sys/errno.h>
66 #include <sys/device.h>
67 #include <sys/disklabel.h>
68 #include <sys/disk.h>
69 #include <sys/proc.h>
70 #include <sys/conf.h>
71 #include <sys/vnode.h>
72 #include <sys/rnd.h>
73 #include <sys/cprng.h>
74 
75 #include <dev/scsipi/scsi_spc.h>
76 #include <dev/scsipi/scsipi_all.h>
77 #include <dev/scsipi/scsi_all.h>
78 #include <dev/scsipi/scsipi_disk.h>
79 #include <dev/scsipi/scsi_disk.h>
80 #include <dev/scsipi/scsiconf.h>
81 #include <dev/scsipi/scsipi_base.h>
82 #include <dev/scsipi/sdvar.h>
83 
84 #include <prop/proplib.h>
85 
86 #define	SDUNIT(dev)			DISKUNIT(dev)
87 #define	SDPART(dev)			DISKPART(dev)
88 #define	SDMINOR(unit, part)		DISKMINOR(unit, part)
89 #define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)
90 
91 #define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
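/*
 * Illustrative example: for the device node sd0e, SDUNIT() yields unit 0
 * and SDPART() yields partition 4 ('e'); SDLABELDEV() rebuilds the
 * corresponding RAW_PART device, which is used below for disklabel I/O.
 */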
92 
93 #define	SD_DEFAULT_BLKSIZE	512
94 
95 static void	sdminphys(struct buf *);
96 static void	sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
97 static int	sdgetdisklabel(struct sd_softc *);
98 static void	sdstart(struct scsipi_periph *);
99 static void	sdrestart(void *);
100 static void	sddone(struct scsipi_xfer *, int);
101 static bool	sd_suspend(device_t, const pmf_qual_t *);
102 static bool	sd_shutdown(device_t, int);
103 static int	sd_interpret_sense(struct scsipi_xfer *);
104 static int	sdlastclose(device_t);
105 
106 static int	sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
107 		    int, int *);
108 static int	sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
109 		    int);
110 static int	sd_validate_blksize(struct scsipi_periph *, int);
111 static u_int64_t sd_read_capacity(struct scsipi_periph *, int *, int flags);
112 static int	sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
113 		    int);
114 static int	sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
115 static int	sd_get_parms(struct sd_softc *, struct disk_parms *, int);
116 static int	sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
117 		    int);
118 static int	sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
119 		    int);
120 
121 static int	sd_flush(struct sd_softc *, int);
122 static int	sd_getcache(struct sd_softc *, int *);
123 static int	sd_setcache(struct sd_softc *, int);
124 
125 static int	sdmatch(device_t, cfdata_t, void *);
126 static void	sdattach(device_t, device_t, void *);
127 static int	sddetach(device_t, int);
128 static void	sd_set_geometry(struct sd_softc *);
129 
130 CFATTACH_DECL3_NEW(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
131     NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
132 
133 extern struct cfdriver sd_cd;
134 
135 static const struct scsipi_inquiry_pattern sd_patterns[] = {
136 	{T_DIRECT, T_FIXED,
137 	 "",         "",                 ""},
138 	{T_DIRECT, T_REMOV,
139 	 "",         "",                 ""},
140 	{T_OPTICAL, T_FIXED,
141 	 "",         "",                 ""},
142 	{T_OPTICAL, T_REMOV,
143 	 "",         "",                 ""},
144 	{T_SIMPLE_DIRECT, T_FIXED,
145 	 "",         "",                 ""},
146 	{T_SIMPLE_DIRECT, T_REMOV,
147 	 "",         "",                 ""},
148 };
149 
150 static dev_type_open(sdopen);
151 static dev_type_close(sdclose);
152 static dev_type_read(sdread);
153 static dev_type_write(sdwrite);
154 static dev_type_ioctl(sdioctl);
155 static dev_type_strategy(sdstrategy);
156 static dev_type_dump(sddump);
157 static dev_type_size(sdsize);
158 
159 const struct bdevsw sd_bdevsw = {
160 	.d_open = sdopen,
161 	.d_close = sdclose,
162 	.d_strategy = sdstrategy,
163 	.d_ioctl = sdioctl,
164 	.d_dump = sddump,
165 	.d_psize = sdsize,
166 	.d_flag = D_DISK
167 };
168 
169 const struct cdevsw sd_cdevsw = {
170 	.d_open = sdopen,
171 	.d_close = sdclose,
172 	.d_read = sdread,
173 	.d_write = sdwrite,
174 	.d_ioctl = sdioctl,
175 	.d_stop = nostop,
176 	.d_tty = notty,
177 	.d_poll = nopoll,
178 	.d_mmap = nommap,
179 	.d_kqfilter = nokqfilter,
180 	.d_flag = D_DISK
181 };
182 
183 static struct dkdriver sddkdriver = { sdstrategy, sdminphys };
184 
185 static const struct scsipi_periphsw sd_switch = {
186 	sd_interpret_sense,	/* check our error handler first */
187 	sdstart,		/* have a queue, served by this */
188 	NULL,			/* have no async handler */
189 	sddone,			/* deal with stats at interrupt time */
190 };
191 
192 struct sd_mode_sense_data {
193 	/*
194 	 * XXX
195 	 * We are not going to parse this as-is -- it just has to be large
196 	 * enough.
197 	 */
198 	union {
199 		struct scsi_mode_parameter_header_6 small;
200 		struct scsi_mode_parameter_header_10 big;
201 	} header;
202 	struct scsi_general_block_descriptor blk_desc;
203 	union scsi_disk_pages pages;
204 };
205 
206 /*
207  * The routine called by the low level scsi routine when it discovers
208  * a device suitable for this driver.
209  */
210 static int
211 sdmatch(device_t parent, cfdata_t match,
212     void *aux)
213 {
214 	struct scsipibus_attach_args *sa = aux;
215 	int priority;
216 
217 	(void)scsipi_inqmatch(&sa->sa_inqbuf,
218 	    sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
219 	    sizeof(sd_patterns[0]), &priority);
220 
221 	return (priority);
222 }
223 
224 /*
225  * Attach routine common to atapi & scsi.
226  */
227 static void
228 sdattach(device_t parent, device_t self, void *aux)
229 {
230 	struct sd_softc *sd = device_private(self);
231 	struct scsipibus_attach_args *sa = aux;
232 	struct scsipi_periph *periph = sa->sa_periph;
233 	int error, result;
234 	struct disk_parms *dp = &sd->params;
235 	char pbuf[9];
236 
237 	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
238 
239 	sd->sc_dev = self;
240 	sd->type = (sa->sa_inqbuf.type & SID_TYPE);
241 	strncpy(sd->name, sa->sa_inqbuf.product, sizeof(sd->name));
242 	if (sd->type == T_SIMPLE_DIRECT)
243 		periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
244 
245 	if (SCSIPI_BUSTYPE_TYPE(scsipi_periph_bustype(sa->sa_periph)) ==
246 	    SCSIPI_BUSTYPE_SCSI && periph->periph_version == 0)
247 		sd->flags |= SDF_ANCIENT;
248 
249 	bufq_alloc(&sd->buf_queue, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
250 
251 	callout_init(&sd->sc_callout, 0);
252 
253 	/*
254 	 * Store information needed to contact our base driver
255 	 */
256 	sd->sc_periph = periph;
257 
258 	periph->periph_dev = sd->sc_dev;
259 	periph->periph_switch = &sd_switch;
260 
261         /*
262          * Increase our openings to the maximum-per-periph
263          * supported by the adapter.  This will either be
264          * clamped down or grown by the adapter if necessary.
265          */
266 	periph->periph_openings =
267 	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
268 	periph->periph_flags |= PERIPH_GROW_OPENINGS;
269 
270 	/*
271 	 * Initialize and attach the disk structure.
272 	 */
273 	disk_init(&sd->sc_dk, device_xname(sd->sc_dev), &sddkdriver);
274 	disk_attach(&sd->sc_dk);
275 
276 	/*
277 	 * Use the subdriver to request information regarding the drive.
278 	 */
279 	aprint_naive("\n");
280 	aprint_normal("\n");
281 
282 	if (periph->periph_quirks & PQUIRK_START)
283 		(void)scsipi_start(periph, SSS_START, XS_CTL_SILENT);
284 
285 	error = scsipi_test_unit_ready(periph,
286 	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
287 	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
288 
289 	if (error)
290 		result = SDGP_RESULT_OFFLINE;
291 	else
292 		result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
293 	aprint_normal_dev(sd->sc_dev, "");
294 	switch (result) {
295 	case SDGP_RESULT_OK:
296 		format_bytes(pbuf, sizeof(pbuf),
297 		    (u_int64_t)dp->disksize * dp->blksize);
298 	        aprint_normal(
299 		"%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
300 		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
301 		    (unsigned long long)dp->disksize);
302 		break;
303 
304 	case SDGP_RESULT_OFFLINE:
305 		aprint_normal("drive offline");
306 		break;
307 
308 	case SDGP_RESULT_UNFORMATTED:
309 		aprint_normal("unformatted media");
310 		break;
311 
312 #ifdef DIAGNOSTIC
313 	default:
314 		panic("sdattach: unknown result from get_parms");
315 		break;
316 #endif
317 	}
318 	aprint_normal("\n");
319 
320 	/*
321 	 * Establish a shutdown hook so that we can ensure that
322 	 * our data has actually made it onto the platter at
323 	 * shutdown time.  Note that this relies on the fact
324 	 * that the shutdown hooks at the "leaves" of the device tree
325  * are run first (thus guaranteeing that our hook runs before
326 	 * our ancestors').
327 	 */
328 	if (!pmf_device_register1(self, sd_suspend, NULL, sd_shutdown))
329 		aprint_error_dev(self, "couldn't establish power handler\n");
330 
331 	/*
332 	 * attach the device into the random source list
333 	 */
334 	rnd_attach_source(&sd->rnd_source, device_xname(sd->sc_dev),
335 			  RND_TYPE_DISK, 0);
336 
337 	/* Discover wedges on this disk. */
338 	dkwedge_discover(&sd->sc_dk);
339 
340 	/*
341 	 * Disk insertion and removal times can be a useful source
342 	 * of entropy, though the estimator should never _count_
343 	 * these bits, on insertion, because the deltas to the
344  * (nonexistent) previous event should never allow it.
345 	 */
346 	rnd_add_uint32(&sd->rnd_source, 0);
347 }
348 
349 static int
350 sddetach(device_t self, int flags)
351 {
352 	struct sd_softc *sd = device_private(self);
353 	int s, bmaj, cmaj, i, mn, rc;
354 
355 	rnd_add_uint32(&sd->rnd_source, 0);
356 
357 	if ((rc = disk_begindetach(&sd->sc_dk, sdlastclose, self, flags)) != 0)
358 		return rc;
359 
360 	/* locate the major number */
361 	bmaj = bdevsw_lookup_major(&sd_bdevsw);
362 	cmaj = cdevsw_lookup_major(&sd_cdevsw);
363 
364 	/* Nuke the vnodes for any open instances */
365 	for (i = 0; i < MAXPARTITIONS; i++) {
366 		mn = SDMINOR(device_unit(self), i);
367 		vdevgone(bmaj, mn, mn, VBLK);
368 		vdevgone(cmaj, mn, mn, VCHR);
369 	}
370 
371 	/* kill any pending restart */
372 	callout_stop(&sd->sc_callout);
373 
374 	/* Delete all of our wedges. */
375 	dkwedge_delall(&sd->sc_dk);
376 
377 	s = splbio();
378 
379 	/* Kill off any queued buffers. */
380 	bufq_drain(sd->buf_queue);
381 
382 	bufq_free(sd->buf_queue);
383 
384 	/* Kill off any pending commands. */
385 	scsipi_kill_pending(sd->sc_periph);
386 
387 	splx(s);
388 
389 	/* Detach from the disk list. */
390 	disk_detach(&sd->sc_dk);
391 	disk_destroy(&sd->sc_dk);
392 
393 	callout_destroy(&sd->sc_callout);
394 
395 	pmf_device_deregister(self);
396 
397 	/* Unhook the entropy source. */
398 	rnd_detach_source(&sd->rnd_source);
399 
400 	return (0);
401 }
402 
403 /*
404  * open the device. Make sure the partition info is as up-to-date as can be.
405  */
406 static int
407 sdopen(dev_t dev, int flag, int fmt, struct lwp *l)
408 {
409 	struct sd_softc *sd;
410 	struct scsipi_periph *periph;
411 	struct scsipi_adapter *adapt;
412 	int unit, part;
413 	int error;
414 
415 	unit = SDUNIT(dev);
416 	sd = device_lookup_private(&sd_cd, unit);
417 	if (sd == NULL)
418 		return (ENXIO);
419 
420 	if (!device_is_active(sd->sc_dev))
421 		return (ENODEV);
422 
423 	part = SDPART(dev);
424 
425 	mutex_enter(&sd->sc_dk.dk_openlock);
426 
427 	/*
428 	 * If there are wedges, and this is not RAW_PART, then we
429 	 * need to fail.
430 	 */
431 	if (sd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
432 		error = EBUSY;
433 		goto bad1;
434 	}
435 
436 	periph = sd->sc_periph;
437 	adapt = periph->periph_channel->chan_adapter;
438 
439 	SC_DEBUG(periph, SCSIPI_DB1,
440 	    ("sdopen: dev=0x%"PRIx64" (unit %d (of %d), partition %d)\n", dev, unit,
441 	    sd_cd.cd_ndevs, part));
442 
443 	/*
444 	 * If this is the first open of this device, add a reference
445 	 * to the adapter.
446 	 */
447 	if (sd->sc_dk.dk_openmask == 0 &&
448 	    (error = scsipi_adapter_addref(adapt)) != 0)
449 		goto bad1;
450 
451 	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
452 		/*
453 		 * If any partition is open, but the disk has been invalidated,
454 		 * disallow further opens of the non-raw partitions.
455 		 */
456 		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
457 		    (part != RAW_PART || fmt != S_IFCHR)) {
458 			error = EIO;
459 			goto bad2;
460 		}
461 	} else {
462 		int silent;
463 
464 		if ((part == RAW_PART && fmt == S_IFCHR) || (flag & FSILENT))
465 			silent = XS_CTL_SILENT;
466 		else
467 			silent = 0;
468 
469 		/* Check that it is still responding and ok. */
470 		error = scsipi_test_unit_ready(periph,
471 		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
472 		    silent);
473 
474 		/*
475 		 * Start the pack spinning if necessary. Always allow the
476 		 * raw partition to be opened, for raw IOCTLs. Data transfers
477 		 * will check for PERIPH_MEDIA_LOADED.
478 		 */
479 		if (error == EIO) {
480 			int error2;
481 
482 			error2 = scsipi_start(periph, SSS_START, silent);
483 			switch (error2) {
484 			case 0:
485 				error = 0;
486 				break;
487 			case EIO:
488 			case EINVAL:
489 				break;
490 			default:
491 				error = error2;
492 				break;
493 			}
494 		}
495 		if (error) {
496 			if (silent && (flag & FSILENT) == 0)
497 				goto out;
498 			goto bad2;
499 		}
500 
501 		periph->periph_flags |= PERIPH_OPEN;
502 
503 		if (periph->periph_flags & PERIPH_REMOVABLE) {
504 			/* Lock the pack in. */
505 			error = scsipi_prevent(periph, SPAMR_PREVENT_DT,
506 			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
507 			    XS_CTL_IGNORE_MEDIA_CHANGE |
508 			    XS_CTL_SILENT);
509 			if (error)
510 				goto bad3;
511 		}
512 
513 		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
514 			int param_error;
515 			periph->periph_flags |= PERIPH_MEDIA_LOADED;
516 
517 			/*
518 			 * Load the physical device parameters.
519 			 *
520 			 * Note that if media is present but unformatted,
521 			 * we allow the open (so that it can be formatted!).
522 			 * The drive should refuse real I/O, if the media is
523 			 * unformatted.
524 			 */
525 			if ((param_error = sd_get_parms(sd, &sd->params, 0))
526 			     == SDGP_RESULT_OFFLINE) {
527 				error = ENXIO;
528 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
529 				goto bad3;
530 			}
531 			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
532 
533 			/* Load the partition info if not already loaded. */
534 			if (param_error == 0) {
535 				if ((sdgetdisklabel(sd) != 0) && (part != RAW_PART)) {
536 					error = EIO;
537 					goto bad3;
538 				}
539 				SC_DEBUG(periph, SCSIPI_DB3,
540 				     ("Disklabel loaded "));
541 			}
542 		}
543 	}
544 
545 	/* Check that the partition exists. */
546 	if (part != RAW_PART &&
547 	    (part >= sd->sc_dk.dk_label->d_npartitions ||
548 	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
549 		error = ENXIO;
550 		goto bad3;
551 	}
552 
553  out:	/* Ensure only one open at a time. */
554 	switch (fmt) {
555 	case S_IFCHR:
556 		sd->sc_dk.dk_copenmask |= (1 << part);
557 		break;
558 	case S_IFBLK:
559 		sd->sc_dk.dk_bopenmask |= (1 << part);
560 		break;
561 	}
562 	sd->sc_dk.dk_openmask =
563 	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
564 
565 	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
566 	mutex_exit(&sd->sc_dk.dk_openlock);
567 	return (0);
568 
569  bad3:
570 	if (sd->sc_dk.dk_openmask == 0) {
571 		if (periph->periph_flags & PERIPH_REMOVABLE)
572 			scsipi_prevent(periph, SPAMR_ALLOW,
573 			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
574 			    XS_CTL_IGNORE_MEDIA_CHANGE |
575 			    XS_CTL_SILENT);
576 		periph->periph_flags &= ~PERIPH_OPEN;
577 	}
578 
579  bad2:
580 	if (sd->sc_dk.dk_openmask == 0)
581 		scsipi_adapter_delref(adapt);
582 
583  bad1:
584 	mutex_exit(&sd->sc_dk.dk_openlock);
585 	return (error);
586 }
587 
588 /*
589  * Caller must hold sd->sc_dk.dk_openlock.
590  */
591 static int
592 sdlastclose(device_t self)
593 {
594 	struct sd_softc *sd = device_private(self);
595 	struct scsipi_periph *periph = sd->sc_periph;
596 	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
597 
598 	/*
599 	 * If the disk cache needs flushing, and the disk supports
600 	 * it, do it now.
601 	 */
602 	if ((sd->flags & SDF_DIRTY) != 0) {
603 		if (sd_flush(sd, 0)) {
604 			aprint_error_dev(sd->sc_dev,
605 				"cache synchronization failed\n");
606 			sd->flags &= ~SDF_FLUSHING;
607 		} else
608 			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
609 	}
610 
611 	scsipi_wait_drain(periph);
612 
613 	if (periph->periph_flags & PERIPH_REMOVABLE)
614 		scsipi_prevent(periph, SPAMR_ALLOW,
615 		    XS_CTL_IGNORE_ILLEGAL_REQUEST |
616 		    XS_CTL_IGNORE_NOT_READY |
617 		    XS_CTL_SILENT);
618 	periph->periph_flags &= ~PERIPH_OPEN;
619 
620 	scsipi_wait_drain(periph);
621 
622 	scsipi_adapter_delref(adapt);
623 
624 	return 0;
625 }
626 
627 /*
628  * close the device. Only called if we are the LAST occurrence of an open
629  * device.  Convenient now but usually a pain.
630  */
631 static int
632 sdclose(dev_t dev, int flag, int fmt, struct lwp *l)
633 {
634 	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(dev));
635 	int part = SDPART(dev);
636 
637 	mutex_enter(&sd->sc_dk.dk_openlock);
638 	switch (fmt) {
639 	case S_IFCHR:
640 		sd->sc_dk.dk_copenmask &= ~(1 << part);
641 		break;
642 	case S_IFBLK:
643 		sd->sc_dk.dk_bopenmask &= ~(1 << part);
644 		break;
645 	}
646 	sd->sc_dk.dk_openmask =
647 	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
648 
649 	if (sd->sc_dk.dk_openmask == 0)
650 		sdlastclose(sd->sc_dev);
651 
652 	mutex_exit(&sd->sc_dk.dk_openlock);
653 	return (0);
654 }
655 
656 /*
657  * Actually translate the requested transfer into one the physical driver
658  * can understand.  The transfer is described by a buf and will include
659  * only one physical transfer.
660  */
661 static void
662 sdstrategy(struct buf *bp)
663 {
664 	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(bp->b_dev));
665 	struct scsipi_periph *periph = sd->sc_periph;
666 	struct disklabel *lp;
667 	daddr_t blkno;
668 	int s;
669 	bool sector_aligned;
670 
671 	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
672 	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
673 	    ("%d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
674 	/*
675 	 * If the device has been made invalid, error out
676 	 */
677 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
678 	    !device_is_active(sd->sc_dev)) {
679 		if (periph->periph_flags & PERIPH_OPEN)
680 			bp->b_error = EIO;
681 		else
682 			bp->b_error = ENODEV;
683 		goto done;
684 	}
685 
686 	lp = sd->sc_dk.dk_label;
687 
688 	/*
689 	 * The transfer must be a whole number of blocks, and the offset must not be
690 	 * negative.
691 	 */
692 	if (lp->d_secsize == DEV_BSIZE) {
693 		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
694 	} else {
695 		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
696 	}
697 	if (!sector_aligned || bp->b_blkno < 0) {
698 		bp->b_error = EINVAL;
699 		goto done;
700 	}
701 	/*
702 	 * If it's a null transfer, return immediately.
703 	 */
704 	if (bp->b_bcount == 0)
705 		goto done;
706 
707 	/*
708 	 * Do bounds checking and adjust the transfer. If error, process.
709 	 * If end of partition, just return.
710 	 */
711 	if (SDPART(bp->b_dev) == RAW_PART) {
712 		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
713 		    sd->params.disksize512) <= 0)
714 			goto done;
715 	} else {
716 		if (bounds_check_with_label(&sd->sc_dk, bp,
717 		    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
718 			goto done;
719 	}
720 
721 	/*
722 	 * Now convert the block number to absolute and put it in
723 	 * terms of the device's logical block size.
724 	 */
725 	if (lp->d_secsize == DEV_BSIZE)
726 		blkno = bp->b_blkno;
727 	else if (lp->d_secsize > DEV_BSIZE)
728 		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
729 	else
730 		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
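	/*
	 * Worked example (illustrative): with DEV_BSIZE 512 and a 2048-byte
	 * d_secsize, b_blkno 16 becomes device block 4; with d_secsize 512
	 * the block number passes through unchanged.
	 */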
731 
732 	if (SDPART(bp->b_dev) != RAW_PART)
733 		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
734 
735 	bp->b_rawblkno = blkno;
736 
737 	s = splbio();
738 
739 	/*
740 	 * Place it in the queue of disk activities for this disk.
741 	 *
742 	 * XXX Only do disksort() if the current operating mode does not
743 	 * XXX include tagged queueing.
744 	 */
745 	bufq_put(sd->buf_queue, bp);
746 
747 	/*
748 	 * Tell the device to get going on the transfer if it's
749 	 * not doing anything, otherwise just wait for completion
750 	 */
751 	sdstart(sd->sc_periph);
752 
753 	splx(s);
754 	return;
755 
756 done:
757 	/*
758 	 * Correctly set the buf to indicate a completed xfer
759 	 */
760 	bp->b_resid = bp->b_bcount;
761 	biodone(bp);
762 }
763 
764 /*
765  * sdstart looks to see if there is a buf waiting for the device
766  * and that the device is not already busy. If both are true,
767  * it dequeues the buf and creates a scsi command to perform the
768  * transfer in the buf. The transfer request will call scsipi_done
769  * on completion, which will in turn call this routine again
770  * so that the next queued transfer is performed.
771  * The bufs are queued by the strategy routine (sdstrategy)
772  *
773  * This routine is also called after other non-queued requests
774  * have been made of the scsi driver, to ensure that the queue
775  * continues to be drained.
776  *
777  * must be called at the correct (highish) spl level
778  * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done
779  */
780 static void
781 sdstart(struct scsipi_periph *periph)
782 {
783 	struct sd_softc *sd = device_private(periph->periph_dev);
784 	struct disklabel *lp = sd->sc_dk.dk_label;
785 	struct buf *bp = 0;
786 	struct scsipi_rw_16 cmd16;
787 	struct scsipi_rw_10 cmd_big;
788 	struct scsi_rw_6 cmd_small;
789 	struct scsipi_generic *cmdp;
790 	struct scsipi_xfer *xs;
791 	int nblks, cmdlen, error __diagused, flags;
792 
793 	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
794 	/*
795 	 * Check if the device has room for another command
796 	 */
797 	while (periph->periph_active < periph->periph_openings) {
798 		/*
799 		 * there is excess capacity, but a special command is waiting.
800 		 * It'll need the adapter as soon as we clear out of the
801 		 * way and let it run (user level wait).
802 		 */
803 		if (periph->periph_flags & PERIPH_WAITING) {
804 			periph->periph_flags &= ~PERIPH_WAITING;
805 			wakeup((void *)periph);
806 			return;
807 		}
808 
809 		/*
810 		 * If the device has become invalid, abort all the
811 		 * reads and writes until all files have been closed and
812 		 * re-opened
813 		 */
814 		if (__predict_false(
815 		    (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
816 			if ((bp = bufq_get(sd->buf_queue)) != NULL) {
817 				bp->b_error = EIO;
818 				bp->b_resid = bp->b_bcount;
819 				biodone(bp);
820 				continue;
821 			} else {
822 				return;
823 			}
824 		}
825 
826 		 * See if there is a buf with work for us to do.
827 		 * See if there is a buf with work for us to do..
828 		 */
829 		if ((bp = bufq_peek(sd->buf_queue)) == NULL)
830 			return;
831 
832 		/*
833 		 * We have a buf, now we should make a command.
834 		 */
835 
836 		if (lp->d_secsize == DEV_BSIZE)
837 			nblks = bp->b_bcount >> DEV_BSHIFT;
838 		else
839 			nblks = howmany(bp->b_bcount, lp->d_secsize);
840 
841 		/*
842 		 * Fill out the scsi command.  Use the smallest CDB possible
843 		 * (6-byte, 10-byte, or 16-byte).
844 		 */
845 		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
846 		    ((nblks & 0xff) == nblks) &&
847 		    !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
848 			/* 6-byte CDB */
849 			memset(&cmd_small, 0, sizeof(cmd_small));
850 			cmd_small.opcode = (bp->b_flags & B_READ) ?
851 			    SCSI_READ_6_COMMAND : SCSI_WRITE_6_COMMAND;
852 			_lto3b(bp->b_rawblkno, cmd_small.addr);
853 			cmd_small.length = nblks & 0xff;
854 			cmdlen = sizeof(cmd_small);
855 			cmdp = (struct scsipi_generic *)&cmd_small;
856 		} else if ((bp->b_rawblkno & 0xffffffff) == bp->b_rawblkno) {
857 			/* 10-byte CDB */
858 			memset(&cmd_big, 0, sizeof(cmd_big));
859 			cmd_big.opcode = (bp->b_flags & B_READ) ?
860 			    READ_10 : WRITE_10;
861 			_lto4b(bp->b_rawblkno, cmd_big.addr);
862 			_lto2b(nblks, cmd_big.length);
863 			cmdlen = sizeof(cmd_big);
864 			cmdp = (struct scsipi_generic *)&cmd_big;
865 		} else {
866 			/* 16-byte CDB */
867 			memset(&cmd16, 0, sizeof(cmd16));
868 			cmd16.opcode = (bp->b_flags & B_READ) ?
869 			    READ_16 : WRITE_16;
870 			_lto8b(bp->b_rawblkno, cmd16.addr);
871 			_lto4b(nblks, cmd16.length);
872 			cmdlen = sizeof(cmd16);
873 			cmdp = (struct scsipi_generic *)&cmd16;
874 		}
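		/*
		 * Note on the CDB selection above: a 6-byte READ/WRITE
		 * carries a 21-bit LBA and an 8-bit transfer length, a
		 * 10-byte CDB a 32-bit LBA and a 16-bit length, and a
		 * 16-byte CDB a 64-bit LBA and a 32-bit length.
		 */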
875 
876 		/* Instrumentation. */
877 		disk_busy(&sd->sc_dk);
878 
879 		/*
880 		 * Mark the disk dirty so that the cache will be
881 		 * flushed on close.
882 		 */
883 		if ((bp->b_flags & B_READ) == 0)
884 			sd->flags |= SDF_DIRTY;
885 
886 		/*
887 		 * Figure out what flags to use.
888 		 */
889 		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
890 		if (bp->b_flags & B_READ)
891 			flags |= XS_CTL_DATA_IN;
892 		else
893 			flags |= XS_CTL_DATA_OUT;
894 
895 		/*
896 		 * Call the routine that chats with the adapter.
897 		 * Note: we cannot sleep as we may be an interrupt
898 		 */
899 		xs = scsipi_make_xs(periph, cmdp, cmdlen,
900 		    (u_char *)bp->b_data, bp->b_bcount,
901 		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
902 		if (__predict_false(xs == NULL)) {
903 			/*
904 			 * out of memory. Keep this buffer in the queue, and
905 			 * retry later.
906 			 */
907 			callout_reset(&sd->sc_callout, hz / 2, sdrestart,
908 			    periph);
909 			return;
910 		}
911 		/*
912 		 * need to dequeue the buffer before queuing the command,
913 		 * because sdstart may be called recursively from the
914 		 * HBA driver
915 		 */
916 #ifdef DIAGNOSTIC
917 		if (bufq_get(sd->buf_queue) != bp)
918 			panic("sdstart(): dequeued wrong buf");
919 #else
920 		bufq_get(sd->buf_queue);
921 #endif
922 		error = scsipi_execute_xs(xs);
923 		/* with a scsipi_xfer preallocated, scsipi_command can't fail */
924 		KASSERT(error == 0);
925 	}
926 }
927 
928 static void
929 sdrestart(void *v)
930 {
931 	int s = splbio();
932 	sdstart((struct scsipi_periph *)v);
933 	splx(s);
934 }
935 
936 static void
937 sddone(struct scsipi_xfer *xs, int error)
938 {
939 	struct sd_softc *sd = device_private(xs->xs_periph->periph_dev);
940 	struct buf *bp = xs->bp;
941 
942 	if (sd->flags & SDF_FLUSHING) {
943 		/* Flush completed, no longer dirty. */
944 		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
945 	}
946 
947 	if (bp) {
948 		bp->b_error = error;
949 		bp->b_resid = xs->resid;
950 		if (error) {
951 			/* on a read/write error bp->b_resid is zero, so fix */
952 			bp->b_resid = bp->b_bcount;
953 		}
954 
955 		disk_unbusy(&sd->sc_dk, bp->b_bcount - bp->b_resid,
956 		    (bp->b_flags & B_READ));
957 		rnd_add_uint32(&sd->rnd_source, bp->b_rawblkno);
958 
959 		biodone(bp);
960 	}
961 }
962 
963 static void
964 sdminphys(struct buf *bp)
965 {
966 	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(bp->b_dev));
967 	long xmax;
968 
969 	/*
970 	 * If the device is ancient, we want to make sure that
971 	 * the transfer fits into a 6-byte cdb.
972 	 *
973 	 * XXX Note that the SCSI-I spec says that 256-block transfers
974 	 * are allowed in a 6-byte read/write, and are specified
975 	 * by setting the "length" to 0.  However, we're conservative
976 	 * here, allowing only 255-block transfers in case an
977 	 * ancient device gets confused by length == 0.  A length of 0
978 	 * in a 10-byte read/write actually means 0 blocks.
979 	 */
980 	if ((sd->flags & SDF_ANCIENT) &&
981 	    ((sd->sc_periph->periph_flags &
982 	    (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
983 		xmax = sd->sc_dk.dk_label->d_secsize * 0xff;
984 
985 		if (bp->b_bcount > xmax)
986 			bp->b_bcount = xmax;
987 	}
988 
989 	scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
990 }
991 
992 static int
993 sdread(dev_t dev, struct uio *uio, int ioflag)
994 {
995 
996 	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
997 }
998 
999 static int
1000 sdwrite(dev_t dev, struct uio *uio, int ioflag)
1001 {
1002 
1003 	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
1004 }
1005 
1006 /*
1007  * Perform special action on behalf of the user.
1008  * Knows about the internals of this device.
1009  */
1010 static int
1011 sdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1012 {
1013 	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(dev));
1014 	struct scsipi_periph *periph = sd->sc_periph;
1015 	int part = SDPART(dev);
1016 	int error;
1017 	int s;
1018 #ifdef __HAVE_OLD_DISKLABEL
1019 	struct disklabel *newlabel = NULL;
1020 #endif
1021 
1022 	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1023 
1024 	/*
1025 	 * If the device is not valid, some IOCTLs can still be
1026 	 * handled on the raw partition. Check this here.
1027 	 */
1028 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1029 		switch (cmd) {
1030 		case DIOCKLABEL:
1031 		case DIOCWLABEL:
1032 		case DIOCLOCK:
1033 		case DIOCEJECT:
1034 		case ODIOCEJECT:
1035 		case DIOCGCACHE:
1036 		case DIOCSCACHE:
1037 		case DIOCGSTRATEGY:
1038 		case DIOCSSTRATEGY:
1039 		case SCIOCIDENTIFY:
1040 		case OSCIOCIDENTIFY:
1041 		case SCIOCCOMMAND:
1042 		case SCIOCDEBUG:
1043 			if (part == RAW_PART)
1044 				break;
1045 		/* FALLTHROUGH */
1046 		default:
1047 			if ((periph->periph_flags & PERIPH_OPEN) == 0)
1048 				return (ENODEV);
1049 			else
1050 				return (EIO);
1051 		}
1052 	}
1053 
1054 	error = disk_ioctl(&sd->sc_dk, cmd, addr, flag, l);
1055 	if (error != EPASSTHROUGH)
1056 		return (error);
1057 
1058 	error = 0;
1059 	switch (cmd) {
1060 	case DIOCGDINFO:
1061 		*(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1062 		return (0);
1063 
1064 #ifdef __HAVE_OLD_DISKLABEL
1065 	case ODIOCGDINFO:
1066 		newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1067 		if (newlabel == NULL)
1068 			return EIO;
1069 		memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1070 		if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1071 			memcpy(addr, newlabel, sizeof (struct olddisklabel));
1072 		else
1073 			error = ENOTTY;
1074 		free(newlabel, M_TEMP);
1075 		return error;
1076 #endif
1077 
1078 	case DIOCGPART:
1079 		((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1080 		((struct partinfo *)addr)->part =
1081 		    &sd->sc_dk.dk_label->d_partitions[part];
1082 		return (0);
1083 
1084 	case DIOCWDINFO:
1085 	case DIOCSDINFO:
1086 #ifdef __HAVE_OLD_DISKLABEL
1087 	case ODIOCWDINFO:
1088 	case ODIOCSDINFO:
1089 #endif
1090 	{
1091 		struct disklabel *lp;
1092 
1093 		if ((flag & FWRITE) == 0)
1094 			return (EBADF);
1095 
1096 #ifdef __HAVE_OLD_DISKLABEL
1097  		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1098 			newlabel = malloc(sizeof *newlabel, M_TEMP,
1099 			    M_WAITOK | M_ZERO);
1100 			if (newlabel == NULL)
1101 				return EIO;
1102 			memcpy(newlabel, addr, sizeof (struct olddisklabel));
1103 			lp = newlabel;
1104 		} else
1105 #endif
1106 		lp = (struct disklabel *)addr;
1107 
1108 		mutex_enter(&sd->sc_dk.dk_openlock);
1109 		sd->flags |= SDF_LABELLING;
1110 
1111 		error = setdisklabel(sd->sc_dk.dk_label,
1112 		    lp, /*sd->sc_dk.dk_openmask : */0,
1113 		    sd->sc_dk.dk_cpulabel);
1114 		if (error == 0) {
1115 			if (cmd == DIOCWDINFO
1116 #ifdef __HAVE_OLD_DISKLABEL
1117 			    || cmd == ODIOCWDINFO
1118 #endif
1119 			   )
1120 				error = writedisklabel(SDLABELDEV(dev),
1121 				    sdstrategy, sd->sc_dk.dk_label,
1122 				    sd->sc_dk.dk_cpulabel);
1123 		}
1124 
1125 		sd->flags &= ~SDF_LABELLING;
1126 		mutex_exit(&sd->sc_dk.dk_openlock);
1127 #ifdef __HAVE_OLD_DISKLABEL
1128 		if (newlabel != NULL)
1129 			free(newlabel, M_TEMP);
1130 #endif
1131 		return (error);
1132 	}
1133 
1134 	case DIOCKLABEL:
1135 		if (*(int *)addr)
1136 			periph->periph_flags |= PERIPH_KEEP_LABEL;
1137 		else
1138 			periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1139 		return (0);
1140 
1141 	case DIOCWLABEL:
1142 		if ((flag & FWRITE) == 0)
1143 			return (EBADF);
1144 		if (*(int *)addr)
1145 			sd->flags |= SDF_WLABEL;
1146 		else
1147 			sd->flags &= ~SDF_WLABEL;
1148 		return (0);
1149 
1150 	case DIOCLOCK:
1151 		if (periph->periph_flags & PERIPH_REMOVABLE)
1152 			return (scsipi_prevent(periph,
1153 			    (*(int *)addr) ?
1154 			    SPAMR_PREVENT_DT : SPAMR_ALLOW, 0));
1155 		else
1156 			return (ENOTTY);
1157 
1158 	case DIOCEJECT:
1159 		if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1160 			return (ENOTTY);
1161 		if (*(int *)addr == 0) {
1162 			/*
1163 			 * Don't force eject: check that we are the only
1164 			 * partition open. If so, unlock it.
1165 			 */
1166 			if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1167 			    sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1168 			    sd->sc_dk.dk_openmask) {
1169 				error = scsipi_prevent(periph, SPAMR_ALLOW,
1170 				    XS_CTL_IGNORE_NOT_READY);
1171 				if (error)
1172 					return (error);
1173 			} else {
1174 				return (EBUSY);
1175 			}
1176 		}
1177 		/* FALLTHROUGH */
1178 	case ODIOCEJECT:
1179 		return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1180 		    ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1181 
1182 	case DIOCGDEFLABEL:
1183 		sdgetdefaultlabel(sd, (struct disklabel *)addr);
1184 		return (0);
1185 
1186 #ifdef __HAVE_OLD_DISKLABEL
1187 	case ODIOCGDEFLABEL:
1188 		newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1189 		if (newlabel == NULL)
1190 			return EIO;
1191 		sdgetdefaultlabel(sd, newlabel);
1192 		if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1193 			memcpy(addr, newlabel, sizeof (struct olddisklabel));
1194 		else
1195 			error = ENOTTY;
1196 		free(newlabel, M_TEMP);
1197 		return error;
1198 #endif
1199 
1200 	case DIOCGCACHE:
1201 		return (sd_getcache(sd, (int *) addr));
1202 
1203 	case DIOCSCACHE:
1204 		if ((flag & FWRITE) == 0)
1205 			return (EBADF);
1206 		return (sd_setcache(sd, *(int *) addr));
1207 
1208 	case DIOCCACHESYNC:
1209 		/*
1210 		 * XXX Do we really need to care about having a writable
1211 		 * file descriptor here?
1212 		 */
1213 		if ((flag & FWRITE) == 0)
1214 			return (EBADF);
1215 		if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1216 			error = sd_flush(sd, 0);
1217 			if (error)
1218 				sd->flags &= ~SDF_FLUSHING;
1219 			else
1220 				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1221 		}
1222 		return (error);
1223 
1224 	case DIOCAWEDGE:
1225 	    {
1226 	    	struct dkwedge_info *dkw = (void *) addr;
1227 
1228 		if ((flag & FWRITE) == 0)
1229 			return (EBADF);
1230 
1231 		/* If the ioctl happens here, the parent is us. */
1232 		strlcpy(dkw->dkw_parent, device_xname(sd->sc_dev),
1233 			sizeof(dkw->dkw_parent));
1234 		return (dkwedge_add(dkw));
1235 	    }
1236 
1237 	case DIOCDWEDGE:
1238 	    {
1239 	    	struct dkwedge_info *dkw = (void *) addr;
1240 
1241 		if ((flag & FWRITE) == 0)
1242 			return (EBADF);
1243 
1244 		/* If the ioctl happens here, the parent is us. */
1245 		strlcpy(dkw->dkw_parent, device_xname(sd->sc_dev),
1246 			sizeof(dkw->dkw_parent));
1247 		return (dkwedge_del(dkw));
1248 	    }
1249 
1250 	case DIOCLWEDGES:
1251 	    {
1252 	    	struct dkwedge_list *dkwl = (void *) addr;
1253 
1254 		return (dkwedge_list(&sd->sc_dk, dkwl, l));
1255 	    }
1256 
1257 	case DIOCGSTRATEGY:
1258 	    {
1259 		struct disk_strategy *dks = addr;
1260 
1261 		s = splbio();
1262 		strlcpy(dks->dks_name, bufq_getstrategyname(sd->buf_queue),
1263 		    sizeof(dks->dks_name));
1264 		splx(s);
1265 		dks->dks_paramlen = 0;
1266 
1267 		return 0;
1268 	    }
1269 
1270 	case DIOCSSTRATEGY:
1271 	    {
1272 		struct disk_strategy *dks = addr;
1273 		struct bufq_state *new;
1274 		struct bufq_state *old;
1275 
1276 		if ((flag & FWRITE) == 0) {
1277 			return EBADF;
1278 		}
1279 
1280 		if (dks->dks_param != NULL) {
1281 			return EINVAL;
1282 		}
1283 		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
1284 		error = bufq_alloc(&new, dks->dks_name,
1285 		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
1286 		if (error) {
1287 			return error;
1288 		}
1289 		s = splbio();
1290 		old = sd->buf_queue;
1291 		bufq_move(new, old);
1292 		sd->buf_queue = new;
1293 		splx(s);
1294 		bufq_free(old);
1295 
1296 		return 0;
1297 	    }
1298 
1299 	default:
1300 		if (part != RAW_PART)
1301 			return (ENOTTY);
1302 		return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, l));
1303 	}
1304 
1305 #ifdef DIAGNOSTIC
1306 	panic("sdioctl: impossible");
1307 #endif
1308 }
1309 
1310 static void
1311 sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
1312 {
1313 
1314 	memset(lp, 0, sizeof(struct disklabel));
1315 
1316 	lp->d_secsize = sd->params.blksize;
1317 	lp->d_ntracks = sd->params.heads;
1318 	lp->d_nsectors = sd->params.sectors;
1319 	lp->d_ncylinders = sd->params.cyls;
1320 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1321 
1322 	switch (SCSIPI_BUSTYPE_TYPE(scsipi_periph_bustype(sd->sc_periph))) {
1323 	case SCSIPI_BUSTYPE_SCSI:
1324 		lp->d_type = DTYPE_SCSI;
1325 		break;
1326 	case SCSIPI_BUSTYPE_ATAPI:
1327 		lp->d_type = DTYPE_ATAPI;
1328 		break;
1329 	}
1330 	/*
1331 	 * XXX
1332 	 * We could probe the mode pages to figure out what kind of disc it is.
1333 	 * Is this worthwhile?
1334 	 */
1335 	strncpy(lp->d_typename, sd->name, 16);
1336 	strncpy(lp->d_packname, "fictitious", 16);
1337 	if (sd->params.disksize > UINT32_MAX)
1338 		lp->d_secperunit = UINT32_MAX;
1339 	else
1340 		lp->d_secperunit = sd->params.disksize;
1341 	lp->d_rpm = sd->params.rot_rate;
1342 	lp->d_interleave = 1;
1343 	lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1344 	    D_REMOVABLE : 0;
1345 
1346 	lp->d_partitions[RAW_PART].p_offset = 0;
1347 	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
1348 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1349 	lp->d_npartitions = RAW_PART + 1;
1350 
1351 	lp->d_magic = DISKMAGIC;
1352 	lp->d_magic2 = DISKMAGIC;
1353 	lp->d_checksum = dkcksum(lp);
1354 }
1355 
1356 
1357 /*
1358  * Load the label information on the named device
1359  */
1360 static int
1361 sdgetdisklabel(struct sd_softc *sd)
1362 {
1363 	struct disklabel *lp = sd->sc_dk.dk_label;
1364 	const char *errstring;
1365 
1366 	memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1367 
1368 	sdgetdefaultlabel(sd, lp);
1369 
1370 	if (lp->d_secpercyl == 0) {
1371 		lp->d_secpercyl = 100;
1372 		/* as long as it's not 0 - readdisklabel divides by it (?) */
1373 	}
1374 
1375 	/*
1376 	 * Call the generic disklabel extraction routine
1377 	 */
1378 	errstring = readdisklabel(MAKESDDEV(0, device_unit(sd->sc_dev),
1379 	    RAW_PART), sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1380 	if (errstring) {
1381 		aprint_error_dev(sd->sc_dev, "%s\n", errstring);
1382 		return EIO;
1383 	}
1384 	return 0;
1385 }
1386 
1387 static bool
1388 sd_shutdown(device_t self, int how)
1389 {
1390 	struct sd_softc *sd = device_private(self);
1391 
1392 	/*
1393 	 * If the disk cache needs to be flushed, and the disk supports
1394 	 * it, flush it.  We're cold at this point, so we poll for
1395 	 * completion.
1396 	 */
1397 	if ((sd->flags & SDF_DIRTY) != 0) {
1398 		if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1399 			aprint_error_dev(sd->sc_dev,
1400 				"cache synchronization failed\n");
1401 			sd->flags &= ~SDF_FLUSHING;
1402 		} else
1403 			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1404 	}
1405 	return true;
1406 }
1407 
1408 static bool
1409 sd_suspend(device_t dv, const pmf_qual_t *qual)
1410 {
1411 	return sd_shutdown(dv, boothowto); /* XXX no need to poll */
1412 }
1413 
1414 /*
1415  * Check Errors
1416  */
1417 static int
1418 sd_interpret_sense(struct scsipi_xfer *xs)
1419 {
1420 	struct scsipi_periph *periph = xs->xs_periph;
1421 	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
1422 	struct sd_softc *sd = device_private(periph->periph_dev);
1423 	int s, error, retval = EJUSTRETURN;
1424 
1425 	/*
1426 	 * If the periph is already recovering, just do the normal
1427 	 * error processing.
1428 	 */
1429 	if (periph->periph_flags & PERIPH_RECOVERING)
1430 		return (retval);
1431 
1432 	/*
1433 	 * Ignore errors from accessing illegal fields (e.g. trying to
1434 	 * lock the door of a digicam, which doesn't have a door that
1435 	 * can be locked) for the SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL command.
1436 	 */
1437 	if (xs->cmd->opcode == SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL &&
1438 	    SSD_SENSE_KEY(sense->flags) == SKEY_ILLEGAL_REQUEST &&
1439 	    sense->asc == 0x24 &&
1440 	    sense->ascq == 0x00) { /* Illegal field in CDB */
1441 		if (!(xs->xs_control & XS_CTL_SILENT)) {
1442 			scsipi_printaddr(periph);
1443 			printf("no door lock\n");
1444 		}
1445 		xs->xs_control |= XS_CTL_IGNORE_ILLEGAL_REQUEST;
1446 		return (retval);
1447 	}
1448 
1449 
1450 
1451 	/*
1452 	 * If the device is not open yet, let the generic code handle it.
1453 	 */
1454 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1455 		return (retval);
1456 
1457 	/*
1458 	 * If it isn't an extended or extended/deferred error, let
1459 	 * the generic code handle it.
1460 	 */
1461 	if (SSD_RCODE(sense->response_code) != SSD_RCODE_CURRENT &&
1462 	    SSD_RCODE(sense->response_code) != SSD_RCODE_DEFERRED)
1463 		return (retval);
1464 
1465 	if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
1466 	    sense->asc == 0x4) {
1467 		if (sense->ascq == 0x01)	{
1468 			/*
1469 			 * Unit In The Process Of Becoming Ready.
1470 			 */
1471 			printf("%s: waiting for pack to spin up...\n",
1472 			    device_xname(sd->sc_dev));
1473 			if (!callout_pending(&periph->periph_callout))
1474 				scsipi_periph_freeze(periph, 1);
1475 			callout_reset(&periph->periph_callout,
1476 			    5 * hz, scsipi_periph_timed_thaw, periph);
1477 			retval = ERESTART;
1478 		} else if (sense->ascq == 0x02) {
1479 			printf("%s: pack is stopped, restarting...\n",
1480 			    device_xname(sd->sc_dev));
1481 			s = splbio();
1482 			periph->periph_flags |= PERIPH_RECOVERING;
1483 			splx(s);
1484 			error = scsipi_start(periph, SSS_START,
1485 			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1486 			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1487 			if (error) {
1488 				aprint_error_dev(sd->sc_dev,
1489 					"unable to restart pack\n");
1490 				retval = error;
1491 			} else
1492 				retval = ERESTART;
1493 			s = splbio();
1494 			periph->periph_flags &= ~PERIPH_RECOVERING;
1495 			splx(s);
1496 		}
1497 	}
1498 	if (SSD_SENSE_KEY(sense->flags) == SKEY_MEDIUM_ERROR &&
1499 	    sense->asc == 0x31 &&
1500 	    sense->ascq == 0x00)	{ /* maybe for any ascq? */
1501 		/* Medium Format Corrupted */
1502 		retval = EFTYPE;
1503 	}
1504 	return (retval);
1505 }
1506 
1507 
1508 static int
1509 sdsize(dev_t dev)
1510 {
1511 	struct sd_softc *sd;
1512 	int part, unit, omask;
1513 	int size;
1514 
1515 	unit = SDUNIT(dev);
1516 	sd = device_lookup_private(&sd_cd, unit);
1517 	if (sd == NULL)
1518 		return (-1);
1519 
1520 	if (!device_is_active(sd->sc_dev))
1521 		return (-1);
1522 
1523 	part = SDPART(dev);
1524 	omask = sd->sc_dk.dk_openmask & (1 << part);
1525 
1526 	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1527 		return (-1);
1528 	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1529 		size = -1;
1530 	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1531 		size = -1;
1532 	else
1533 		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1534 		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1535 	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1536 		return (-1);
1537 	return (size);
1538 }
1539 
1540 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1541 static struct scsipi_xfer sx;
1542 static int sddoingadump;
1543 
1544 /*
1545  * dump all of physical memory into the partition specified, starting
1546  * at offset 'dumplo' into the partition.
1547  */
1548 static int
1549 sddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1550 {
1551 	struct sd_softc *sd;	/* disk unit to do the I/O */
1552 	struct disklabel *lp;	/* disk's disklabel */
1553 	int	unit, part;
1554 	int	sectorsize;	/* size of a disk sector */
1555 	int	nsects;		/* number of sectors in partition */
1556 	int	sectoff;	/* sector offset of partition */
1557 	int	totwrt;		/* total number of sectors left to write */
1558 	int	nwrt;		/* current number of sectors to write */
1559 	struct scsipi_rw_10 cmd;	/* write command */
1560 	struct scsipi_xfer *xs;	/* ... convenience */
1561 	struct scsipi_periph *periph;
1562 	struct scsipi_channel *chan;
1563 
1564 	/* Check if recursive dump; if so, punt. */
1565 	if (sddoingadump)
1566 		return (EFAULT);
1567 
1568 	/* Mark as active early. */
1569 	sddoingadump = 1;
1570 
1571 	unit = SDUNIT(dev);	/* Decompose unit & partition. */
1572 	part = SDPART(dev);
1573 
1574 	/* Check for acceptable drive number. */
1575 	sd = device_lookup_private(&sd_cd, unit);
1576 	if (sd == NULL)
1577 		return (ENXIO);
1578 
1579 	if (!device_is_active(sd->sc_dev))
1580 		return (ENODEV);
1581 
1582 	periph = sd->sc_periph;
1583 	chan = periph->periph_channel;
1584 
1585 	/* Make sure it was initialized. */
1586 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1587 		return (ENXIO);
1588 
1589 	/* Convert to disk sectors.  Request must be a multiple of size. */
1590 	lp = sd->sc_dk.dk_label;
1591 	sectorsize = lp->d_secsize;
1592 	if ((size % sectorsize) != 0)
1593 		return (EFAULT);
1594 	totwrt = size / sectorsize;
1595 	blkno = dbtob(blkno) / sectorsize;	/* blkno comes in DEV_BSIZE units; convert to device sectors */
1596 
1597 	nsects = lp->d_partitions[part].p_size;
1598 	sectoff = lp->d_partitions[part].p_offset;
1599 
1600 	/* Check transfer bounds against partition size. */
1601 	if ((blkno < 0) || ((blkno + totwrt) > nsects))
1602 		return (EINVAL);
1603 
1604 	/* Offset block number to start of partition. */
1605 	blkno += sectoff;
1606 
1607 	xs = &sx;
1608 
1609 	while (totwrt > 0) {
1610 		nwrt = totwrt;		/* XXX */
1611 #ifndef	SD_DUMP_NOT_TRUSTED
1612 		/*
1613 		 *  Fill out the scsi command
1614 		 */
1615 		memset(&cmd, 0, sizeof(cmd));
1616 		cmd.opcode = WRITE_10;
1617 		_lto4b(blkno, cmd.addr);
1618 		_lto2b(nwrt, cmd.length);
1619 		/*
1620 		 * Fill out the scsipi_xfer structure
1621 		 *    Note: we cannot sleep as we may be an interrupt
1622 		 * don't use scsipi_command() as it may want to wait
1623 		 * for an xs.
1624 		 */
1625 		memset(xs, 0, sizeof(sx));
1626 		xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1627 		    XS_CTL_DATA_OUT;
1628 		xs->xs_status = 0;
1629 		xs->xs_periph = periph;
1630 		xs->xs_retries = SDRETRIES;
1631 		xs->timeout = 10000;	/* 10000 millisecs for a disk ! */
1632 		xs->cmd = (struct scsipi_generic *)&cmd;
1633 		xs->cmdlen = sizeof(cmd);
1634 		xs->resid = nwrt * sectorsize;
1635 		xs->error = XS_NOERROR;
1636 		xs->bp = 0;
1637 		xs->data = va;
1638 		xs->datalen = nwrt * sectorsize;
1639 		callout_init(&xs->xs_callout, 0);
1640 
1641 		/*
1642 		 * Pass all this info to the scsi driver.
1643 		 */
1644 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1645 		if ((xs->xs_status & XS_STS_DONE) == 0 ||
1646 		    xs->error != XS_NOERROR)
1647 			return (EIO);
1648 #else	/* SD_DUMP_NOT_TRUSTED */
1649 		/* Let's just talk about this first... */
1650 		printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1651 		delay(500 * 1000);	/* half a second */
1652 #endif	/* SD_DUMP_NOT_TRUSTED */
1653 
1654 		/* update block count */
1655 		totwrt -= nwrt;
1656 		blkno += nwrt;
1657 		va = (char *)va + sectorsize * nwrt;
1658 	}
1659 	sddoingadump = 0;
1660 	return (0);
1661 }
1662 
1663 static int
1664 sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1665     int page, int flags, int *big)
1666 {
1667 
1668 	if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1669 	    !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1670 		*big = 1;
1671 		return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1672 		    size + sizeof(struct scsi_mode_parameter_header_10),
1673 		    flags, SDRETRIES, 6000);
1674 	} else {
1675 		*big = 0;
1676 		return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1677 		    size + sizeof(struct scsi_mode_parameter_header_6),
1678 		    flags, SDRETRIES, 6000);
1679 	}
1680 }
1681 
1682 static int
1683 sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1684     int flags, int big)
1685 {
1686 
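	/*
	 * In both cases the mode data length field of the parameter header
	 * is reserved for MODE SELECT and must be sent as zero.
	 */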
1687 	if (big) {
1688 		struct scsi_mode_parameter_header_10 *header = sense;
1689 
1690 		_lto2b(0, header->data_length);
1691 		return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1692 		    size + sizeof(struct scsi_mode_parameter_header_10),
1693 		    flags, SDRETRIES, 6000);
1694 	} else {
1695 		struct scsi_mode_parameter_header_6 *header = sense;
1696 
1697 		header->data_length = 0;
1698 		return scsipi_mode_select(sd->sc_periph, byte2, sense,
1699 		    size + sizeof(struct scsi_mode_parameter_header_6),
1700 		    flags, SDRETRIES, 6000);
1701 	}
1702 }
1703 
1704 /*
1705  * sd_validate_blksize:
1706  *
1707  *	Validate the block size.  Print an error if periph is specified.
1708  */
1709 static int
1710 sd_validate_blksize(struct scsipi_periph *periph, int len)
1711 {
1712 
1713 	switch (len) {
1714 	case 256:
1715 	case 512:
1716 	case 1024:
1717 	case 2048:
1718 	case 4096:
1719 		return 1;
1720 	}
1721 
1722 	if (periph) {
1723 		scsipi_printaddr(periph);
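		/*
		 * A size that is not a power of two (the XOR below is
		 * non-zero) is reported as "preposterous"; a power of two
		 * that simply isn't in the list above as "unsupported".
		 */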
1724 		printf("%s sector size: 0x%x.  Defaulting to %d bytes.\n",
1725 		    (len ^ (1 << (ffs(len) - 1))) ?
1726 		    "preposterous" : "unsupported",
1727 		    len, SD_DEFAULT_BLKSIZE);
1728 	}
1729 
1730 	return 0;
1731 }
1732 
1733 /*
1734  * sd_read_capacity:
1735  *
1736  *	Find out from the device what its capacity is.
1737  */
1738 static u_int64_t
1739 sd_read_capacity(struct scsipi_periph *periph, int *blksize, int flags)
1740 {
1741 	union {
1742 		struct scsipi_read_capacity_10 cmd;
1743 		struct scsipi_read_capacity_16 cmd16;
1744 	} cmd;
1745 	union {
1746 		struct scsipi_read_capacity_10_data data;
1747 		struct scsipi_read_capacity_16_data data16;
1748 	} *datap;
1749 	uint64_t rv;
1750 
1751 	memset(&cmd, 0, sizeof(cmd));
1752 	cmd.cmd.opcode = READ_CAPACITY_10;
1753 
1754 	 * Don't allocate the data buffer on the stack;
1755 	 * the lower driver layer might use the same stack and,
1756 	 * if it uses a region which is in the same cacheline,
1757 	 * if it uses region which is in the same cacheline,
1758 	 * cache flush ops against the data buffer won't work properly.
1759 	 */
1760 	datap = malloc(sizeof(*datap), M_TEMP, M_WAITOK);
1761 	if (datap == NULL)
1762 		return 0;
1763 
1764 	/*
1765 	 * If the command works, interpret the result as a 4 byte
1766 	 * number of blocks
1767 	 */
1768 	rv = 0;
1769 	memset(datap, 0, sizeof(datap->data));
1770 	if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
1771 	    (void *)datap, sizeof(datap->data), SCSIPIRETRIES, 20000, NULL,
1772 	    flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1773 		goto out;
1774 
1775 	if (_4btol(datap->data.addr) != 0xffffffff) {
1776 		*blksize = _4btol(datap->data.length);
1777 		rv = _4btol(datap->data.addr) + 1;
1778 		goto out;
1779 	}
1780 
1781 	/*
1782 	 * Device is larger than can be reflected by READ CAPACITY (10).
1783 	 * Try READ CAPACITY (16).
1784 	 */
1785 
1786 	memset(&cmd, 0, sizeof(cmd));
1787 	cmd.cmd16.opcode = READ_CAPACITY_16;
1788 	cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
1789 	_lto4b(sizeof(datap->data16), cmd.cmd16.len);
1790 
1791 	memset(datap, 0, sizeof(datap->data16));
1792 	if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
1793 	    (void *)datap, sizeof(datap->data16), SCSIPIRETRIES, 20000, NULL,
1794 	    flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1795 		goto out;
1796 
1797 	*blksize = _4btol(datap->data16.length);
1798 	rv = _8btol(datap->data16.addr) + 1;
1799 
1800  out:
1801 	free(datap, M_TEMP);
1802 	return rv;
1803 }
1804 
1805 static int
1806 sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1807 {
1808 	struct {
1809 		struct scsi_mode_parameter_header_6 header;
1810 		/* no block descriptor */
1811 		u_int8_t pg_code; /* page code (should be 6) */
1812 		u_int8_t pg_length; /* page length (should be 11) */
1813 		u_int8_t wcd; /* bit0: cache disable */
1814 		u_int8_t lbs[2]; /* logical block size */
1815 		u_int8_t size[5]; /* number of log. blocks */
1816 		u_int8_t pp; /* power/performance */
1817 		u_int8_t flags;
1818 		u_int8_t resvd;
1819 	} scsipi_sense;
1820 	u_int64_t blocks;
1821 	int error, blksize;
1822 
1823 	/*
1824 	 * sd_read_capacity (ie "read capacity") and mode sense page 6
1825 	 * give the same information. Do both for now, and check
1826 	 * for consistency.
1827 	 * XXX probably differs for removable media
1828 	 */
1829 	dp->blksize = SD_DEFAULT_BLKSIZE;
1830 	if ((blocks = sd_read_capacity(sd->sc_periph, &blksize, flags)) == 0)
1831 		return (SDGP_RESULT_OFFLINE);		/* XXX? */
1832 
1833 	error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1834 	    &scsipi_sense.header, sizeof(scsipi_sense),
1835 	    flags, SDRETRIES, 6000);
1836 
1837 	if (error != 0)
1838 		return (SDGP_RESULT_OFFLINE);		/* XXX? */
1839 
1840 	dp->blksize = blksize;
1841 	if (!sd_validate_blksize(NULL, dp->blksize))
1842 		dp->blksize = _2btol(scsipi_sense.lbs);
1843 	if (!sd_validate_blksize(sd->sc_periph, dp->blksize))
1844 		dp->blksize = SD_DEFAULT_BLKSIZE;
1845 
1846 	/*
1847 	 * Create a pseudo-geometry.
1848 	 */
1849 	dp->heads = 64;
1850 	dp->sectors = 32;
1851 	dp->cyls = blocks / (dp->heads * dp->sectors);
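	/*
	 * For example, a 2097152-block device (1 GiB at 512 bytes/block)
	 * reports 64 heads, 32 sectors and 1024 cylinders here.
	 */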
1852 	dp->disksize = _5btol(scsipi_sense.size);
1853 	if (dp->disksize <= UINT32_MAX && dp->disksize != blocks) {
1854 		printf("RBC size: mode sense=%llu, get cap=%llu\n",
1855 		       (unsigned long long)dp->disksize,
1856 		       (unsigned long long)blocks);
1857 		dp->disksize = blocks;
1858 	}
1859 	dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1860 
1861 	return (SDGP_RESULT_OK);
1862 }
1863 
1864 /*
1865  * Get the scsi driver to send a full inquiry to the device and use the
1866  * results to fill out the disk parameter structure.
1867  */
1868 static int
1869 sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
1870 {
1871 	u_int64_t blocks;
1872 	int error, blksize;
1873 #if 0
1874 	int i;
1875 	u_int8_t *p;
1876 #endif
1877 
1878 	dp->disksize = blocks = sd_read_capacity(sd->sc_periph, &blksize,
1879 	    flags);
1880 	if (blocks == 0) {
1881 		struct scsipi_read_format_capacities cmd;
1882 		struct {
1883 			struct scsipi_capacity_list_header header;
1884 			struct scsipi_capacity_descriptor desc;
1885 		} __packed data;
1886 
1887 		memset(&cmd, 0, sizeof(cmd));
1888 		memset(&data, 0, sizeof(data));
1889 		cmd.opcode = READ_FORMAT_CAPACITIES;
1890 		_lto2b(sizeof(data), cmd.length);
1891 
1892 		error = scsipi_command(sd->sc_periph,
1893 		    (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
1894 		    SDRETRIES, 20000, NULL,
1895 		    flags | XS_CTL_DATA_IN);
1896 		if (error == EFTYPE) {
1897 			/* Medium Format Corrupted, handle as not formatted */
1898 			return (SDGP_RESULT_UNFORMATTED);
1899 		}
1900 		if (error || data.header.length == 0)
1901 			return (SDGP_RESULT_OFFLINE);
1902 
1903 #if 0
1904 printf("rfc: length=%d\n", data.header.length);
1905 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
1906 #endif
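		/*
		 * The capacity descriptor's code field says whether the
		 * medium in the drive is formatted (or reserved, treated
		 * the same here), unformatted, or absent.
		 */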
1907 		switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1908 		case SCSIPI_CAP_DESC_CODE_RESERVED:
1909 		case SCSIPI_CAP_DESC_CODE_FORMATTED:
1910 			break;
1911 
1912 		case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1913 			return (SDGP_RESULT_UNFORMATTED);
1914 
1915 		case SCSIPI_CAP_DESC_CODE_NONE:
1916 			return (SDGP_RESULT_OFFLINE);
1917 		}
1918 
1919 		dp->disksize = blocks = _4btol(data.desc.nblks);
1920 		if (blocks == 0)
1921 			return (SDGP_RESULT_OFFLINE);		/* XXX? */
1922 
1923 		blksize = _3btol(data.desc.blklen);
1924 
1925 	} else if (!sd_validate_blksize(NULL, blksize)) {
1926 		struct sd_mode_sense_data scsipi_sense;
1927 		int big, bsize;
1928 		struct scsi_general_block_descriptor *bdesc;
1929 
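		/*
		 * READ CAPACITY returned a block count but no plausible
		 * block size; try the block descriptor returned by a
		 * MODE SENSE of page 0 instead.
		 */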
1930 		memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1931 		error = sd_mode_sense(sd, 0, &scsipi_sense,
1932 		    sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1933 		if (!error) {
1934 			if (big) {
1935 				bdesc = (void *)(&scsipi_sense.header.big + 1);
1936 				bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1937 			} else {
1938 				bdesc = (void *)(&scsipi_sense.header.small + 1);
1939 				bsize = scsipi_sense.header.small.blk_desc_len;
1940 			}
1941 
1942 #if 0
1943 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1944 printf("page 0 bsize=%d\n", bsize);
1945 printf("page 0 ok\n");
1946 #endif
1947 
1948 			if (bsize >= 8) {
1949 				blksize = _3btol(bdesc->blklen);
1950 			}
1951 		}
1952 	}
1953 
1954 	if (!sd_validate_blksize(sd->sc_periph, blksize))
1955 		blksize = SD_DEFAULT_BLKSIZE;
1956 
1957 	dp->blksize = blksize;
1958 	dp->disksize512 = (blocks * dp->blksize) / DEV_BSIZE;
1959 	return (0);
1960 }
1961 
1962 static int
1963 sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
1964 {
1965 	struct sd_mode_sense_data scsipi_sense;
1966 	int error;
1967 	int big, byte2;
1968 	size_t poffset;
1969 	union scsi_disk_pages *pages;
1970 
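	/*
	 * First request the page with the DBD (disable block descriptors)
	 * bit set so no block descriptors are returned; if the device
	 * rejects that, retry once with DBD clear.
	 */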
1971 	byte2 = SMS_DBD;
1972 again:
1973 	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1974 	error = sd_mode_sense(sd, byte2, &scsipi_sense,
1975 	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1976 	    sizeof(scsipi_sense.pages.rigid_geometry), 4,
1977 	    flags | XS_CTL_SILENT, &big);
1978 	if (error) {
1979 		if (byte2 == SMS_DBD) {
1980 			/* No result; try once more with DBD off */
1981 			byte2 = 0;
1982 			goto again;
1983 		}
1984 		return (error);
1985 	}
1986 
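	/*
	 * The mode page data follows the mode parameter header and any
	 * block descriptors; compute its offset from the header fields.
	 */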
1987 	if (big) {
1988 		poffset = sizeof scsipi_sense.header.big;
1989 		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1990 	} else {
1991 		poffset = sizeof scsipi_sense.header.small;
1992 		poffset += scsipi_sense.header.small.blk_desc_len;
1993 	}
1994 
1995 	if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
1996 		return ERESTART;
1997 
1998 	pages = (void *)((u_long)&scsipi_sense + poffset);
1999 #if 0
2000 	{
2001 		size_t i;
2002 		u_int8_t *p;
2003 
2004 		printf("page 4 sense:");
2005 		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
2006 		    i--, p++)
2007 			printf(" %02x", *p);
2008 		printf("\n");
2009 		printf("page 4 pg_code=%d sense=%p/%p\n",
2010 		    pages->rigid_geometry.pg_code, &scsipi_sense, pages);
2011 	}
2012 #endif
2013 
2014 	if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
2015 		return (ERESTART);
2016 
2017 	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
2018 	    ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
2019 	    _3btol(pages->rigid_geometry.ncyl),
2020 	    pages->rigid_geometry.nheads,
2021 	    _2btol(pages->rigid_geometry.st_cyl_wp),
2022 	    _2btol(pages->rigid_geometry.st_cyl_rwc),
2023 	    _2btol(pages->rigid_geometry.land_zone)));
2024 
2025 	/*
2026 	 * KLUDGE!! (for zone recorded disks)
2027 	 * give a number of sectors so that sectors * heads * cylinders
2028 	 * come to <= disksize;
2029 	 * this can lead to wasted space! THINK ABOUT THIS!
2030 	 */
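	/*
	 * E.g. a 4200000-block disk reporting 16 heads and 4096 cylinders
	 * yields 4200000 / (16 * 4096) = 64 sectors per track, which
	 * describes only 64 * 16 * 4096 = 4194304 blocks and leaves
	 * 5696 blocks unaccounted for.
	 */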
2031 	dp->heads = pages->rigid_geometry.nheads;
2032 	dp->cyls = _3btol(pages->rigid_geometry.ncyl);
2033 	if (dp->heads == 0 || dp->cyls == 0)
2034 		return (ERESTART);
2035 	dp->sectors = dp->disksize / (dp->heads * dp->cyls);	/* XXX */
2036 
2037 	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
2038 	if (dp->rot_rate == 0)
2039 		dp->rot_rate = 3600;
2040 
2041 #if 0
2042 printf("page 4 ok\n");
2043 #endif
2044 	return (0);
2045 }
2046 
2047 static int
2048 sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
2049 {
2050 	struct sd_mode_sense_data scsipi_sense;
2051 	int error;
2052 	int big, byte2;
2053 	size_t poffset;
2054 	union scsi_disk_pages *pages;
2055 
2056 	byte2 = SMS_DBD;
2057 again:
2058 	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2059 	error = sd_mode_sense(sd, byte2, &scsipi_sense,
2060 	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
2061 	    sizeof(scsipi_sense.pages.flex_geometry), 5,
2062 	    flags | XS_CTL_SILENT, &big);
2063 	if (error) {
2064 		if (byte2 == SMS_DBD) {
2065 			/* No result; try once more with DBD off */
2066 			byte2 = 0;
2067 			goto again;
2068 		}
2069 		return (error);
2070 	}
2071 
2072 	if (big) {
2073 		poffset = sizeof scsipi_sense.header.big;
2074 		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
2075 	} else {
2076 		poffset = sizeof scsipi_sense.header.small;
2077 		poffset += scsipi_sense.header.small.blk_desc_len;
2078 	}
2079 
2080 	if (poffset > sizeof(scsipi_sense) - sizeof(pages->flex_geometry))
2081 		return ERESTART;
2082 
2083 	pages = (void *)((u_long)&scsipi_sense + poffset);
2084 #if 0
2085 	{
2086 		size_t i;
2087 		u_int8_t *p;
2088 
2089 		printf("page 5 sense:");
2090 		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
2091 		    i--, p++)
2092 			printf(" %02x", *p);
2093 		printf("\n");
2094 		printf("page 5 pg_code=%d sense=%p/%p\n",
2095 		    pages->flex_geometry.pg_code, &scsipi_sense, pages);
2096 	}
2097 #endif
2098 
2099 	if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
2100 		return (ERESTART);
2101 
2102 	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
2103 	    ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
2104 	    _3btol(pages->flex_geometry.ncyl),
2105 	    pages->flex_geometry.nheads,
2106 	    pages->flex_geometry.ph_sec_tr,
2107 	    _2btol(pages->flex_geometry.bytes_s)));
2108 
2109 	dp->heads = pages->flex_geometry.nheads;
2110 	dp->cyls = _2btol(pages->flex_geometry.ncyl);
2111 	dp->sectors = pages->flex_geometry.ph_sec_tr;
2112 	if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
2113 		return (ERESTART);
2114 
2115 	dp->rot_rate = _2btol(pages->flex_geometry.rpm);
2116 	if (dp->rot_rate == 0)
2117 		dp->rot_rate = 3600;
2118 
2119 #if 0
2120 printf("page 5 ok\n");
2121 #endif
2122 	return (0);
2123 }
2124 
2125 static int
2126 sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
2127 {
2128 	int error;
2129 
2130 	/*
2131 	 * If offline, the SDEV_MEDIA_LOADED flag will be
2132 	 * cleared by the caller if necessary.
2133 	 */
2134 	if (sd->type == T_SIMPLE_DIRECT) {
2135 		error = sd_get_simplifiedparms(sd, dp, flags);
2136 		if (!error)
2137 			disk_blocksize(&sd->sc_dk, dp->blksize);
2138 		return (error);
2139 	}
2140 
2141 	error = sd_get_capacity(sd, dp, flags);
2142 	if (error)
2143 		return (error);
2144 
2145 	disk_blocksize(&sd->sc_dk, dp->blksize);
2146 
2147 	if (sd->type == T_OPTICAL)
2148 		goto page0;
2149 
2150 	if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
2151 		if (!sd_get_parms_page5(sd, dp, flags) ||
2152 		    !sd_get_parms_page4(sd, dp, flags))
2153 			goto setprops;
2154 	} else {
2155 		if (!sd_get_parms_page4(sd, dp, flags) ||
2156 		    !sd_get_parms_page5(sd, dp, flags))
2157 			goto setprops;
2158 	}
2159 
2160 page0:
2161 	printf("%s: fabricating a geometry\n", device_xname(sd->sc_dev));
2162 	/* Try calling driver's method for figuring out geometry. */
2163 	if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
2164 	    !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
2165 		(sd->sc_periph, dp, dp->disksize)) {
2166 		/*
2167 		 * Use adaptec standard fictitious geometry
2168 		 * this depends on which controller (e.g. the 1542C is
2169 		 * different), but we have to put SOMETHING here..
2170 		 */
2171 		dp->heads = 64;
2172 		dp->sectors = 32;
2173 		dp->cyls = dp->disksize / (64 * 32);
2174 	}
2175 	dp->rot_rate = 3600;
2176 
2177 setprops:
2178 	sd_set_geometry(sd);
2179 
2180 	return (SDGP_RESULT_OK);
2181 }
2182 
2183 static int
2184 sd_flush(struct sd_softc *sd, int flags)
2185 {
2186 	struct scsipi_periph *periph = sd->sc_periph;
2187 	struct scsi_synchronize_cache_10 cmd;
2188 
2189 	/*
2190 	 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
2191 	 * We issue it with address 0 and length 0, which should be
2192 	 * interpreted by the device as "all remaining blocks
2193 	 * starting at address 0".  We ignore ILLEGAL REQUEST
2194 	 * in the event that the command is not supported by
2195 	 * the device, and poll for completion so that we know
2196 	 * that the cache has actually been flushed.
2197 	 *
2198 	 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
2199 	 * command, as indicated by our quirks flags.
2200 	 *
2201 	 * XXX What about older devices?
2202 	 */
2203 	if (periph->periph_version < 2 ||
2204 	    (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
2205 		return (0);
2206 
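	/*
	 * Build an all-zero SYNCHRONIZE CACHE (10) CDB (opcode 0x35):
	 * LBA 0, block count 0, i.e. flush everything.
	 */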
2207 	sd->flags |= SDF_FLUSHING;
2208 	memset(&cmd, 0, sizeof(cmd));
2209 	cmd.opcode = SCSI_SYNCHRONIZE_CACHE_10;
2210 
2211 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
2212 	    SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
2213 }
2214 
2215 static int
2216 sd_getcache(struct sd_softc *sd, int *bitsp)
2217 {
2218 	struct scsipi_periph *periph = sd->sc_periph;
2219 	struct sd_mode_sense_data scsipi_sense;
2220 	int error, bits = 0;
2221 	int big;
2222 	union scsi_disk_pages *pages;
2223 
2224 	if (periph->periph_version < 2)
2225 		return (EOPNOTSUPP);
2226 
2227 	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2228 	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2229 	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2230 	if (error)
2231 		return (error);
2232 
2233 	if (big)
2234 		pages = (void *)(&scsipi_sense.header.big + 1);
2235 	else
2236 		pages = (void *)(&scsipi_sense.header.small + 1);
2237 
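	/*
	 * In the caching page RCD is "read cache disable" and WCE is
	 * "write cache enable", so a clear RCD bit means the read cache
	 * is on.  The PS bit in the page code byte indicates the page
	 * is savable.
	 */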
2238 	if ((pages->caching_params.flags & CACHING_RCD) == 0)
2239 		bits |= DKCACHE_READ;
2240 	if (pages->caching_params.flags & CACHING_WCE)
2241 		bits |= DKCACHE_WRITE;
2242 	if (pages->caching_params.pg_code & PGCODE_PS)
2243 		bits |= DKCACHE_SAVE;
2244 
2245 	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2246 	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2247 	    sizeof(scsipi_sense.pages.caching_params),
2248 	    SMS_PCTRL_CHANGEABLE|8, 0, &big);
2249 	if (error == 0) {
2250 		if (big)
2251 			pages = (void *)(&scsipi_sense.header.big + 1);
2252 		else
2253 			pages = (void *)(&scsipi_sense.header.small + 1);
2254 
2255 		if (pages->caching_params.flags & CACHING_RCD)
2256 			bits |= DKCACHE_RCHANGE;
2257 		if (pages->caching_params.flags & CACHING_WCE)
2258 			bits |= DKCACHE_WCHANGE;
2259 	}
2260 
2261 	*bitsp = bits;
2262 
2263 	return (0);
2264 }
2265 
2266 static int
2267 sd_setcache(struct sd_softc *sd, int bits)
2268 {
2269 	struct scsipi_periph *periph = sd->sc_periph;
2270 	struct sd_mode_sense_data scsipi_sense;
2271 	int error;
2272 	uint8_t oflags, byte2 = 0;
2273 	int big;
2274 	union scsi_disk_pages *pages;
2275 
2276 	if (periph->periph_version < 2)
2277 		return (EOPNOTSUPP);
2278 
2279 	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2280 	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2281 	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2282 	if (error)
2283 		return (error);
2284 
2285 	if (big)
2286 		pages = (void *)(&scsipi_sense.header.big + 1);
2287 	else
2288 		pages = (void *)(&scsipi_sense.header.small + 1);
2289 
2290 	oflags = pages->caching_params.flags;
2291 
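	/*
	 * DKCACHE_READ means enable the read cache, i.e. clear the RCD
	 * (read cache disable) bit; DKCACHE_WRITE sets WCE (write cache
	 * enable).  A MODE SELECT is only issued if the flags change.
	 */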
2292 	if (bits & DKCACHE_READ)
2293 		pages->caching_params.flags &= ~CACHING_RCD;
2294 	else
2295 		pages->caching_params.flags |= CACHING_RCD;
2296 
2297 	if (bits & DKCACHE_WRITE)
2298 		pages->caching_params.flags |= CACHING_WCE;
2299 	else
2300 		pages->caching_params.flags &= ~CACHING_WCE;
2301 
2302 	if (oflags == pages->caching_params.flags)
2303 		return (0);
2304 
2305 	pages->caching_params.pg_code &= PGCODE_MASK;
2306 
2307 	if (bits & DKCACHE_SAVE)
2308 		byte2 |= SMS_SP;
2309 
2310 	return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2311 	    sizeof(struct scsi_mode_page_header) +
2312 	    pages->caching_params.pg_length, 0, big));
2313 }
2314 
2315 static void
2316 sd_set_geometry(struct sd_softc *sd)
2317 {
2318 	struct disk_geom *dg = &sd->sc_dk.dk_geom;
2319 
2320 	memset(dg, 0, sizeof(*dg));
2321 
2322 	dg->dg_secperunit = sd->params.disksize;
2323 	dg->dg_secsize = sd->params.blksize;
2324 	dg->dg_nsectors = sd->params.sectors;
2325 	dg->dg_ntracks = sd->params.heads;
2326 	dg->dg_ncylinders = sd->params.cyls;
2327 
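	/* Hand the probed geometry to the generic disk(9) layer. */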
2328 	disk_set_info(sd->sc_dev, &sd->sc_dk, NULL);
2329 }
2330