xref: /openbsd-src/sys/scsi/sd.c (revision 9f11ffb7133c203312a01e4b986886bc88c7d74b)
1 /*	$OpenBSD: sd.c,v 1.277 2019/01/20 20:28:37 krw Exp $	*/
2 /*	$NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $	*/
3 
4 /*-
5  * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Charles M. Hannum.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Originally written by Julian Elischer (julian@dialix.oz.au)
35  * for TRW Financial Systems for use under the MACH(2.5) operating system.
36  *
37  * TRW Financial Systems, in accordance with their agreement with Carnegie
38  * Mellon University, makes this software available to CMU to distribute
39  * or use in any manner that they see fit as long as this message is kept with
40  * the software. For this reason TFS also grants any other persons or
41  * organisations permission to use or modify this software.
42  *
43  * TFS supplies this software to be publicly redistributed
44  * on the understanding that TFS is not responsible for the correct
45  * functioning of this software in any circumstances.
46  *
47  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
48  */
49 
50 #include <sys/stdint.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/timeout.h>
54 #include <sys/fcntl.h>
55 #include <sys/stat.h>
56 #include <sys/ioctl.h>
57 #include <sys/mtio.h>
58 #include <sys/mutex.h>
59 #include <sys/buf.h>
60 #include <sys/uio.h>
61 #include <sys/malloc.h>
62 #include <sys/pool.h>
63 #include <sys/errno.h>
64 #include <sys/device.h>
65 #include <sys/disklabel.h>
66 #include <sys/disk.h>
67 #include <sys/conf.h>
68 #include <sys/scsiio.h>
69 #include <sys/dkio.h>
70 #include <sys/reboot.h>
71 
72 #include <scsi/scsi_all.h>
73 #include <scsi/scsi_disk.h>
74 #include <scsi/scsiconf.h>
75 #include <scsi/sdvar.h>
76 
77 #include <ufs/ffs/fs.h>			/* for BBSIZE and SBSIZE */
78 
79 #include <sys/vnode.h>
80 
81 int	sdmatch(struct device *, void *, void *);
82 void	sdattach(struct device *, struct device *, void *);
83 int	sdactivate(struct device *, int);
84 int	sddetach(struct device *, int);
85 
86 void	sdminphys(struct buf *);
87 int	sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
88 void	sdstart(struct scsi_xfer *);
89 int	sd_interpret_sense(struct scsi_xfer *);
90 int	sd_read_cap_10(struct sd_softc *, int);
91 int	sd_read_cap_16(struct sd_softc *, int);
92 int	sd_size(struct sd_softc *, int);
93 int	sd_thin_pages(struct sd_softc *, int);
94 int	sd_vpd_block_limits(struct sd_softc *, int);
95 int	sd_vpd_thin(struct sd_softc *, int);
96 int	sd_thin_params(struct sd_softc *, int);
97 int	sd_get_parms(struct sd_softc *, struct disk_parms *, int);
98 int	sd_flush(struct sd_softc *, int);
99 
100 void	viscpy(u_char *, u_char *, int);
101 
102 int	sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
103 int	sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);
104 
105 void	sd_cmd_rw6(struct scsi_xfer *, int, u_int64_t, u_int);
106 void	sd_cmd_rw10(struct scsi_xfer *, int, u_int64_t, u_int);
107 void	sd_cmd_rw12(struct scsi_xfer *, int, u_int64_t, u_int);
108 void	sd_cmd_rw16(struct scsi_xfer *, int, u_int64_t, u_int);
109 
110 void	sd_buf_done(struct scsi_xfer *);
111 
/* Autoconf attachment glue: softc size plus match/attach/detach/activate. */
struct cfattach sd_ca = {
	sizeof(struct sd_softc), sdmatch, sdattach,
	sddetach, sdactivate
};
116 
/* Driver descriptor: device list (filled in by autoconf), name, class. */
struct cfdriver sd_cd = {
	NULL, "sd", DV_DISK
};
120 
/*
 * INQUIRY match patterns: any direct-access, reduced direct-access or
 * optical device, fixed or removable.  The empty vendor/product/revision
 * strings act as wildcards, so all such devices match.
 */
const struct scsi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_DIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_RDIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_RDIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_OPTICAL, T_FIXED,
	 "",         "",                 ""},
	{T_OPTICAL, T_REMOV,
	 "",         "",                 ""},
};
135 
136 #define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
137 
138 int
139 sdmatch(struct device *parent, void *match, void *aux)
140 {
141 	struct scsi_attach_args *sa = aux;
142 	int priority;
143 
144 	(void)scsi_inqmatch(sa->sa_inqbuf,
145 	    sd_patterns, nitems(sd_patterns),
146 	    sizeof(sd_patterns[0]), &priority);
147 
148 	return (priority);
149 }
150 
151 /*
152  * The routine called by the low level scsi routine when it discovers
153  * a device suitable for this driver.
154  */
void
sdattach(struct device *parent, struct device *self, void *aux)
{
	struct sd_softc *sc = (struct sd_softc *)self;
	struct scsi_attach_args *sa = aux;
	struct disk_parms *dp = &sc->params;
	struct scsi_link *link = sa->sa_sc_link;
	int sd_autoconf = scsi_autoconf | SCSI_SILENT |
	    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;
	struct dk_cache dkc;
	int error, result, sortby = BUFQ_DEFAULT;

	SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));

	/*
	 * Store information needed to contact our base driver.
	 */
	sc->sc_link = link;
	link->interpret_sense = sd_interpret_sense;
	link->device_softc = sc;

	/* Removable ATAPI devices don't handle SYNCHRONIZE CACHE. */
	if ((link->flags & SDEV_ATAPI) && (link->flags & SDEV_REMOVABLE))
		link->quirks |= SDEV_NOSYNCCACHE;

	/* Without relative addressing support, avoid 6-byte R/W CDBs. */
	if (!(link->inqdata.flags & SID_RelAdr))
		link->quirks |= SDEV_ONLYBIG;

	/*
	 * Note if this device is ancient.  This is used in sdminphys().
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    SCSISPC(sa->sa_inqbuf->version) == 0)
		sc->flags |= SDF_ANCIENT;

	/*
	 * Use the subdriver to request information regarding
	 * the drive. We cannot use interrupts yet, so the
	 * request must specify this.
	 */
	printf("\n");

	/* Handler that drains our buf queue, and a timeout to re-arm it. */
	scsi_xsh_set(&sc->sc_xsh, link, sdstart);
	timeout_set(&sc->sc_timeout, (void (*)(void *))scsi_xsh_add,
	    &sc->sc_xsh);

	/* Spin up non-UMASS devices ready or not. */
	if ((link->flags & SDEV_UMASS) == 0)
		scsi_start(link, SSS_START, sd_autoconf);

	/*
	 * Some devices (e.g. BlackBerry Pearl) won't admit they have
	 * media loaded unless its been locked in.
	 */
	if ((link->flags & SDEV_REMOVABLE) != 0)
		scsi_prevent(link, PR_PREVENT, sd_autoconf);

	/* Check that it is still responding and ok. */
	error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
	    sd_autoconf);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = sd_get_parms(sc, &sc->params, sd_autoconf);

	/* Undo the PREVENT issued above so media can be changed again. */
	if ((link->flags & SDEV_REMOVABLE) != 0)
		scsi_prevent(link, PR_ALLOW, sd_autoconf);

	switch (result) {
	case SDGP_RESULT_OK:
		printf("%s: %lluMB, %lu bytes/sector, %llu sectors",
		    sc->sc_dev.dv_xname,
		    dp->disksize / (1048576 / dp->secsize), dp->secsize,
		    dp->disksize);
		if (ISSET(sc->flags, SDF_THIN)) {
			/* Thin-provisioned: don't reorder I/O, use FIFO. */
			sortby = BUFQ_FIFO;
			printf(", thin");
		}
		if (ISSET(link->flags, SDEV_READONLY)) {
			printf(", readonly");
		}
		printf("\n");
		break;

	case SDGP_RESULT_OFFLINE:
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result (%#x) from get_parms", result);
		break;
#endif
	}

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, sortby);

	/*
	 * Enable write cache by default.
	 */
	memset(&dkc, 0, sizeof(dkc));
	if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
		dkc.wrcache = 1;
		sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
	}

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);
}
267 
/*
 * Power-management and deactivation hook: flush dirty caches around
 * suspend/powerdown, restart the disk on resume, and mark the device
 * dying on deactivate.
 */
int
sdactivate(struct device *self, int act)
{
	struct scsi_link *link;
	struct sd_softc *sc = (struct sd_softc *)self;

	if (sc->flags & SDF_DYING)
		return (ENXIO);
	link = sc->sc_link;

	switch (act) {
	case DVACT_SUSPEND:
		/*
		 * We flush the cache, since our next step before
		 * DVACT_POWERDOWN might be a hibernate operation.
		 */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, SCSI_AUTOCONF);
		break;
	case DVACT_POWERDOWN:
		/*
		 * Stop the disk.  Stopping the disk should flush the
		 * cache, but we are paranoid so we flush the cache
		 * first.  We're cold at this point, so we poll for
		 * completion.
		 */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, SCSI_AUTOCONF);
		if (boothowto & RB_POWERDOWN)
			scsi_start(link, SSS_STOP,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
		break;
	case DVACT_RESUME:
		scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
		break;
	case DVACT_DEACTIVATE:
		/* Refuse new work and cancel anything already scheduled. */
		sc->flags |= SDF_DYING;
		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
		break;
	}
	return (0);
}
313 
/*
 * Detach the device: drop queued I/O, invalidate open vnodes, then
 * tear down the buf queue and the disk framework state.
 */
int
sddetach(struct device *self, int flags)
{
	struct sd_softc *sc = (struct sd_softc *)self;

	/* Fail all bufs still queued; nothing will service them now. */
	bufq_drain(&sc->sc_bufq);

	/* Revoke any vnodes open on this unit. */
	disk_gone(sdopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return (0);
}
329 
330 /*
331  * Open the device. Make sure the partition info is as up-to-date as can be.
332  */
int
sdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int error = 0, part, rawopen, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);

	/* Raw-partition character opens get relaxed failure handling. */
	rawopen = (part == RAW_PART) && (fmt == S_IFCHR);

	sc = sdlookup(unit);
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	/* Read-only media cannot be opened for writing. */
	if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
		device_unref(&sc->sc_dev);
		return (EACCES);
	}

	SC_DEBUG(link, SDEV_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	if ((error = disk_lock(&sc->sc_dk)) != 0) {
		device_unref(&sc->sc_dev);
		return (error);
	}

	/*
	 * NOTE(review): SDF_DYING is re-checked after every call that may
	 * sleep, since the device can be deactivated while we slept.
	 */
	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partition.
		 */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
			if (rawopen)
				goto out;
			error = EIO;
			goto bad;
		}
	} else {
		/* Spin up non-UMASS devices ready or not. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_UMASS) == 0)
			scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
			    0) | SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);

		/* Use sd_interpret_sense() for sense errors.
		 *
		 * But only after spinning the disk up! Just in case a broken
		 * device returns "Initialization command required." and causes
		 * a loop of scsi_start() calls.
		 */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags |= SDEV_OPEN;

		/*
		 * Try to prevent the unloading of a removable device while
		 * it's open. But allow the open to proceed if the device can't
		 * be locked in.
		 */
		if ((link->flags & SDEV_REMOVABLE) != 0) {
			scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		}

		/* Check that it is still responding and ok. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		error = scsi_test_unit_ready(link,
		    TEST_READY_RETRIES, SCSI_SILENT |
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
		if (error) {
			if (rawopen) {
				error = 0;
				goto out;
			} else
				goto bad;
		}

		/* Load the physical device parameters. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags |= SDEV_MEDIA_LOADED;
		if (sd_get_parms(sc, &sc->params, (rawopen ? SCSI_SILENT : 0))
		    == SDGP_RESULT_OFFLINE) {
			if (sc->flags & SDF_DYING) {
				error = ENXIO;
				goto die;
			}
			link->flags &= ~SDEV_MEDIA_LOADED;
			error = ENXIO;
			goto bad;
		}
		SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));

		/* Load the partition info if not already loaded. */
		error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
		if (error == EIO || error == ENXIO)
			goto bad;
		SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
	}

out:
	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto bad;

	SC_DEBUG(link, SDEV_DB3, ("open complete\n"));

	/* It's OK to fall through because dk_openmask is now non-zero. */
bad:
	if (sc->sc_dk.dk_openmask == 0) {
		/* Last reference went away: release the media lock-in. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_REMOVABLE) != 0)
			scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
487 
488 /*
489  * Close the device. Only called if we are the last occurrence of an open
490  * device.  Convenient now but usually a pain.
491  */
int
sdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int part = DISKPART(dev);
	int error = 0;

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	disk_lock_nointr(&sc->sc_dk);

	disk_closepart(&sc->sc_dk, part, fmt);

	/*
	 * Flush on the last close, or whenever a writable descriptor on
	 * a dirty disk is closed.
	 */
	if (((flag & FWRITE) != 0 || sc->sc_dk.dk_openmask == 0) &&
	    (sc->flags & SDF_DIRTY) != 0)
		sd_flush(sc, 0);

	if (sc->sc_dk.dk_openmask == 0) {
		/* Last close: allow media removal again. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_REMOVABLE) != 0)
			scsi_prevent(link, PR_ALLOW,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_SILENT);
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);

		/* Honour a pending DIOCEJECT/MTOFFL request. */
		if (link->flags & SDEV_EJECTING) {
			scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
			if (sc->flags & SDF_DYING) {
				error = ENXIO;
				goto die;
			}
			link->flags &= ~SDEV_EJECTING;
		}

		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
550 
551 /*
552  * Actually translate the requested transfer into one the physical driver
553  * can understand.  The transfer is described by a buf and will include
554  * only one physical transfer.
555  */
void
sdstrategy(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		/* sc stays NULL: the done path below skips device_unref(). */
		bp->b_error = ENXIO;
		goto bad;
	}
	if (sc->flags & SDF_DYING) {
		bp->b_error = ENXIO;
		goto bad;
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
	    bp->b_bcount, (long long)bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		if (link->flags & SDEV_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Place it in the queue of disk activities for this disk. */
	bufq_queue(&sc->sc_bufq, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	scsi_xsh_add(&sc->sc_xsh);

	device_unref(&sc->sc_dev);
	return;

bad:
	SET(bp->b_flags, B_ERROR);
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
613 
614 void
615 sd_cmd_rw6(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
616 {
617 	struct scsi_rw *cmd = (struct scsi_rw *)xs->cmd;
618 
619 	cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
620 	_lto3b(secno, cmd->addr);
621 	cmd->length = nsecs;
622 
623 	xs->cmdlen = sizeof(*cmd);
624 }
625 
626 void
627 sd_cmd_rw10(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
628 {
629 	struct scsi_rw_big *cmd = (struct scsi_rw_big *)xs->cmd;
630 
631 	cmd->opcode = read ? READ_BIG : WRITE_BIG;
632 	_lto4b(secno, cmd->addr);
633 	_lto2b(nsecs, cmd->length);
634 
635 	xs->cmdlen = sizeof(*cmd);
636 }
637 
638 void
639 sd_cmd_rw12(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
640 {
641 	struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)xs->cmd;
642 
643 	cmd->opcode = read ? READ_12 : WRITE_12;
644 	_lto4b(secno, cmd->addr);
645 	_lto4b(nsecs, cmd->length);
646 
647 	xs->cmdlen = sizeof(*cmd);
648 }
649 
650 void
651 sd_cmd_rw16(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
652 {
653 	struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)xs->cmd;
654 
655 	cmd->opcode = read ? READ_16 : WRITE_16;
656 	_lto8b(secno, cmd->addr);
657 	_lto4b(nsecs, cmd->length);
658 
659 	xs->cmdlen = sizeof(*cmd);
660 }
661 
662 /*
663  * sdstart looks to see if there is a buf waiting for the device
664  * and that the device is not already busy. If both are true,
665  * It dequeues the buf and creates a scsi command to perform the
666  * transfer in the buf. The transfer request will call scsi_done
667  * on completion, which will in turn call this routine again
668  * so that the next queued transfer is performed.
669  * The bufs are queued by the strategy routine (sdstrategy)
670  *
671  * This routine is also called after other non-queued requests
672  * have been made of the scsi driver, to ensure that the queue
673  * continues to be drained.
674  */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	if (sc->flags & SDF_DYING) {
		scsi_xs_put(xs);
		return;
	}
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		/* Media gone: fail everything that was queued. */
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	/* Convert the label-relative block number to an absolute sector. */
	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 *  Fill out the scsi command.  If the transfer will
	 *  fit in a "small" cdb, use it.
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    !(link->quirks & SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;

	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		sc->flags |= SDF_DIRTY;

	scsi_xs_exec(xs);

	/* move onto the next io */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
751 
/*
 * Completion handler for buf-backed transfers: translate the xfer status
 * into buf status, retry recoverable errors, and finish the buf.
 */
void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;
	int error, s;

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		CLR(bp->b_flags, B_ERROR);
		bp->b_resid = xs->resid;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
#ifdef SCSIDEBUG
		scsi_sense_print_debug(xs);
#endif
		error = sd_interpret_sense(xs);
		if (error == 0) {
			/* Sense was benign; treat as success. */
			bp->b_error = 0;
			CLR(bp->b_flags, B_ERROR);
			bp->b_resid = xs->resid;
			break;
		}
		if (error != ERESTART) {
			/* Hard error: record it and stop retrying. */
			bp->b_error = error;
			SET(bp->b_flags, B_ERROR);
			xs->retries = 0;
		}
		goto retry;

	case XS_BUSY:
		if (xs->retries) {
			/* Back off before retrying a busy device. */
			if (scsi_delay(xs, 1) != ERESTART)
				xs->retries = 0;
		}
		goto retry;

	case XS_TIMEOUT:
retry:
		if (xs->retries--) {
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		if (bp->b_error == 0)
			bp->b_error = EIO;
		SET(bp->b_flags, B_ERROR);
		bp->b_resid = bp->b_bcount;
		break;
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid, bp->b_blkno,
	    bp->b_flags & B_READ);

	s = splbio();
	biodone(bp);
	splx(s);
	scsi_xs_put(xs);
}
816 
/*
 * Clamp a transfer to what the device and adapter can handle before
 * physio() splits it up.
 */
void
sdminphys(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	long max;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL)
		return;  /* XXX - right way to fail this? */
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return;
	}
	link = sc->sc_link;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0.  However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0.  A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if (sc->flags & SDF_ANCIENT) {
		max = sc->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	/* Let the adapter apply its own limits as well. */
	(*link->adapter->scsi_minphys)(bp, link);

	device_unref(&sc->sc_dev);
}
855 
856 int
857 sdread(dev_t dev, struct uio *uio, int ioflag)
858 {
859 	return (physio(sdstrategy, dev, B_READ, sdminphys, uio));
860 }
861 
862 int
863 sdwrite(dev_t dev, struct uio *uio, int ioflag)
864 {
865 	return (physio(sdstrategy, dev, B_WRITE, sdminphys, uio));
866 }
867 
868 /*
869  * Perform special action on behalf of the user
870  * Knows about the internals of this device
871  */
int
sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	struct disklabel *lp;
	int error = 0;
	int part = DISKPART(dev);

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));

	/*
	 * If the device is not valid.. abandon ship
	 */
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		switch (cmd) {
		case DIOCLOCK:
		case DIOCEJECT:
		case SCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			/* These are still allowed on the raw partition. */
			if (part == RAW_PART)
				break;
		/* FALLTHROUGH */
		default:
			if ((link->flags & SDEV_OPEN) == 0) {
				error = ENODEV;
				goto exit;
			} else {
				error = EIO;
				goto exit;
			}
		}
	}

	switch (cmd) {
	case DIOCRLDINFO:
		/* Re-read the label into a scratch buffer, then install it. */
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		sdgetdisklabel(dev, sc, lp, 0);
		memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		/* Spoofed (default) label only, no on-disk read. */
		sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			/* DIOCWDINFO additionally writes the label to disk. */
			if (cmd == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    sdstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	case DIOCLOCK:
		error = scsi_prevent(link,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
		goto exit;

	case MTIOCTOP:
		/* Only the "offline" tape op makes sense for a disk. */
		if (((struct mtop *)addr)->mt_op != MTOFFL) {
			error = EIO;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCEJECT:
		if ((link->flags & SDEV_REMOVABLE) == 0) {
			error = ENOTTY;
			goto exit;
		}
		/* Actual ejection happens on last close; see sdclose(). */
		link->flags |= SDEV_EJECTING;
		goto exit;

	case DIOCINQ:
		error = scsi_do_ioctl(link, cmd, addr, flag);
		if (error == ENOTTY)
			error = sd_ioctl_inquiry(sc,
			    (struct dk_inquiry *)addr);
		goto exit;

	case DIOCSCACHE:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCGCACHE:
		error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
		goto exit;

	case DIOCCACHESYNC:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* Nonzero arg forces a flush even when the disk is clean. */
		if ((sc->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)
			error = sd_flush(sc, 0);
		goto exit;

	default:
		if (part != RAW_PART) {
			error = ENOTTY;
			goto exit;
		}
		error = scsi_do_ioctl(link, cmd, addr, flag);
	}

 exit:
	device_unref(&sc->sc_dev);
	return (error);
}
1016 
1017 int
1018 sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
1019 {
1020 	struct scsi_link *link;
1021 	struct scsi_vpd_serial *vpd;
1022 
1023 	vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
1024 
1025 	if (sc->flags & SDF_DYING) {
1026 		dma_free(vpd, sizeof(*vpd));
1027 		return (ENXIO);
1028 	}
1029 	link = sc->sc_link;
1030 
1031 	bzero(di, sizeof(struct dk_inquiry));
1032 	scsi_strvis(di->vendor, link->inqdata.vendor,
1033 	    sizeof(link->inqdata.vendor));
1034 	scsi_strvis(di->product, link->inqdata.product,
1035 	    sizeof(link->inqdata.product));
1036 	scsi_strvis(di->revision, link->inqdata.revision,
1037 	    sizeof(link->inqdata.revision));
1038 
1039 	/* the serial vpd page is optional */
1040 	if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
1041 		scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
1042 	else
1043 		strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));
1044 
1045 	dma_free(vpd, sizeof(*vpd));
1046 	return (0);
1047 }
1048 
/*
 * DIOCGCACHE/DIOCSCACHE backend: read (and for DIOCSCACHE, modify) the
 * caching mode page.  Returns 0 on success or an errno.
 */
int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
	struct scsi_link *link;
	union scsi_mode_sense_buf *buf;
	struct page_caching_mode *mode = NULL;
	u_int wrcache, rdcache;
	int big;
	int rv;

	if (sc->flags & SDF_DYING)
		return (ENXIO);
	link = sc->sc_link;

	/* USB mass storage devices don't do mode pages reliably. */
	if (ISSET(link->flags, SDEV_UMASS))
		return (EOPNOTSUPP);

	/* see if the adapter has special handling */
	rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
	if (rv != ENOTTY)
		return (rv);

	buf = dma_alloc(sizeof(*buf), PR_WAITOK);
	if (buf == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE,
	    buf, (void **)&mode, NULL, NULL, NULL,
	    sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
	if (rv != 0)
		goto done;

	if ((mode == NULL) || (!DISK_PGCODE(mode, PAGE_CACHING_MODE))) {
		rv = EIO;
		goto done;
	}

	/* WCE set means write cache on; RCD set means read cache OFF. */
	wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
	rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);

	switch (cmd) {
	case DIOCGCACHE:
		dkc->wrcache = wrcache;
		dkc->rdcache = rdcache;
		break;

	case DIOCSCACHE:
		/* Nothing to do if the requested state is already set. */
		if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
			break;

		if (dkc->wrcache)
			SET(mode->flags, PG_CACHE_FL_WCE);
		else
			CLR(mode->flags, PG_CACHE_FL_WCE);

		if (dkc->rdcache)
			CLR(mode->flags, PG_CACHE_FL_RCD);
		else
			SET(mode->flags, PG_CACHE_FL_RCD);

		if (sc->flags & SDF_DYING) {
			rv = ENXIO;
			goto done;
		}
		/* Write back via the same (6- or 10-byte) header we sensed. */
		if (big) {
			rv = scsi_mode_select_big(link, SMS_PF,
			    &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
		} else {
			rv = scsi_mode_select(link, SMS_PF,
			    &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
		}
		break;
	}

done:
	dma_free(buf, sizeof(*buf));
	return (rv);
}
1131 
1132 /*
1133  * Load the label information on the named device
1134  */
1135 int
1136 sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
1137     int spoofonly)
1138 {
1139 	struct scsi_link *link;
1140 	size_t len;
1141 	char packname[sizeof(lp->d_packname) + 1];
1142 	char product[17], vendor[9];
1143 
1144 	if (sc->flags & SDF_DYING)
1145 		return (ENXIO);
1146 	link = sc->sc_link;
1147 
1148 	bzero(lp, sizeof(struct disklabel));
1149 
1150 	lp->d_secsize = sc->params.secsize;
1151 	lp->d_ntracks = sc->params.heads;
1152 	lp->d_nsectors = sc->params.sectors;
1153 	lp->d_ncylinders = sc->params.cyls;
1154 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1155 	if (lp->d_secpercyl == 0) {
1156 		lp->d_secpercyl = 100;
1157 		/* as long as it's not 0 - readdisklabel divides by it */
1158 	}
1159 
1160 	lp->d_type = DTYPE_SCSI;
1161 	if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
1162 		strncpy(lp->d_typename, "SCSI optical",
1163 		    sizeof(lp->d_typename));
1164 	else
1165 		strncpy(lp->d_typename, "SCSI disk",
1166 		    sizeof(lp->d_typename));
1167 
1168 	/*
1169 	 * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
1170 	 * then leave out '<vendor> ' and use only as much of '<product>' as
1171 	 * does fit.
1172 	 */
1173 	viscpy(vendor, link->inqdata.vendor, 8);
1174 	viscpy(product, link->inqdata.product, 16);
1175 	len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
1176 	if (len > sizeof(lp->d_packname)) {
1177 		strlcpy(packname, product, sizeof(packname));
1178 		len = strlen(packname);
1179 	}
1180 	/*
1181 	 * It is safe to use len as the count of characters to copy because
1182 	 * packname is sizeof(lp->d_packname)+1, the string in packname is
1183 	 * always null terminated and len does not count the terminating null.
1184 	 * d_packname is not a null terminated string.
1185 	 */
1186 	memcpy(lp->d_packname, packname, len);
1187 
1188 	DL_SETDSIZE(lp, sc->params.disksize);
1189 	lp->d_version = 1;
1190 	lp->d_flags = 0;
1191 
1192 	/* XXX - these values for BBSIZE and SBSIZE assume ffs */
1193 	lp->d_bbsize = BBSIZE;
1194 	lp->d_sbsize = SBSIZE;
1195 
1196 	lp->d_magic = DISKMAGIC;
1197 	lp->d_magic2 = DISKMAGIC;
1198 	lp->d_checksum = dkcksum(lp);
1199 
1200 	/*
1201 	 * Call the generic disklabel extraction routine
1202 	 */
1203 	return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
1204 }
1205 
1206 
1207 /*
1208  * Check Errors
1209  */
int
sd_interpret_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense;
	struct scsi_link *link = xs->sc_link;
	u_int8_t serr = sense->error_code & SSD_ERRCODE;
	int retval;

	/*
	 * Let the generic code handle everything except a few categories of
	 * LUN not ready errors on open devices.
	 */
	if (((link->flags & SDEV_OPEN) == 0) ||
	    (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
	    ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
	    (sense->extra_len < 6))
		return (scsi_interpret_sense(xs));

	/* Caller explicitly asked to ignore NOT READY conditions. */
	if ((xs->flags & SCSI_IGNORE_NOT_READY) != 0)
		return (0);

	switch (ASC_ASCQ(sense)) {
	case SENSE_NOT_READY_BECOMING_READY:
		/* Device is coming up on its own; just wait and retry. */
		SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
		retval = scsi_delay(xs, 5);
		break;

	case SENSE_NOT_READY_INIT_REQUIRED:
		/* Device wants an explicit START; issue one, non-sleeping. */
		SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
		retval = scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
		if (retval == 0)
			retval = ERESTART;
		else if (retval == ENOMEM)
			/* Can't issue the command. Fall back on a delay. */
			retval = scsi_delay(xs, 5);
		else
			SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
			    retval));
		break;

	default:
		/* Any other NOT READY variant gets the generic treatment. */
		retval = scsi_interpret_sense(xs);
		break;
	}

	return (retval);
}
1258 
/*
 * Return the size of partition `dev' in DEV_BSIZE blocks, or -1 if the
 * unit doesn't exist, is going away, has no media loaded, or the
 * partition's fstype is not FS_SWAP.  If no partition on the unit is
 * currently open, the device is opened and closed around the check.
 */
daddr_t
sdsize(dev_t dev)
{
	struct disklabel *lp;
	struct sd_softc *sc;
	int part, omask;
	daddr_t size;

	/* Takes a device reference; released at `exit'. */
	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return -1;
	if (sc->flags & SDF_DYING) {
		size = -1;
		goto exit;
	}

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	/* Open the partition ourselves if nobody else has it open. */
	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	/* Recheck: the device may have started dying during sdopen(). */
	if (sc->flags & SDF_DYING) {
		size = -1;
		goto exit;
	}
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0)
		size = -1;
	else if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
	/* Undo our temporary open; a failed close invalidates the answer. */
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

 exit:
	device_unref(&sc->sc_dev);
	return size;
}
1301 
1302 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1303 static int sddoingadump;
1304 
1305 /*
1306  * dump all of physical memory into the partition specified, starting
1307  * at offset 'dumplo' into the partition.
1308  */
1309 int
1310 sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
1311 {
1312 	struct sd_softc *sc;	/* disk unit to do the I/O */
1313 	struct disklabel *lp;	/* disk's disklabel */
1314 	int	unit, part;
1315 	u_int32_t sectorsize;	/* size of a disk sector */
1316 	u_int64_t nsects;	/* number of sectors in partition */
1317 	u_int64_t sectoff;	/* sector offset of partition */
1318 	u_int64_t totwrt;	/* total number of sectors left to write */
1319 	u_int32_t nwrt;		/* current number of sectors to write */
1320 	struct scsi_xfer *xs;	/* ... convenience */
1321 	int rv;
1322 
1323 	/* Check if recursive dump; if so, punt. */
1324 	if (sddoingadump)
1325 		return EFAULT;
1326 	if (blkno < 0)
1327 		return EINVAL;
1328 
1329 	/* Mark as active early. */
1330 	sddoingadump = 1;
1331 
1332 	unit = DISKUNIT(dev);	/* Decompose unit & partition. */
1333 	part = DISKPART(dev);
1334 
1335 	/* Check for acceptable drive number. */
1336 	if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
1337 		return ENXIO;
1338 
1339 	/*
1340 	 * XXX Can't do this check, since the media might have been
1341 	 * XXX marked `invalid' by successful unmounting of all
1342 	 * XXX filesystems.
1343 	 */
1344 #if 0
1345 	/* Make sure it was initialized. */
1346 	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) != SDEV_MEDIA_LOADED)
1347 		return ENXIO;
1348 #endif
1349 
1350 	/* Convert to disk sectors.  Request must be a multiple of size. */
1351 	lp = sc->sc_dk.dk_label;
1352 	sectorsize = lp->d_secsize;
1353 	if ((size % sectorsize) != 0)
1354 		return EFAULT;
1355 	if ((blkno % DL_BLKSPERSEC(lp)) != 0)
1356 		return EFAULT;
1357 	totwrt = size / sectorsize;
1358 	blkno = DL_BLKTOSEC(lp, blkno);
1359 
1360 	nsects = DL_GETPSIZE(&lp->d_partitions[part]);
1361 	sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);
1362 
1363 	/* Check transfer bounds against partition size. */
1364 	if ((blkno + totwrt) > nsects)
1365 		return EINVAL;
1366 
1367 	/* Offset block number to start of partition. */
1368 	blkno += sectoff;
1369 
1370 	while (totwrt > 0) {
1371 		if (totwrt > UINT32_MAX)
1372 			nwrt = UINT32_MAX;
1373 		else
1374 			nwrt = totwrt;
1375 
1376 #ifndef	SD_DUMP_NOT_TRUSTED
1377 		xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
1378 		if (xs == NULL)
1379 			return (ENOMEM);
1380 
1381 		xs->timeout = 10000;
1382 		xs->flags |= SCSI_DATA_OUT;
1383 		xs->data = va;
1384 		xs->datalen = nwrt * sectorsize;
1385 
1386 		sd_cmd_rw10(xs, 0, blkno, nwrt); /* XXX */
1387 
1388 		rv = scsi_xs_sync(xs);
1389 		scsi_xs_put(xs);
1390 		if (rv != 0)
1391 			return (ENXIO);
1392 #else	/* SD_DUMP_NOT_TRUSTED */
1393 		/* Let's just talk about this first... */
1394 		printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
1395 		    (long long)blkno);
1396 		delay(500 * 1000);	/* half a second */
1397 #endif	/* SD_DUMP_NOT_TRUSTED */
1398 
1399 		/* update block count */
1400 		totwrt -= nwrt;
1401 		blkno += nwrt;
1402 		va += sectorsize * nwrt;
1403 	}
1404 
1405 	sddoingadump = 0;
1406 
1407 	return (0);
1408 }
1409 
1410 /*
1411  * Copy up to len chars from src to dst, ignoring non-printables.
1412  * Must be room for len+1 chars in dst so we can write the NUL.
1413  * Does not assume src is NUL-terminated.
1414  */
1415 void
1416 viscpy(u_char *dst, u_char *src, int len)
1417 {
1418 	while (len > 0 && *src != '\0') {
1419 		if (*src < 0x20 || *src >= 0x80) {
1420 			src++;
1421 			continue;
1422 		}
1423 		*dst++ = *src++;
1424 		len--;
1425 	}
1426 	*dst = '\0';
1427 }
1428 
/*
 * Issue a READ CAPACITY(10) command.  On success, record the returned
 * last LBA + 1 as the disk size and the returned block length as the
 * sector size.  This command carries no thin provisioning information,
 * so SDF_THIN is cleared.  Returns 0 or an errno-style value.
 */
int
sd_read_cap_10(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity cdb;
	struct scsi_read_cap_data *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;	/* default result if scsi_xs_get() fails */

	/* Never pass SCSI_IGNORE_ILLEGAL_REQUEST down for this command. */
	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	/* Zeroed, DMA-reachable reply buffer; may sleep unless NOSLEEP. */
	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;	/* rv is still ENOMEM */

	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY;

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		/* addr holds the last LBA, so the size is addr + 1. */
		sc->params.disksize = _4btol(rdcap->addr) + 1ll;
		sc->params.secsize = _4btol(rdcap->length);
		CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
1474 
/*
 * Issue a READ CAPACITY(16) command.  On success, record the returned
 * last LBA + 1 as the disk size and the returned block length as the
 * sector size, and set or clear SDF_THIN according to the reply's TPE
 * (thin provisioning enabled) bit.  A reported last LBA of 0 is
 * treated as an error (EIO).  Returns 0 or an errno-style value.
 */
int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity_16 cdb;
	struct scsi_read_cap_data_16 *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;	/* default result if scsi_xs_get() fails */

	/* Never pass SCSI_IGNORE_ILLEGAL_REQUEST down for this command. */
	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	/* Zeroed, DMA-reachable reply buffer; may sleep unless NOSLEEP. */
	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;	/* rv is still ENOMEM */

	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY_16;
	cdb.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(*rdcap), cdb.length);

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		if (_8btol(rdcap->addr) == 0) {
			rv = EIO;
			goto done;
		}

		/* addr holds the last LBA, so the size is addr + 1. */
		sc->params.disksize = _8btol(rdcap->addr) + 1;
		sc->params.secsize = _4btol(rdcap->length);
		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
			SET(sc->flags, SDF_THIN);
		else
			CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
1530 
1531 int
1532 sd_size(struct sd_softc *sc, int flags)
1533 {
1534 	int rv;
1535 
1536 	if (sc->flags & SDF_DYING)
1537 		return (ENXIO);
1538 	if (SCSISPC(sc->sc_link->inqdata.version) >= 3) {
1539 		rv = sd_read_cap_16(sc, flags);
1540 		if (rv != 0)
1541 			rv = sd_read_cap_10(sc, flags);
1542 	} else {
1543 		rv = sd_read_cap_10(sc, flags);
1544 		if (rv == 0 && sc->params.disksize == 0x100000000ll)
1545 			rv = sd_read_cap_16(sc, flags);
1546 	}
1547 
1548 	return (rv);
1549 }
1550 
/*
 * Fetch the SUPPORTED VPD PAGES page (first just the header to learn
 * its length, then the whole list) and verify that the device
 * advertises both pages needed for thin provisioning support:
 * SI_PG_DISK_LIMITS and SI_PG_DISK_THIN.  Returns 0 if both are
 * present, EOPNOTSUPP if not, or another errno-style value on failure.
 */
int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	size_t len = 0;		/* stays 0 until the header has been read */
	u_int8_t *pages;
	int i, score = 0;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	/* First pass: header only, to discover the page list length. */
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);

	/* Second pass: reallocate with room for the page list itself. */
	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	/* The supported page codes follow immediately after the header. */
	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	/* Count how many of the two required pages are advertised. */
	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	if (score < 2)
		rv = EOPNOTSUPP;

done:
	/* len is still 0 when the header-sized allocation is freed here. */
	dma_free(pg, sizeof(*pg) + len);
	return (rv);
}
1613 
1614 int
1615 sd_vpd_block_limits(struct sd_softc *sc, int flags)
1616 {
1617 	struct scsi_vpd_disk_limits *pg;
1618 	int rv;
1619 
1620 	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
1621 	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1622 	if (pg == NULL)
1623 		return (ENOMEM);
1624 
1625 	if (sc->flags & SDF_DYING) {
1626 		rv = ENXIO;
1627 		goto done;
1628 	}
1629 	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
1630 	    SI_PG_DISK_LIMITS, flags);
1631 	if (rv != 0)
1632 		goto done;
1633 
1634 	if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
1635 		sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
1636 		sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
1637 	} else
1638 		rv = EOPNOTSUPP;
1639 
1640 done:
1641 	dma_free(pg, sizeof(*pg));
1642 	return (rv);
1643 }
1644 
/*
 * Fetch the LOGICAL BLOCK PROVISIONING VPD page.  The code that would
 * pick an unmap strategy from its flags is compiled out under "#ifdef
 * notyet", so currently a successful fetch just returns 0.
 */
int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_thin *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_THIN, flags);
	if (rv != 0)
		goto done;

#ifdef notyet
	/* Prefer UNMAP; fall back to WRITE SAME(16) if only TPWS is set. */
	if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
		sc->sc_delete = sd_unmap;
	else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
		sc->sc_delete = sd_write_same_16;
		sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
	} else
		rv = EOPNOTSUPP;
#endif

done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
1679 
/*
 * Gather everything needed to use thin provisioning: confirm the
 * required VPD pages exist, then read the UNMAP limits and the
 * provisioning page itself.  Returns 0 on success or the first
 * failing step's errno-style value.
 */
int
sd_thin_params(struct sd_softc *sc, int flags)
{
	int error;

	if ((error = sd_thin_pages(sc, flags)) != 0)
		return (error);

	if ((error = sd_vpd_block_limits(sc, flags)) != 0)
		return (error);

	return (sd_vpd_thin(sc, flags));
}
1699 
1700 /*
1701  * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
1702  * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
1703  * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
1704  * cannot be completed.
1705  */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	struct scsi_link *link;
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
	int err = 0, big;

	/* Without a capacity the disk is unusable; report it offline. */
	if (sd_size(sc, flags) != 0)
		return (SDGP_RESULT_OFFLINE);

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/* we dont know the unmap limits, so we cant use thin shizz */
		CLR(sc->flags, SDF_THIN);
	}

	/* No buffer for mode sense data? Skip straight to validation. */
	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	if (sc->flags & SDF_DYING)
		goto die;
	link = sc->sc_link;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info. The only thing USB devices will ask for.
	 */
	err = scsi_do_mode_sense(link, 0, buf, (void **)&page0,
	    NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
	if (sc->flags & SDF_DYING)
		goto die;
	if (err == 0) {
		/* The write-protect bit lives in the header's dev_spec byte. */
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else
			CLR(link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;

	switch (link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY,
		    buf, (void **)&reduced, NULL, NULL, &secsize,
		    sizeof(*reduced), flags | SCSI_SILENT, NULL);
		if (!err && reduced &&
		    DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			/* Use the page's values only as fallbacks. */
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (secsize == 0)
				secsize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		if (((link->flags & SDEV_ATAPI) == 0) ||
		    ((link->flags & SDEV_REMOVABLE) == 0))
			err = scsi_do_mode_sense(link,
			    PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
			    NULL, &secsize, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, NULL);
		if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			/* Derive sectors per track from the total size. */
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			if (sc->flags & SDF_DYING)
				goto die;
			/* No rigid geometry page; try the flex one. */
			err = scsi_do_mode_sense(link,
			    PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
			    &secsize, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, NULL);
			if (!err && flex &&
			    DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				if (secsize == 0)
					secsize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		dma_free(buf, sizeof(*buf));

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	if (dp->secsize == 0)
		dp->secsize = (secsize == 0) ? 512 : secsize;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp->secsize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#lx\n", dp->secsize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	/*
	 * NOTE(review): dp->heads/dp->sectors read below are whatever the
	 * caller left in *dp, not the local heads/sectors — confirm that
	 * this is intended.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	if (dp->cyls == 0) {
		/* Degenerate fallback: present the disk as one big track. */
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);

die:
	dma_free(buf, sizeof(*buf));
	return (SDGP_RESULT_OFFLINE);
}
1880 
1881 int
1882 sd_flush(struct sd_softc *sc, int flags)
1883 {
1884 	struct scsi_link *link;
1885 	struct scsi_xfer *xs;
1886 	struct scsi_synchronize_cache *cmd;
1887 	int error;
1888 
1889 	if (sc->flags & SDF_DYING)
1890 		return (ENXIO);
1891 	link = sc->sc_link;
1892 
1893 	if (link->quirks & SDEV_NOSYNCCACHE)
1894 		return (0);
1895 
1896 	/*
1897 	 * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
1898 	 * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
1899 	 * that the command is not supported by the device.
1900 	 */
1901 
1902 	xs = scsi_xs_get(link, flags);
1903 	if (xs == NULL) {
1904 		SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
1905 		return (EIO);
1906 	}
1907 
1908 	cmd = (struct scsi_synchronize_cache *)xs->cmd;
1909 	cmd->opcode = SYNCHRONIZE_CACHE;
1910 
1911 	xs->cmdlen = sizeof(*cmd);
1912 	xs->timeout = 100000;
1913 	xs->flags |= SCSI_IGNORE_ILLEGAL_REQUEST;
1914 
1915 	error = scsi_xs_sync(xs);
1916 
1917 	scsi_xs_put(xs);
1918 
1919 	if (error)
1920 		SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
1921 	else
1922 		sc->flags &= ~SDF_DIRTY;
1923 
1924 	return (error);
1925 }
1926