xref: /openbsd-src/sys/scsi/sd.c (revision 0b7734b3d77bb9b21afec6f4621cae6c805dbd45)
1 /*	$OpenBSD: sd.c,v 1.269 2016/03/19 15:37:33 bluhm Exp $	*/
2 /*	$NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $	*/
3 
4 /*-
5  * Copyright (c) 1998 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Charles M. Hannum.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Originally written by Julian Elischer (julian@dialix.oz.au)
35  * for TRW Financial Systems for use under the MACH(2.5) operating system.
36  *
37  * TRW Financial Systems, in accordance with their agreement with Carnegie
38  * Mellon University, makes this software available to CMU to distribute
39  * or use in any manner that they see fit as long as this message is kept with
40  * the software. For this reason TFS also grants any other persons or
41  * organisations permission to use or modify this software.
42  *
43  * TFS supplies this software to be publicly redistributed
44  * on the understanding that TFS is not responsible for the correct
45  * functioning of this software in any circumstances.
46  *
47  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
48  */
49 
50 #include <sys/stdint.h>
51 #include <sys/types.h>
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/timeout.h>
55 #include <sys/file.h>
56 #include <sys/stat.h>
57 #include <sys/ioctl.h>
58 #include <sys/mtio.h>
59 #include <sys/mutex.h>
60 #include <sys/buf.h>
61 #include <sys/uio.h>
62 #include <sys/malloc.h>
63 #include <sys/pool.h>
64 #include <sys/errno.h>
65 #include <sys/device.h>
66 #include <sys/disklabel.h>
67 #include <sys/disk.h>
68 #include <sys/conf.h>
69 #include <sys/scsiio.h>
70 #include <sys/dkio.h>
71 #include <sys/reboot.h>
72 
73 #include <scsi/scsi_all.h>
74 #include <scsi/scsi_disk.h>
75 #include <scsi/scsiconf.h>
76 #include <scsi/sdvar.h>
77 
78 #include <ufs/ffs/fs.h>			/* for BBSIZE and SBSIZE */
79 
80 #include <sys/vnode.h>
81 
82 int	sdmatch(struct device *, void *, void *);
83 void	sdattach(struct device *, struct device *, void *);
84 int	sdactivate(struct device *, int);
85 int	sddetach(struct device *, int);
86 
87 void	sdminphys(struct buf *);
88 int	sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
89 void	sdstart(struct scsi_xfer *);
90 int	sd_interpret_sense(struct scsi_xfer *);
91 int	sd_read_cap_10(struct sd_softc *, int);
92 int	sd_read_cap_16(struct sd_softc *, int);
93 int	sd_size(struct sd_softc *, int);
94 int	sd_thin_pages(struct sd_softc *, int);
95 int	sd_vpd_block_limits(struct sd_softc *, int);
96 int	sd_vpd_thin(struct sd_softc *, int);
97 int	sd_thin_params(struct sd_softc *, int);
98 int	sd_get_parms(struct sd_softc *, struct disk_parms *, int);
99 void	sd_flush(struct sd_softc *, int);
100 
101 void	viscpy(u_char *, u_char *, int);
102 
103 int	sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
104 int	sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);
105 
106 void	sd_cmd_rw6(struct scsi_xfer *, int, u_int64_t, u_int);
107 void	sd_cmd_rw10(struct scsi_xfer *, int, u_int64_t, u_int);
108 void	sd_cmd_rw12(struct scsi_xfer *, int, u_int64_t, u_int);
109 void	sd_cmd_rw16(struct scsi_xfer *, int, u_int64_t, u_int);
110 
111 void	sd_buf_done(struct scsi_xfer *);
112 
/* Autoconf glue: softc size plus match/attach/detach/activate entry points. */
struct cfattach sd_ca = {
	sizeof(struct sd_softc), sdmatch, sdattach,
	sddetach, sdactivate
};
117 
/* Driver glue: device list pointer, driver name, device class. */
struct cfdriver sd_cd = {
	NULL, "sd", DV_DISK
};
121 
/*
 * INQUIRY match table: direct-access, reduced-direct-access and optical
 * devices, both fixed and removable, with any vendor/product/revision.
 */
const struct scsi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_DIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_RDIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_RDIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_OPTICAL, T_FIXED,
	 "",         "",                 ""},
	{T_OPTICAL, T_REMOV,
	 "",         "",                 ""},
};
136 
137 #define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
138 
139 int
140 sdmatch(struct device *parent, void *match, void *aux)
141 {
142 	struct scsi_attach_args *sa = aux;
143 	int priority;
144 
145 	(void)scsi_inqmatch(sa->sa_inqbuf,
146 	    sd_patterns, nitems(sd_patterns),
147 	    sizeof(sd_patterns[0]), &priority);
148 
149 	return (priority);
150 }
151 
/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 */
void
sdattach(struct device *parent, struct device *self, void *aux)
{
	struct sd_softc *sc = (struct sd_softc *)self;
	struct scsi_attach_args *sa = aux;
	struct disk_parms *dp = &sc->params;
	struct scsi_link *link = sa->sa_sc_link;
	int sd_autoconf = scsi_autoconf | SCSI_SILENT |
	    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;
	struct dk_cache dkc;
	int error, result, sortby = BUFQ_DEFAULT;

	SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));

	/*
	 * Store information needed to contact our base driver
	 */
	sc->sc_link = link;
	link->interpret_sense = sd_interpret_sense;
	link->device_softc = sc;

	/* Removable ATAPI devices get the "no SYNCHRONIZE CACHE" quirk. */
	if ((link->flags & SDEV_ATAPI) && (link->flags & SDEV_REMOVABLE))
		link->quirks |= SDEV_NOSYNCCACHE;

	/*
	 * Devices that do not advertise SID_RelAdr are restricted to the
	 * larger (10/12/16-byte) read/write CDBs — see sdstart().
	 */
	if (!(link->inqdata.flags & SID_RelAdr))
		link->quirks |= SDEV_ONLYBIG;

	/*
	 * Note if this device is ancient.  This is used in sdminphys().
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    SCSISPC(sa->sa_inqbuf->version) == 0)
		sc->flags |= SDF_ANCIENT;

	/*
	 * Use the subdriver to request information regarding
	 * the drive. We cannot use interrupts yet, so the
	 * request must specify this.
	 */
	printf("\n");

	scsi_xsh_set(&sc->sc_xsh, link, sdstart);
	timeout_set(&sc->sc_timeout, (void (*)(void *))scsi_xsh_add,
	    &sc->sc_xsh);

	/* Spin up non-UMASS devices ready or not. */
	if ((link->flags & SDEV_UMASS) == 0)
		scsi_start(link, SSS_START, sd_autoconf);

	/*
	 * Some devices (e.g. BlackBerry Pearl) won't admit they have
	 * media loaded unless its been locked in.
	 */
	if ((link->flags & SDEV_REMOVABLE) != 0)
		scsi_prevent(link, PR_PREVENT, sd_autoconf);

	/* Check that it is still responding and ok. */
	error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
	    sd_autoconf);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = sd_get_parms(sc, &sc->params, sd_autoconf);

	/* Release the media lock taken above. */
	if ((link->flags & SDEV_REMOVABLE) != 0)
		scsi_prevent(link, PR_ALLOW, sd_autoconf);

	switch (result) {
	case SDGP_RESULT_OK:
		printf("%s: %lluMB, %lu bytes/sector, %llu sectors",
		    sc->sc_dev.dv_xname,
		    dp->disksize / (1048576 / dp->secsize), dp->secsize,
		    dp->disksize);
		if (ISSET(sc->flags, SDF_THIN)) {
			/* Thin-provisioned disks get a FIFO buf queue. */
			sortby = BUFQ_FIFO;
			printf(", thin");
		}
		if (ISSET(link->flags, SDEV_READONLY)) {
			printf(", readonly");
		}
		printf("\n");
		break;

	case SDGP_RESULT_OFFLINE:
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result (%#x) from get_parms", result);
		break;
#endif
	}

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, sortby);

	/*
	 * Enable write cache by default.
	 */
	memset(&dkc, 0, sizeof(dkc));
	if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
		dkc.wrcache = 1;
		sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
	}

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);
}
268 
/*
 * Handle device activation state changes (suspend/powerdown/resume/
 * deactivate).  Returns ENXIO if the softc is already dying, else 0.
 */
int
sdactivate(struct device *self, int act)
{
	struct scsi_link *link;
	struct sd_softc *sc = (struct sd_softc *)self;

	if (sc->flags & SDF_DYING)
		return (ENXIO);
	link = sc->sc_link;

	switch (act) {
	case DVACT_SUSPEND:
		/*
		 * We flush the cache, since our next step before
		 * DVACT_POWERDOWN might be a hibernate operation.
		 */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, SCSI_AUTOCONF);
		break;
	case DVACT_POWERDOWN:
		/*
		 * Stop the disk.  Stopping the disk should flush the
		 * cache, but we are paranoid so we flush the cache
		 * first.  We're cold at this point, so we poll for
		 * completion.
		 */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, SCSI_AUTOCONF);
		if (boothowto & RB_POWERDOWN)
			scsi_start(link, SSS_STOP,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
		break;
	case DVACT_RESUME:
		/* Spin the disk back up after suspend. */
		scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
		break;
	case DVACT_DEACTIVATE:
		/* Mark the softc dying and cancel any pending queue work. */
		sc->flags |= SDF_DYING;
		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
		break;
	}
	return (0);
}
314 
/*
 * Detach the device: drain outstanding I/O, invalidate opens, and tear
 * down the disk structures.  The order below is deliberate: the buf
 * queue is drained before it is destroyed.
 */
int
sddetach(struct device *self, int flags)
{
	struct sd_softc *sc = (struct sd_softc *)self;

	/* Drain any transfers still sitting in the queue. */
	bufq_drain(&sc->sc_bufq);

	/* Invalidate existing opens of this unit. */
	disk_gone(sdopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return (0);
}
330 
/*
 * Open the device. Make sure the partition info is as up-to-date as can be.
 *
 * NOTE(review): SDF_DYING is re-checked after every SCSI operation that
 * may have slept, since the device can be deactivated meanwhile —
 * preserve these checks when modifying this function.
 */
int
sdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int error = 0, part, rawopen, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);

	/* Character-device opens of the raw partition get relaxed checks. */
	rawopen = (part == RAW_PART) && (fmt == S_IFCHR);

	sc = sdlookup(unit);
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	/* Refuse writable opens of read-only devices. */
	if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
		device_unref(&sc->sc_dev);
		return (EACCES);
	}

	SC_DEBUG(link, SDEV_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	if ((error = disk_lock(&sc->sc_dk)) != 0) {
		device_unref(&sc->sc_dev);
		return (error);
	}

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partition.
		 */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
			if (rawopen)
				goto out;
			error = EIO;
			goto bad;
		}
	} else {
		/* Spin up non-UMASS devices ready or not. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_UMASS) == 0)
			scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
			    0) | SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);

		/* Use sd_interpret_sense() for sense errors.
		 *
		 * But only after spinning the disk up! Just in case a broken
		 * device returns "Initialization command required." and causes
		 * a loop of scsi_start() calls.
		 */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags |= SDEV_OPEN;

		/*
		 * Try to prevent the unloading of a removable device while
		 * it's open. But allow the open to proceed if the device can't
		 * be locked in.
		 */
		if ((link->flags & SDEV_REMOVABLE) != 0) {
			scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		}

		/* Check that it is still responding and ok. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		error = scsi_test_unit_ready(link,
		    TEST_READY_RETRIES, SCSI_SILENT |
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
		if (error) {
			/* Raw opens may proceed without ready media. */
			if (rawopen) {
				error = 0;
				goto out;
			} else
				goto bad;
		}

		/* Load the physical device parameters. */
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags |= SDEV_MEDIA_LOADED;
		if (sd_get_parms(sc, &sc->params, (rawopen ? SCSI_SILENT : 0))
		    == SDGP_RESULT_OFFLINE) {
			if (sc->flags & SDF_DYING) {
				error = ENXIO;
				goto die;
			}
			link->flags &= ~SDEV_MEDIA_LOADED;
			error = ENXIO;
			goto bad;
		}
		SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));

		/* Load the partition info if not already loaded. */
		error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
		if (error == EIO || error == ENXIO)
			goto bad;
		SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
	}

out:
	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto bad;

	SC_DEBUG(link, SDEV_DB3, ("open complete\n"));

	/* It's OK to fall through because dk_openmask is now non-zero. */
bad:
	/* Undo the media lock and open state if nothing remains open. */
	if (sc->sc_dk.dk_openmask == 0) {
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		if ((link->flags & SDEV_REMOVABLE) != 0)
			scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
488 
/*
 * Close the device. Only called if we are the last occurrence of an open
 * device.  Convenient now but usually a pain.
 */
int
sdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int part = DISKPART(dev);
	int error = 0;

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	disk_lock_nointr(&sc->sc_dk);

	disk_closepart(&sc->sc_dk, part, fmt);

	/* On last close: flush, unlock the media, honour a pending eject. */
	if (sc->sc_dk.dk_openmask == 0) {
		/* Flush any cached writes before forgetting the disk. */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, 0);

		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		/* Allow removable media to be taken out again. */
		if ((link->flags & SDEV_REMOVABLE) != 0)
			scsi_prevent(link, PR_ALLOW,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_SILENT);
		if (sc->flags & SDF_DYING) {
			error = ENXIO;
			goto die;
		}
		link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);

		/* Eject now if DIOCEJECT/MTOFFL was requested earlier. */
		if (link->flags & SDEV_EJECTING) {
			scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
			if (sc->flags & SDF_DYING) {
				error = ENXIO;
				goto die;
			}
			link->flags &= ~SDEV_EJECTING;
		}

		/* Nothing open: cancel any deferred restart of the queue. */
		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
550 
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	if (sc->flags & SDF_DYING) {
		bp->b_error = ENXIO;
		goto bad;
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
	    bp->b_bcount, (long long)bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		if (link->flags & SDEV_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Place it in the queue of disk activities for this disk. */
	bufq_queue(&sc->sc_bufq, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	scsi_xsh_add(&sc->sc_xsh);

	device_unref(&sc->sc_dev);
	return;

 bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
 done:
	/* Complete the buf ourselves; sc may be NULL if lookup failed. */
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
613 
614 void
615 sd_cmd_rw6(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
616 {
617 	struct scsi_rw *cmd = (struct scsi_rw *)xs->cmd;
618 
619 	cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
620 	_lto3b(secno, cmd->addr);
621 	cmd->length = nsecs;
622 
623 	xs->cmdlen = sizeof(*cmd);
624 }
625 
626 void
627 sd_cmd_rw10(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
628 {
629 	struct scsi_rw_big *cmd = (struct scsi_rw_big *)xs->cmd;
630 
631 	cmd->opcode = read ? READ_BIG : WRITE_BIG;
632 	_lto4b(secno, cmd->addr);
633 	_lto2b(nsecs, cmd->length);
634 
635 	xs->cmdlen = sizeof(*cmd);
636 }
637 
638 void
639 sd_cmd_rw12(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
640 {
641 	struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)xs->cmd;
642 
643 	cmd->opcode = read ? READ_12 : WRITE_12;
644 	_lto4b(secno, cmd->addr);
645 	_lto4b(nsecs, cmd->length);
646 
647 	xs->cmdlen = sizeof(*cmd);
648 }
649 
650 void
651 sd_cmd_rw16(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
652 {
653 	struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)xs->cmd;
654 
655 	cmd->opcode = read ? READ_16 : WRITE_16;
656 	_lto8b(secno, cmd->addr);
657 	_lto4b(nsecs, cmd->length);
658 
659 	xs->cmdlen = sizeof(*cmd);
660 }
661 
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	if (sc->flags & SDF_DYING) {
		scsi_xs_put(xs);
		return;
	}
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		/* Media went away: fail everything still queued. */
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	/* Convert block number to a sector, then add the partition offset. */
	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 *  Fill out the scsi command.  If the transfer will
	 *  fit in a "small" cdb, use it.
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    !(link->quirks & SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
	xs->timeout = 60000;	/* NOTE(review): appears to be ms — confirm */
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;

	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		sc->flags |= SDF_DIRTY;

	scsi_xs_exec(xs);

	/*
	 * Move onto the next io.  If sd_buf_done() set SDF_WAITING
	 * (adapter had no CCB), the timeout reschedules us instead.
	 */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
751 
/*
 * Completion routine for buf-backed transfers started by sdstart().
 * Interprets the transfer status, retrying or requeueing when sensible,
 * then finishes the buf with biodone().
 */
void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;
	int error, s;

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		bp->b_resid = xs->resid;
		break;

	case XS_NO_CCB:
		/* The adapter is busy, requeue the buf and try it later. */
		disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
		    bp->b_flags & B_READ);
		bufq_requeue(&sc->sc_bufq, bp);
		scsi_xs_put(xs);
		/* SDF_WAITING stops sdstart() rescheduling; timeout will. */
		SET(sc->flags, SDF_WAITING);
		timeout_add(&sc->sc_timeout, 1);
		return;

	case XS_SENSE:
	case XS_SHORTSENSE:
#ifdef SCSIDEBUG
		scsi_sense_print_debug(xs);
#endif
		error = sd_interpret_sense(xs);
		if (error == 0) {
			/* Sense data indicated no real problem. */
			bp->b_error = 0;
			bp->b_resid = xs->resid;
			break;
		}
		if (error != ERESTART) {
			/* Hard error: record it and stop retrying. */
			bp->b_error = error;
			xs->retries = 0;
		}
		goto retry;

	case XS_BUSY:
		if (xs->retries) {
			if (scsi_delay(xs, 1) != ERESTART)
				xs->retries = 0;
		}
		goto retry;

	case XS_TIMEOUT:
retry:
		if (xs->retries--) {
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		/* Out of retries, or an unhandled status: fail the buf. */
		if (bp->b_error == 0)
			bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		break;
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
	    bp->b_flags & B_READ);

	s = splbio();
	biodone(bp);
	splx(s);
	scsi_xs_put(xs);
}
823 
/*
 * Clamp the size of a transfer to what the device can handle, then let
 * the adapter apply its own limit via its scsi_minphys hook.
 */
void
sdminphys(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	long max;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL)
		return;  /* XXX - right way to fail this? */
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return;
	}
	link = sc->sc_link;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0.  However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0.  A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if (sc->flags & SDF_ANCIENT) {
		max = sc->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	/* Let the adapter clamp the transfer further if it needs to. */
	(*link->adapter->scsi_minphys)(bp, link);

	device_unref(&sc->sc_dev);
}
862 
/* Raw character-device read: route through sdstrategy() via physio. */
int
sdread(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(sdstrategy, dev, B_READ, sdminphys, uio));
}
868 
/* Raw character-device write: route through sdstrategy() via physio. */
int
sdwrite(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(sdstrategy, dev, B_WRITE, sdminphys, uio));
}
874 
/*
 * Perform special action on behalf of the user
 * Knows about the internals of this device
 */
int
sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	struct disklabel *lp;
	int error = 0;
	int part = DISKPART(dev);

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));

	/*
	 * If the device is not valid.. abandon ship
	 */
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		switch (cmd) {
		case DIOCLOCK:
		case DIOCEJECT:
		case SCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			/* Allowed on the raw partition even without media. */
			if (part == RAW_PART)
				break;
		/* FALLTHROUGH */
		default:
			if ((link->flags & SDEV_OPEN) == 0) {
				error = ENODEV;
				goto exit;
			} else {
				error = EIO;
				goto exit;
			}
		}
	}

	switch (cmd) {
	case DIOCRLDINFO:
		/* Re-read the label into a scratch buffer, then install. */
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		sdgetdisklabel(dev, sc, lp, 0);
		memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		/* Spoofed (default) label, not the on-disk one. */
		sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		/* Setting/writing the label requires a writable open. */
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			if (cmd == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    sdstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	case DIOCLOCK:
		error = scsi_prevent(link,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
		goto exit;

	case MTIOCTOP:
		/* Only the "offline" tape op is meaningful for a disk. */
		if (((struct mtop *)addr)->mt_op != MTOFFL) {
			error = EIO;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCEJECT:
		if ((link->flags & SDEV_REMOVABLE) == 0) {
			error = ENOTTY;
			goto exit;
		}
		/* Actual eject happens on last close — see sdclose(). */
		link->flags |= SDEV_EJECTING;
		goto exit;

	case DIOCINQ:
		/* Give the adapter first refusal, then answer ourselves. */
		error = scsi_do_ioctl(link, cmd, addr, flag);
		if (error == ENOTTY)
			error = sd_ioctl_inquiry(sc,
			    (struct dk_inquiry *)addr);
		goto exit;

	case DIOCSCACHE:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCGCACHE:
		error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
		goto exit;

	default:
		/* Unknown ioctls fall through to the midlayer (raw only). */
		if (part != RAW_PART) {
			error = ENOTTY;
			goto exit;
		}
		error = scsi_do_ioctl(link, cmd, addr, flag);
	}

 exit:
	device_unref(&sc->sc_dev);
	return (error);
}
1014 
1015 int
1016 sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
1017 {
1018 	struct scsi_link *link;
1019 	struct scsi_vpd_serial *vpd;
1020 
1021 	vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
1022 
1023 	if (sc->flags & SDF_DYING) {
1024 		dma_free(vpd, sizeof(*vpd));
1025 		return (ENXIO);
1026 	}
1027 	link = sc->sc_link;
1028 
1029 	bzero(di, sizeof(struct dk_inquiry));
1030 	scsi_strvis(di->vendor, link->inqdata.vendor,
1031 	    sizeof(link->inqdata.vendor));
1032 	scsi_strvis(di->product, link->inqdata.product,
1033 	    sizeof(link->inqdata.product));
1034 	scsi_strvis(di->revision, link->inqdata.revision,
1035 	    sizeof(link->inqdata.revision));
1036 
1037 	/* the serial vpd page is optional */
1038 	if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
1039 		scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
1040 	else
1041 		strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));
1042 
1043 	dma_free(vpd, sizeof(*vpd));
1044 	return (0);
1045 }
1046 
/*
 * DIOCGCACHE/DIOCSCACHE backend: read or modify the drive's caching
 * mode page.  Returns ENXIO if the device is going away, EOPNOTSUPP
 * for UMASS devices, ENOTTY-replacing adapter results, or errors from
 * the mode sense/select machinery.
 */
int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
	struct scsi_link *link;
	union scsi_mode_sense_buf *buf;
	struct page_caching_mode *mode = NULL;
	u_int wrcache, rdcache;
	int big;
	int rv;

	if (sc->flags & SDF_DYING)
		return (ENXIO);
	link = sc->sc_link;

	if (ISSET(link->flags, SDEV_UMASS))
		return (EOPNOTSUPP);

	/* see if the adapter has special handling */
	rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
	if (rv != ENOTTY)
		return (rv);

	buf = dma_alloc(sizeof(*buf), PR_WAITOK);
	if (buf == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE,
	    buf, (void **)&mode, NULL, NULL, NULL,
	    sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
	if (rv != 0)
		goto done;

	if ((mode == NULL) || (!DISK_PGCODE(mode, PAGE_CACHING_MODE))) {
		rv = EIO;
		goto done;
	}

	/* WCE set => write cache enabled; RCD set => read cache disabled. */
	wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
	rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);

	switch (cmd) {
	case DIOCGCACHE:
		dkc->wrcache = wrcache;
		dkc->rdcache = rdcache;
		break;

	case DIOCSCACHE:
		/* Nothing to do if the device already matches the request. */
		if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
			break;

		if (dkc->wrcache)
			SET(mode->flags, PG_CACHE_FL_WCE);
		else
			CLR(mode->flags, PG_CACHE_FL_WCE);

		if (dkc->rdcache)
			CLR(mode->flags, PG_CACHE_FL_RCD);
		else
			SET(mode->flags, PG_CACHE_FL_RCD);

		if (sc->flags & SDF_DYING) {
			rv = ENXIO;
			goto done;
		}
		/* Write the updated page back with the matching select size. */
		if (big) {
			rv = scsi_mode_select_big(link, SMS_PF,
			    &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
		} else {
			rv = scsi_mode_select(link, SMS_PF,
			    &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
		}
		break;
	}

done:
	dma_free(buf, sizeof(*buf));
	return (rv);
}
1129 
1130 /*
1131  * Load the label information on the named device
1132  */
int
sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
    int spoofonly)
{
	struct scsi_link *link;
	size_t len;
	char packname[sizeof(lp->d_packname) + 1];
	char product[17], vendor[9];

	/* Don't dereference sc_link once the device starts detaching. */
	if (sc->flags & SDF_DYING)
		return (ENXIO);
	link = sc->sc_link;

	bzero(lp, sizeof(struct disklabel));

	/* Seed the label's geometry from the probed disk parameters. */
	lp->d_secsize = sc->params.secsize;
	lp->d_ntracks = sc->params.heads;
	lp->d_nsectors = sc->params.sectors;
	lp->d_ncylinders = sc->params.cyls;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	if (lp->d_secpercyl == 0) {
		lp->d_secpercyl = 100;
		/* as long as it's not 0 - readdisklabel divides by it */
	}

	lp->d_type = DTYPE_SCSI;
	if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
		strncpy(lp->d_typename, "SCSI optical",
		    sizeof(lp->d_typename));
	else
		strncpy(lp->d_typename, "SCSI disk",
		    sizeof(lp->d_typename));

	/*
	 * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
	 * then leave out '<vendor> ' and use only as much of '<product>' as
	 * does fit.
	 */
	viscpy(vendor, link->inqdata.vendor, 8);
	viscpy(product, link->inqdata.product, 16);
	len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
	if (len > sizeof(lp->d_packname)) {
		strlcpy(packname, product, sizeof(packname));
		len = strlen(packname);
	}
	/*
	 * It is safe to use len as the count of characters to copy because
	 * packname is sizeof(lp->d_packname)+1, the string in packname is
	 * always null terminated and len does not count the terminating null.
	 * d_packname is not a null terminated string.
	 */
	memcpy(lp->d_packname, packname, len);

	DL_SETDSIZE(lp, sc->params.disksize);
	lp->d_version = 1;
	lp->d_flags = 0;

	/* XXX - these values for BBSIZE and SBSIZE assume ffs */
	lp->d_bbsize = BBSIZE;
	lp->d_sbsize = SBSIZE;

	/* Checksum covers the spoofed fields filled in above. */
	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	/*
	 * Call the generic disklabel extraction routine
	 */
	return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
}
1203 
1204 
1205 /*
1206  * Check Errors
1207  */
int
sd_interpret_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense;
	struct scsi_link *link = xs->sc_link;
	u_int8_t serr = sense->error_code & SSD_ERRCODE;
	int retval;

	/*
	 * Let the generic code handle everything except a few categories of
	 * LUN not ready errors on open devices.
	 */
	if (((link->flags & SDEV_OPEN) == 0) ||
	    (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
	    ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
	    (sense->extra_len < 6))
		return (scsi_interpret_sense(xs));

	/* Caller asked for NOT READY to be ignored; report success. */
	if ((xs->flags & SCSI_IGNORE_NOT_READY) != 0)
		return (0);

	switch (ASC_ASCQ(sense)) {
	case SENSE_NOT_READY_BECOMING_READY:
		SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
		/* Device will get there on its own; wait and retry. */
		retval = scsi_delay(xs, 5);
		break;

	case SENSE_NOT_READY_INIT_REQUIRED:
		SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
		/* Device wants an explicit START UNIT before it's usable. */
		retval = scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
		if (retval == 0)
			retval = ERESTART;
		else if (retval == ENOMEM)
			/* Can't issue the command. Fall back on a delay. */
			retval = scsi_delay(xs, 5);
		else
			SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
			    retval));
		break;

	default:
		/* Any other NOT READY cause: defer to the generic handler. */
		retval = scsi_interpret_sense(xs);
		break;
	}

	return (retval);
}
1256 
daddr_t
sdsize(dev_t dev)
{
	struct disklabel *lp;
	struct sd_softc *sc;
	int part, omask;
	daddr_t size;

	/* Look up (and reference) the unit; -1 means "no such device". */
	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return -1;
	if (sc->flags & SDF_DYING) {
		size = -1;
		goto exit;
	}

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	/* Temporarily open the partition if nobody else has it open. */
	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	/* Re-check: sdopen() may have slept while the device detached. */
	if (sc->flags & SDF_DYING) {
		size = -1;
		goto exit;
	}
	/* Only FS_SWAP partitions with media present report a size. */
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0)
		size = -1;
	else if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
	/* Undo our temporary open; a failed close invalidates the answer. */
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

 exit:
	/* Drop the reference taken by sdlookup(). */
	device_unref(&sc->sc_dev);
	return size;
}
1299 
/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
static int sddoingadump;	/* nonzero while a dump is in progress */

/*
 * dump all of physical memory into the partition specified, starting
 * at offset 'dumplo' into the partition.
 */
int
sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct sd_softc *sc;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int	unit, part;
	u_int32_t sectorsize;	/* size of a disk sector */
	u_int64_t nsects;	/* number of sectors in partition */
	u_int64_t sectoff;	/* sector offset of partition */
	u_int64_t totwrt;	/* total number of sectors left to write */
	u_int32_t nwrt;		/* current number of sectors to write */
	struct scsi_xfer *xs;	/* ... convenience */
	int rv;

	/* Check if recursive dump; if so, punt. */
	if (sddoingadump)
		return EFAULT;
	if (blkno < 0)
		return EINVAL;

	/*
	 * Mark as active early.
	 * NOTE(review): the error returns below never clear sddoingadump,
	 * so a failed dump can't be retried — confirm this is acceptable.
	 */
	sddoingadump = 1;

	unit = DISKUNIT(dev);	/* Decompose unit & partition. */
	part = DISKPART(dev);

	/* Check for acceptable drive number. */
	if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
		return ENXIO;

	/*
	 * XXX Can't do this check, since the media might have been
	 * XXX marked `invalid' by successful unmounting of all
	 * XXX filesystems.
	 */
#if 0
	/* Make sure it was initialized. */
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) != SDEV_MEDIA_LOADED)
		return ENXIO;
#endif

	/* Convert to disk sectors.  Request must be a multiple of size. */
	lp = sc->sc_dk.dk_label;
	sectorsize = lp->d_secsize;
	if ((size % sectorsize) != 0)
		return EFAULT;
	if ((blkno % DL_BLKSPERSEC(lp)) != 0)
		return EFAULT;
	totwrt = size / sectorsize;
	blkno = DL_BLKTOSEC(lp, blkno);	/* disk blocks -> disk sectors */

	nsects = DL_GETPSIZE(&lp->d_partitions[part]);
	sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);

	/* Check transfer bounds against partition size. */
	if ((blkno + totwrt) > nsects)
		return EINVAL;

	/* Offset block number to start of partition. */
	blkno += sectoff;

	while (totwrt > 0) {
		/* Clamp each transfer to a 32-bit sector count. */
		if (totwrt > UINT32_MAX)
			nwrt = UINT32_MAX;
		else
			nwrt = totwrt;

#ifndef	SD_DUMP_NOT_TRUSTED
		/* SCSI_NOSLEEP: interrupts may be dead during a dump. */
		xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
		if (xs == NULL)
			return (ENOMEM);

		xs->timeout = 10000;
		xs->flags |= SCSI_DATA_OUT;
		xs->data = va;
		xs->datalen = nwrt * sectorsize;

		sd_cmd_rw10(xs, 0, blkno, nwrt); /* XXX */

		/* Run the write synchronously; any failure aborts the dump. */
		rv = scsi_xs_sync(xs);
		scsi_xs_put(xs);
		if (rv != 0)
			return (ENXIO);
#else	/* SD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first... */
		/* NOTE(review): %x does not match the pointer argument va. */
		printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
		    (long long)blkno);
		delay(500 * 1000);	/* half a second */
#endif	/* SD_DUMP_NOT_TRUSTED */

		/* update block count */
		totwrt -= nwrt;
		blkno += nwrt;
		va += sectorsize * nwrt;
	}

	sddoingadump = 0;

	return (0);
}
1407 
1408 /*
1409  * Copy up to len chars from src to dst, ignoring non-printables.
1410  * Must be room for len+1 chars in dst so we can write the NUL.
1411  * Does not assume src is NUL-terminated.
1412  */
1413 void
1414 viscpy(u_char *dst, u_char *src, int len)
1415 {
1416 	while (len > 0 && *src != '\0') {
1417 		if (*src < 0x20 || *src >= 0x80) {
1418 			src++;
1419 			continue;
1420 		}
1421 		*dst++ = *src++;
1422 		len--;
1423 	}
1424 	*dst = '\0';
1425 }
1426 
/*
 * Issue READ CAPACITY(10) and, on success, record the disk size and
 * sector size in sc->params. Returns 0 or an errno.
 */
int
sd_read_cap_10(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity cdb;
	struct scsi_read_cap_data *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;	/* returned if scsi_xs_get() fails below */

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	/* Response buffer comes from the DMA-reachable pool. */
	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;

	/* Build the CDB locally, then copy it into the xfer. */
	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY;

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		/* Device reports its last LBA; disk size is last + 1. */
		sc->params.disksize = _4btol(rdcap->addr) + 1ll;
		sc->params.secsize = _4btol(rdcap->length);
		/* READ CAPACITY(10) carries no thin provisioning info. */
		CLR(sc->flags, SDF_THIN);
	}

 done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
1472 
/*
 * Issue READ CAPACITY(16) and, on success, record the disk size, sector
 * size and thin provisioning capability. Returns 0 or an errno.
 */
int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity_16 cdb;
	struct scsi_read_cap_data_16 *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;	/* returned if scsi_xs_get() fails below */

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	/* Response buffer comes from the DMA-reachable pool. */
	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;

	/* READ CAPACITY(16) is a SERVICE ACTION IN variant. */
	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY_16;
	cdb.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(*rdcap), cdb.length);

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		/* A last-LBA of 0 is not a usable answer. */
		if (_8btol(rdcap->addr) == 0) {
			rv = EIO;
			goto done;
		}

		/* Device reports its last LBA; disk size is last + 1. */
		sc->params.disksize = _8btol(rdcap->addr) + 1;
		sc->params.secsize = _4btol(rdcap->length);
		/* TPE bit advertises thin provisioning support. */
		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
			SET(sc->flags, SDF_THIN);
		else
			CLR(sc->flags, SDF_THIN);
	}

 done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
1528 
1529 int
1530 sd_size(struct sd_softc *sc, int flags)
1531 {
1532 	int rv;
1533 
1534 	if (sc->flags & SDF_DYING)
1535 		return (ENXIO);
1536 	if (SCSISPC(sc->sc_link->inqdata.version) >= 3) {
1537 		rv = sd_read_cap_16(sc, flags);
1538 		if (rv != 0)
1539 			rv = sd_read_cap_10(sc, flags);
1540 	} else {
1541 		rv = sd_read_cap_10(sc, flags);
1542 		if (rv == 0 && sc->params.disksize == 0x100000000ll)
1543 			rv = sd_read_cap_16(sc, flags);
1544 	}
1545 
1546 	return (rv);
1547 }
1548 
/*
 * Check the SUPPORTED VPD page list for both thin provisioning related
 * pages (DISK LIMITS and DISK THIN). Returns 0 only if both are
 * advertised, EOPNOTSUPP otherwise, or another errno on failure.
 */
int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	size_t len = 0;		/* stays 0 until the first inquiry succeeds */
	u_int8_t *pages;
	int i, score = 0;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	/* First pass: fetch just the header to learn the list length. */
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);

	/* Second pass: reallocate with room for the full page list. */
	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	/* The supported page codes follow directly after the header. */
	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	/* Count how many of the two required pages are advertised. */
	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	if (score < 2)
		rv = EOPNOTSUPP;

 done:
	/* len matches the live allocation: 0 before the realloc above. */
	dma_free(pg, sizeof(*pg) + len);
	return (rv);
}
1611 
/*
 * Read the DISK LIMITS VPD page and record the device's UNMAP limits
 * in sc->params. Returns EOPNOTSUPP if the page is not the "thin"
 * sized variant that carries those fields.
 */
int
sd_vpd_block_limits(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_limits *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_LIMITS, flags);
	if (rv != 0)
		goto done;

	/* Only the thin-provisioning sized page has the unmap fields. */
	if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
		sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
		sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
	} else
		rv = EOPNOTSUPP;

 done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
1642 
/*
 * Read the DISK THIN VPD page. The selection of a deletion method
 * based on its flags is compiled out (notyet), so at present this
 * only verifies that the page can be read.
 */
int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_thin *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (sc->flags & SDF_DYING) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_THIN, flags);
	if (rv != 0)
		goto done;

#ifdef notyet
	/* Prefer UNMAP; fall back to WRITE SAME(16) if that's all there is. */
	if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
		sc->sc_delete = sd_unmap;
	else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
		sc->sc_delete = sd_write_same_16;
		sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
	} else
		rv = EOPNOTSUPP;
#endif

 done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
1677 
/*
 * Gather all thin provisioning parameters: verify both VPD pages are
 * supported, then read the UNMAP limits and the thin page itself.
 * Returns 0 only if every step succeeds.
 */
int
sd_thin_params(struct sd_softc *sc, int flags)
{
	int rv;

	if ((rv = sd_thin_pages(sc, flags)) != 0)
		return (rv);

	if ((rv = sd_vpd_block_limits(sc, flags)) != 0)
		return (rv);

	return (sd_vpd_thin(sc, flags));
}
1697 
1698 /*
1699  * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
1700  * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
1701  * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
1702  * cannot be completed.
1703  */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	struct scsi_link *link;
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
	int err = 0, big;

	/* Capacity must be known first; without it the disk is offline. */
	if (sd_size(sc, flags) != 0)
		return (SDGP_RESULT_OFFLINE);

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/* we dont know the unmap limits, so we cant use thin shizz */
		CLR(sc->flags, SDF_THIN);
	}

	/* No DMA buffer? Skip mode sensing and fall back to defaults. */
	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	if (sc->flags & SDF_DYING)
		goto die;
	link = sc->sc_link;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info. The only thing USB devices will ask for.
	 */
	err = scsi_do_mode_sense(link, 0, buf, (void **)&page0,
	    NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
	if (sc->flags & SDF_DYING)
		goto die;
	if (err == 0) {
		/* The write-protect bit lives in the mode sense header. */
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else
			CLR(link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;

	switch (link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY,
		    buf, (void **)&reduced, NULL, NULL, &secsize,
		    sizeof(*reduced), flags | SCSI_SILENT, NULL);
		if (!err && reduced &&
		    DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			/* Only fill in values READ CAPACITY didn't supply. */
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (secsize == 0)
				secsize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		if (((link->flags & SDEV_ATAPI) == 0) ||
		    ((link->flags & SDEV_REMOVABLE) == 0))
			err = scsi_do_mode_sense(link,
			    PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
			    NULL, &secsize, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, NULL);
		if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			/* The rigid page has no sector count; derive it. */
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			if (sc->flags & SDF_DYING)
				goto die;
			/* Fall back to the flexible disk geometry page. */
			err = scsi_do_mode_sense(link,
			    PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
			    &secsize, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, NULL);
			if (!err && flex &&
			    DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				if (secsize == 0)
					secsize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		dma_free(buf, sizeof(*buf));

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	/* READ CAPACITY wins; 512 is the last-resort default. */
	if (dp->secsize == 0)
		dp->secsize = (secsize == 0) ? 512 : secsize;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp->secsize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#lx\n", dp->secsize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	/*
	 * NOTE(review): dp->heads/dp->sectors are read here before being
	 * (re)assigned below, so this test sees whatever the caller left
	 * in dp — confirm this is intentional.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	if (dp->cyls == 0) {
		/* Tiny disk: model it as a single track. */
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);

die:
	dma_free(buf, sizeof(*buf));
	return (SDGP_RESULT_OFFLINE);
}
1878 
/*
 * Flush the device's write cache with SYNCHRONIZE CACHE and, on
 * success, clear the softc's dirty flag. Best effort: failures are
 * only reported via SC_DEBUG.
 */
void
sd_flush(struct sd_softc *sc, int flags)
{
	struct scsi_link *link;
	struct scsi_xfer *xs;
	struct scsi_synchronize_cache *cmd;

	if (sc->flags & SDF_DYING)
		return;
	link = sc->sc_link;

	/* Some devices are quirked as mishandling the command; skip them. */
	if (link->quirks & SDEV_NOSYNCCACHE)
		return;

	/*
	 * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
	 * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
	 * that the command is not supported by the device.
	 */

	xs = scsi_xs_get(link, flags);
	if (xs == NULL) {
		SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
		return;
	}

	/*
	 * Only the opcode is set; the address/length fields are left as-is
	 * (presumably zeroed by scsi_xs_get() — confirm), giving the
	 * "all blocks from 0" semantics described above.
	 */
	cmd = (struct scsi_synchronize_cache *)xs->cmd;
	cmd->opcode = SYNCHRONIZE_CACHE;

	xs->cmdlen = sizeof(*cmd);
	xs->timeout = 100000;
	xs->flags |= SCSI_IGNORE_ILLEGAL_REQUEST;

	/* Only a successful sync lets us forget the dirty state. */
	if (scsi_xs_sync(xs) == 0)
		sc->flags &= ~SDF_DIRTY;
	else
		SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));

	scsi_xs_put(xs);
}
1919