/*	$OpenBSD: sd.c,v 1.310 2020/02/20 16:26:02 krw Exp $	*/
/*	$NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $	*/

/*-
 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@dialix.oz.au)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 *
 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
 */
#include <sys/stdint.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/conf.h>
#include <sys/scsiio.h>
#include <sys/dkio.h>
#include <sys/reboot.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>
#include <scsi/sdvar.h>

#include <ufs/ffs/fs.h>		/* for BBSIZE and SBSIZE */

#include <sys/vnode.h>

int	sdmatch(struct device *, void *, void *);
void	sdattach(struct device *, struct device *, void *);
int	sdactivate(struct device *, int);
int	sddetach(struct device *, int);

void	sdminphys(struct buf *);
int	sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
void	sdstart(struct scsi_xfer *);
int	sd_interpret_sense(struct scsi_xfer *);
int	sd_read_cap_10(struct sd_softc *, int);
int	sd_read_cap_16(struct sd_softc *, int);
int	sd_read_cap(struct sd_softc *, int);
int	sd_thin_pages(struct sd_softc *, int);
int	sd_vpd_block_limits(struct sd_softc *, int);
int	sd_vpd_thin(struct sd_softc *, int);
int	sd_thin_params(struct sd_softc *, int);
int	sd_get_parms(struct sd_softc *, int);
int	sd_flush(struct sd_softc *, int);

void	viscpy(u_char *, u_char *, int);

int	sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
int	sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);

void	sd_cmd_rw6(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw10(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw12(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw16(struct scsi_xfer *, int, u_int64_t, u_int);

void	sd_buf_done(struct scsi_xfer *);

struct cfattach sd_ca = {
	sizeof(struct sd_softc), sdmatch, sdattach,
	sddetach, sdactivate
};

struct cfdriver sd_cd = {
	NULL, "sd", DV_DISK
};

const struct scsi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "", "", ""},
	{T_DIRECT, T_REMOV,
	 "", "", ""},
	{T_RDIRECT, T_FIXED,
	 "", "", ""},
	{T_RDIRECT, T_REMOV,
	 "", "", ""},
	{T_OPTICAL, T_FIXED,
	 "", "", ""},
	{T_OPTICAL, T_REMOV,
	 "", "", ""},
};

#define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))

int
sdmatch(struct device *parent, void *match, void *aux)
{
	struct scsi_attach_args *sa = aux;
	int priority;

	(void)scsi_inqmatch(sa->sa_inqbuf,
	    sd_patterns, nitems(sd_patterns),
	    sizeof(sd_patterns[0]), &priority);

	return priority;
}

/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 */
void
sdattach(struct device *parent, struct device *self, void *aux)
{
	struct dk_cache dkc;
	struct sd_softc *sc = (struct sd_softc *)self;
	struct scsi_attach_args *sa = aux;
	struct disk_parms *dp = &sc->params;
	struct scsi_link *link = sa->sa_sc_link;
	int error, sd_autoconf;
	int sortby = BUFQ_DEFAULT;

	SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));

	sd_autoconf = scsi_autoconf | SCSI_SILENT |
	    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;

	/*
	 * Store information needed to contact our base driver.
	 */
	sc->sc_link = link;
	link->interpret_sense = sd_interpret_sense;
	link->device_softc = sc;

	if (ISSET(link->flags, SDEV_ATAPI) && ISSET(link->flags,
	    SDEV_REMOVABLE))
		SET(link->quirks, SDEV_NOSYNCCACHE);

	if (!ISSET(link->inqdata.flags, SID_RelAdr))
		SET(link->quirks, SDEV_ONLYBIG);

	/*
	 * Note if this device is ancient. This is used in sdminphys().
	 */
	if (!ISSET(link->flags, SDEV_ATAPI) &&
	    SID_ANSII_REV(sa->sa_inqbuf) == SCSI_REV_0)
		SET(sc->flags, SDF_ANCIENT);

	/*
	 * Use the subdriver to request information regarding the drive. We
	 * cannot use interrupts yet, so the request must specify this.
	 */
	printf("\n");

	scsi_xsh_set(&sc->sc_xsh, link, sdstart);
	timeout_set(&sc->sc_timeout, (void (*)(void *))scsi_xsh_add,
	    &sc->sc_xsh);

	/* Spin up non-UMASS devices ready or not. */
	if (!ISSET(link->flags, SDEV_UMASS))
		scsi_start(link, SSS_START, sd_autoconf);

	/*
	 * Some devices (e.g. BlackBerry Pearl) won't admit they have
	 * media loaded unless it's been locked in.
	 */
	if (ISSET(link->flags, SDEV_REMOVABLE))
		scsi_prevent(link, PR_PREVENT, sd_autoconf);

	/* Check that it is still responding and ok. */
	error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
	    sd_autoconf);
	if (error == 0)
		error = sd_get_parms(sc, sd_autoconf);

	if (ISSET(link->flags, SDEV_REMOVABLE))
		scsi_prevent(link, PR_ALLOW, sd_autoconf);

	if (error == 0) {
		printf("%s: %lluMB, %u bytes/sector, %llu sectors",
		    sc->sc_dev.dv_xname,
		    dp->disksize / (1048576 / dp->secsize), dp->secsize,
		    dp->disksize);
		if (ISSET(sc->flags, SDF_THIN)) {
			sortby = BUFQ_FIFO;
			printf(", thin");
		}
		if (ISSET(link->flags, SDEV_READONLY))
			printf(", readonly");
		printf("\n");
	}

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, sortby);

	/*
	 * Enable write cache by default.
	 */
	memset(&dkc, 0, sizeof(dkc));
	if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
		dkc.wrcache = 1;
		sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
	}

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);
}
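/*
 * React to suspend/resume, powerdown, and deactivate events. The write
 * cache is flushed before suspend and powerdown so no dirty data is lost.
 */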
int
sdactivate(struct device *self, int act)
{
	struct scsi_link *link;
	struct sd_softc *sc = (struct sd_softc *)self;

	if (ISSET(sc->flags, SDF_DYING))
		return ENXIO;
	link = sc->sc_link;

	switch (act) {
	case DVACT_SUSPEND:
		/*
		 * Flush the cache, since the next step before
		 * DVACT_POWERDOWN might be a hibernate operation.
		 */
		if (ISSET(sc->flags, SDF_DIRTY))
			sd_flush(sc, SCSI_AUTOCONF);
		break;
	case DVACT_POWERDOWN:
		/*
		 * Stop the disk. Stopping the disk should flush the
		 * cache, but we are paranoid so we flush the cache
		 * first. We're cold at this point, so we poll for
		 * completion.
		 */
		if (ISSET(sc->flags, SDF_DIRTY))
			sd_flush(sc, SCSI_AUTOCONF);
		if (ISSET(boothowto, RB_POWERDOWN))
			scsi_start(link, SSS_STOP,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
		break;
	case DVACT_RESUME:
		scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
		break;
	case DVACT_DEACTIVATE:
		SET(sc->flags, SDF_DYING);
		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
		break;
	}
	return 0;
}

int
sddetach(struct device *self, int flags)
{
	struct sd_softc *sc = (struct sd_softc *)self;

	bufq_drain(&sc->sc_bufq);

	disk_gone(sdopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return 0;
}

/*
 * Open the device. Make sure the partition info is as up-to-date as can be.
 */
int
sdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int error = 0, part, rawopen, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);

	rawopen = (part == RAW_PART) && (fmt == S_IFCHR);

	sc = sdlookup(unit);
	if (sc == NULL)
		return ENXIO;
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return ENXIO;
	}
	link = sc->sc_link;

	if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
		device_unref(&sc->sc_dev);
		return EACCES;
	}

	SC_DEBUG(link, SDEV_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	if ((error = disk_lock(&sc->sc_dk)) != 0) {
		device_unref(&sc->sc_dev);
		return error;
	}

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partitions.
		 */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
			if (rawopen)
				goto out;
			error = EIO;
			goto bad;
		}
	} else {
		/* Spin up non-UMASS devices ready or not. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (!ISSET(link->flags, SDEV_UMASS))
			scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
			    0) | SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);

		/*
		 * Use sd_interpret_sense() for sense errors.
		 *
		 * But only after spinning the disk up! Just in case a broken
		 * device returns "Initialization command required." and causes
		 * a loop of scsi_start() calls.
		 */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		SET(link->flags, SDEV_OPEN);

		/*
		 * Try to prevent the unloading of a removable device while
		 * it's open. But allow the open to proceed if the device can't
		 * be locked in.
		 */
		if (ISSET(link->flags, SDEV_REMOVABLE)) {
			scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		}
		/* Check that it is still responding and ok. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		error = scsi_test_unit_ready(link,
		    TEST_READY_RETRIES, SCSI_SILENT |
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
		if (error) {
			if (rawopen) {
				error = 0;
				goto out;
			} else
				goto bad;
		}

		/* Load the physical device parameters. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		SET(link->flags, SDEV_MEDIA_LOADED);
		if (sd_get_parms(sc, (rawopen ? SCSI_SILENT : 0)) == -1) {
			if (ISSET(sc->flags, SDF_DYING)) {
				error = ENXIO;
				goto die;
			}
			CLR(link->flags, SDEV_MEDIA_LOADED);
			error = ENXIO;
			goto bad;
		}
		SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));

		/* Load the partition info if not already loaded. */
		error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
		if (error == EIO || error == ENXIO)
			goto bad;
		SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
	}

out:
	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto bad;

	SC_DEBUG(link, SDEV_DB3, ("open complete\n"));

	/* It's OK to fall through because dk_openmask is now non-zero. */
bad:
	if (sc->sc_dk.dk_openmask == 0) {
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (ISSET(link->flags, SDEV_REMOVABLE))
			scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return error;
}

/*
 * Close the device. Only called if we are the last occurrence of an open
 * device. Convenient now but usually a pain.
 */
int
sdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int part = DISKPART(dev);
	int error = 0;

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return ENXIO;
	}
	link = sc->sc_link;

	disk_lock_nointr(&sc->sc_dk);

	disk_closepart(&sc->sc_dk, part, fmt);

	if ((ISSET(flag, FWRITE) || sc->sc_dk.dk_openmask == 0) &&
	    ISSET(sc->flags, SDF_DIRTY))
		sd_flush(sc, 0);

	if (sc->sc_dk.dk_openmask == 0) {
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (ISSET(link->flags, SDEV_REMOVABLE))
			scsi_prevent(link, PR_ALLOW,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_SILENT);
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);

		if (ISSET(link->flags, SDEV_EJECTING)) {
			scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
			if (ISSET(sc->flags, SDF_DYING)) {
				error = ENXIO;
				goto die;
			}
			CLR(link->flags, SDEV_EJECTING);
		}

		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return error;
}
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand. The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	if (ISSET(sc->flags, SDF_DYING)) {
		bp->b_error = ENXIO;
		goto bad;
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
	    bp->b_bcount, (long long)bp->b_blkno));
	/*
	 * If the device has been made invalid, error out.
	 */
	if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
		if (ISSET(link->flags, SDEV_OPEN))
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Place it in the queue of disk activities for this disk. */
	bufq_queue(&sc->sc_bufq, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion.
	 */
	scsi_xsh_add(&sc->sc_xsh);

	device_unref(&sc->sc_dev);
	return;

bad:
	SET(bp->b_flags, B_ERROR);
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}

void
sd_cmd_rw6(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
{
	struct scsi_rw *cmd = (struct scsi_rw *)xs->cmd;

	cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
	_lto3b(secno, cmd->addr);
	cmd->length = nsecs;

	xs->cmdlen = sizeof(*cmd);
}

void
sd_cmd_rw10(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
{
	struct scsi_rw_big *cmd = (struct scsi_rw_big *)xs->cmd;

	cmd->opcode = read ? READ_BIG : WRITE_BIG;
	_lto4b(secno, cmd->addr);
	_lto2b(nsecs, cmd->length);

	xs->cmdlen = sizeof(*cmd);
}

void
sd_cmd_rw12(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
{
	struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)xs->cmd;

	cmd->opcode = read ? READ_12 : WRITE_12;
	_lto4b(secno, cmd->addr);
	_lto4b(nsecs, cmd->length);

	xs->cmdlen = sizeof(*cmd);
}

void
sd_cmd_rw16(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
{
	struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)xs->cmd;

	cmd->opcode = read ? READ_16 : WRITE_16;
	_lto8b(secno, cmd->addr);
	_lto4b(nsecs, cmd->length);

	xs->cmdlen = sizeof(*cmd);
}
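/*
 * Each transfer is issued with the smallest CDB that can address it:
 * rw6 takes a 21-bit LBA and an 8-bit sector count, rw10 a 32-bit LBA
 * and 16-bit count, rw12 a 32-bit LBA and 32-bit count, and rw16 a
 * 64-bit LBA and 32-bit count. sdstart() below picks the variant.
 */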
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy).
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	struct partition *p;
	int nsecs, read;
	u_int64_t secno;

	if (ISSET(sc->flags, SDF_DYING)) {
		scsi_xs_put(xs);
		return;
	}
	if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 * Fill out the scsi command. If the transfer will
	 * fit in a "small" cdb, use it.
	 */
	if (!ISSET(link->flags, SDEV_ATAPI) &&
	    !ISSET(link->quirks, SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	SET(xs->flags, (read ? SCSI_DATA_IN : SCSI_DATA_OUT));
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;

	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		SET(sc->flags, SDF_DIRTY);

	scsi_xs_exec(xs);

	/* Move on to the next io. */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}

void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;
	int error, s;

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		CLR(bp->b_flags, B_ERROR);
		bp->b_resid = xs->resid;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		SC_DEBUG_SENSE(xs);
		error = sd_interpret_sense(xs);
		if (error == 0) {
			bp->b_error = 0;
			CLR(bp->b_flags, B_ERROR);
			bp->b_resid = xs->resid;
			break;
		}
		if (error != ERESTART) {
			bp->b_error = error;
			SET(bp->b_flags, B_ERROR);
			xs->retries = 0;
		}
		goto retry;

	case XS_BUSY:
		if (xs->retries) {
			if (scsi_delay(xs, 1) != ERESTART)
				xs->retries = 0;
		}
		goto retry;

	case XS_TIMEOUT:
retry:
		if (xs->retries--) {
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		if (bp->b_error == 0)
			bp->b_error = EIO;
		SET(bp->b_flags, B_ERROR);
		bp->b_resid = bp->b_bcount;
		break;
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid, bp->b_blkno,
	    bp->b_flags & B_READ);

	s = splbio();
	biodone(bp);
	splx(s);
	scsi_xs_put(xs);
}
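/*
 * Trim a buf to the largest transfer this device and its adapter can
 * handle; physio() uses this to split raw i/o into valid chunks.
 */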
void
sdminphys(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	long max;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL)
		return;	/* XXX - right way to fail this? */
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return;
	}
	link = sc->sc_link;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0. However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0. A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if (ISSET(sc->flags, SDF_ANCIENT)) {
		max = sc->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	if (link->adapter->dev_minphys != NULL)
		(*link->adapter->dev_minphys)(bp, link);
	else
		minphys(bp);

	device_unref(&sc->sc_dev);
}

int
sdread(dev_t dev, struct uio *uio, int ioflag)
{
	return physio(sdstrategy, dev, B_READ, sdminphys, uio);
}

int
sdwrite(dev_t dev, struct uio *uio, int ioflag)
{
	return physio(sdstrategy, dev, B_WRITE, sdminphys, uio);
}

/*
 * Perform special action on behalf of the user. Knows about the internals of
 * this device.
 */
int
sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	struct disklabel *lp;
	int error = 0;
	int part = DISKPART(dev);

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return ENXIO;
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));

	/*
	 * If the device is not valid, abandon ship.
	 */
	if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
		switch (cmd) {
		case DIOCLOCK:
		case DIOCEJECT:
		case SCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			if (part == RAW_PART)
				break;
			/* FALLTHROUGH */
		default:
			if (!ISSET(link->flags, SDEV_OPEN)) {
				error = ENODEV;
				goto exit;
			} else {
				error = EIO;
				goto exit;
			}
		}
	}

	switch (cmd) {
	case DIOCRLDINFO:
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		sdgetdisklabel(dev, sc, lp, 0);
		memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			if (cmd == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    sdstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	case DIOCLOCK:
		error = scsi_prevent(link,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
		goto exit;
	case MTIOCTOP:
		if (((struct mtop *)addr)->mt_op != MTOFFL) {
			error = EIO;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCEJECT:
		if (!ISSET(link->flags, SDEV_REMOVABLE)) {
			error = ENOTTY;
			goto exit;
		}
		SET(link->flags, SDEV_EJECTING);
		goto exit;

	case DIOCINQ:
		error = scsi_do_ioctl(link, cmd, addr, flag);
		if (error == ENOTTY)
			error = sd_ioctl_inquiry(sc,
			    (struct dk_inquiry *)addr);
		goto exit;

	case DIOCSCACHE:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCGCACHE:
		error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
		goto exit;

	case DIOCCACHESYNC:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		if (ISSET(sc->flags, SDF_DIRTY) || *(int *)addr != 0)
			error = sd_flush(sc, 0);
		goto exit;

	default:
		if (part != RAW_PART) {
			error = ENOTTY;
			goto exit;
		}
		error = scsi_do_ioctl(link, cmd, addr, flag);
	}

exit:
	device_unref(&sc->sc_dev);
	return error;
}

int
sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
{
	struct scsi_link *link;
	struct scsi_vpd_serial *vpd;

	vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);

	if (ISSET(sc->flags, SDF_DYING)) {
		dma_free(vpd, sizeof(*vpd));
		return ENXIO;
	}
	link = sc->sc_link;

	bzero(di, sizeof(struct dk_inquiry));
	scsi_strvis(di->vendor, link->inqdata.vendor,
	    sizeof(link->inqdata.vendor));
	scsi_strvis(di->product, link->inqdata.product,
	    sizeof(link->inqdata.product));
	scsi_strvis(di->revision, link->inqdata.revision,
	    sizeof(link->inqdata.revision));

	/* The serial vpd page is optional. */
	if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
		scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
	else
		strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));

	dma_free(vpd, sizeof(*vpd));
	return 0;
}

int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
	struct scsi_link *link;
	union scsi_mode_sense_buf *buf;
	struct page_caching_mode *mode = NULL;
	u_int wrcache, rdcache;
	int big, rv;

	if (ISSET(sc->flags, SDF_DYING))
		return ENXIO;
	link = sc->sc_link;

	if (ISSET(link->flags, SDEV_UMASS))
		return EOPNOTSUPP;

	/* See if the adapter has special handling. */
	rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
	if (rv != ENOTTY)
		return rv;

	buf = dma_alloc(sizeof(*buf), PR_WAITOK);
	if (buf == NULL)
		return ENOMEM;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE, buf, (void **)&mode,
	    sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
	if (rv == 0 && mode == NULL)
		rv = EIO;
	if (rv != 0)
		goto done;

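	/*
	 * WCE set means the write cache is enabled. RCD set means the read
	 * cache is *disabled*, so rdcache is the inverse of the bit.
	 */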
	wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
	rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);

	switch (cmd) {
	case DIOCGCACHE:
		dkc->wrcache = wrcache;
		dkc->rdcache = rdcache;
		break;

	case DIOCSCACHE:
		if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
			break;

		if (dkc->wrcache)
			SET(mode->flags, PG_CACHE_FL_WCE);
		else
			CLR(mode->flags, PG_CACHE_FL_WCE);

		if (dkc->rdcache)
			CLR(mode->flags, PG_CACHE_FL_RCD);
		else
			SET(mode->flags, PG_CACHE_FL_RCD);

		if (ISSET(sc->flags, SDF_DYING)) {
			rv = ENXIO;
			goto done;
		}
		if (big) {
			rv = scsi_mode_select_big(link, SMS_PF,
			    &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
		} else {
			rv = scsi_mode_select(link, SMS_PF,
			    &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
		}
		break;
	}

done:
	dma_free(buf, sizeof(*buf));
	return rv;
}

/*
 * Load the label information on the named device.
 */
int
sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
    int spoofonly)
{
	char packname[sizeof(lp->d_packname) + 1];
	char product[17], vendor[9];
	struct scsi_link *link;
	size_t len;

	if (ISSET(sc->flags, SDF_DYING))
		return ENXIO;
	link = sc->sc_link;

	bzero(lp, sizeof(struct disklabel));

	lp->d_secsize = sc->params.secsize;
	lp->d_ntracks = sc->params.heads;
	lp->d_nsectors = sc->params.sectors;
	lp->d_ncylinders = sc->params.cyls;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	if (lp->d_secpercyl == 0) {
		lp->d_secpercyl = 100;
		/* As long as it's not 0 - readdisklabel divides by it. */
	}

	lp->d_type = DTYPE_SCSI;
	if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
		strncpy(lp->d_typename, "SCSI optical",
		    sizeof(lp->d_typename));
	else
		strncpy(lp->d_typename, "SCSI disk",
		    sizeof(lp->d_typename));

	/*
	 * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
	 * then leave out '<vendor> ' and use only as much of '<product>' as
	 * does fit.
	 */
	viscpy(vendor, link->inqdata.vendor, 8);
	viscpy(product, link->inqdata.product, 16);
	len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
	if (len > sizeof(lp->d_packname)) {
		strlcpy(packname, product, sizeof(packname));
		len = strlen(packname);
	}
	/*
	 * It is safe to use len as the count of characters to copy because
	 * packname is sizeof(lp->d_packname)+1, the string in packname is
	 * always null terminated and len does not count the terminating null.
	 * d_packname is not a null terminated string.
	 */
	memcpy(lp->d_packname, packname, len);

	DL_SETDSIZE(lp, sc->params.disksize);
	lp->d_version = 1;
	lp->d_flags = 0;

	/* XXX - These values for BBSIZE and SBSIZE assume ffs. */
	lp->d_bbsize = BBSIZE;
	lp->d_sbsize = SBSIZE;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
}

/*
 * Check errors returned in sense data, handling some NOT READY cases
 * here and deferring everything else to the generic code.
 */
int
sd_interpret_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense;
	struct scsi_link *link = xs->sc_link;
	int retval;
	u_int8_t serr = sense->error_code & SSD_ERRCODE;

	/*
	 * Let the generic code handle everything except a few categories of
	 * LUN not ready errors on open devices.
	 */
	if ((!ISSET(link->flags, SDEV_OPEN)) ||
	    (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
	    ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
	    (sense->extra_len < 6))
		return scsi_interpret_sense(xs);

	if (ISSET(xs->flags, SCSI_IGNORE_NOT_READY))
		return 0;

	switch (ASC_ASCQ(sense)) {
	case SENSE_NOT_READY_BECOMING_READY:
		SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
		retval = scsi_delay(xs, 5);
		break;

	case SENSE_NOT_READY_INIT_REQUIRED:
		SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
		retval = scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
		if (retval == 0)
			retval = ERESTART;
		else if (retval == ENOMEM)
			/* Can't issue the command. Fall back on a delay. */
			retval = scsi_delay(xs, 5);
		else
			SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
			    retval));
		break;

	default:
		retval = scsi_interpret_sense(xs);
		break;
	}

	return retval;
}

daddr_t
sdsize(dev_t dev)
{
	struct disklabel *lp;
	struct sd_softc *sc;
	daddr_t size;
	int part, omask;

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return -1;
	if (ISSET(sc->flags, SDF_DYING)) {
		size = -1;
		goto exit;
	}

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	if (ISSET(sc->flags, SDF_DYING)) {
		size = -1;
		goto exit;
	}
	if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
		size = -1;
	else if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

exit:
	device_unref(&sc->sc_dev);
	return size;
}
/* #define SD_DUMP_NOT_TRUSTED if you just want to watch. */
static int sddoingadump;

/*
 * Dump all of physical memory into the partition specified, starting
 * at offset 'dumplo' into the partition.
 */
int
sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct sd_softc *sc;
	struct disklabel *lp;
	struct scsi_xfer *xs;
	u_int64_t nsects;	/* partition sectors */
	u_int64_t sectoff;	/* partition offset */
	u_int64_t totwrt;	/* sectors left */
	int part, rv, unit;
	u_int32_t sectorsize;
	u_int32_t nwrt;		/* sectors to write */

	/* Check if recursive dump; if so, punt. */
	if (sddoingadump)
		return EFAULT;
	if (blkno < 0)
		return EINVAL;

	/* Mark as active early. */
	sddoingadump = 1;

	unit = DISKUNIT(dev);	/* Decompose unit & partition. */
	part = DISKPART(dev);

	/* Check for acceptable drive number. */
	if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
		return ENXIO;

	/*
	 * XXX Can't do this check, since the media might have been
	 * XXX marked `invalid' by successful unmounting of all
	 * XXX filesystems.
	 */
#if 0
	/* Make sure it was initialized. */
	if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
		return ENXIO;
#endif /* 0 */

	/* Convert to disk sectors. Request must be a multiple of size. */
	lp = sc->sc_dk.dk_label;
	sectorsize = lp->d_secsize;
	if ((size % sectorsize) != 0)
		return EFAULT;
	if ((blkno % DL_BLKSPERSEC(lp)) != 0)
		return EFAULT;
	totwrt = size / sectorsize;
	blkno = DL_BLKTOSEC(lp, blkno);

	nsects = DL_GETPSIZE(&lp->d_partitions[part]);
	sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);

	/* Check transfer bounds against partition size. */
	if ((blkno + totwrt) > nsects)
		return EINVAL;

	/* Offset block number to start of partition. */
	blkno += sectoff;

	while (totwrt > 0) {
		if (totwrt > UINT32_MAX)
			nwrt = UINT32_MAX;
		else
			nwrt = totwrt;

#ifndef	SD_DUMP_NOT_TRUSTED
		xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
		if (xs == NULL)
			return ENOMEM;

		xs->timeout = 10000;
		SET(xs->flags, SCSI_DATA_OUT);
		xs->data = va;
		xs->datalen = nwrt * sectorsize;

		sd_cmd_rw10(xs, 0, blkno, nwrt);	/* XXX */

		rv = scsi_xs_sync(xs);
		scsi_xs_put(xs);
		if (rv != 0)
			return ENXIO;
#else	/* SD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first. */
		printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
		    (long long)blkno);
		delay(500 * 1000);	/* 1/2 a second */
#endif	/* ~SD_DUMP_NOT_TRUSTED */

		/* Update block count. */
		totwrt -= nwrt;
		blkno += nwrt;
		va += sectorsize * nwrt;
	}

	sddoingadump = 0;

	return 0;
}

/*
 * Copy up to len chars from src to dst, ignoring non-printables.
 * Must be room for len+1 chars in dst so we can write the NUL.
 * Does not assume src is NUL-terminated.
 */
void
viscpy(u_char *dst, u_char *src, int len)
{
	while (len > 0 && *src != '\0') {
		if (*src < 0x20 || *src >= 0x80) {
			src++;
			continue;
		}
		*dst++ = *src++;
		len--;
	}
	*dst = '\0';
}

int
sd_read_cap_10(struct sd_softc *sc, int flags)
{
	struct scsi_read_cap_data *rdcap;
	int rv;

	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return -1;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = -1;
		goto done;
	}

	rv = scsi_read_cap_10(sc->sc_link, rdcap, flags);
	if (rv == 0) {
		/* READ CAPACITY(10) returns a 4-byte address, so use _4btol. */
		if (_4btol(rdcap->addr) == 0) {
			rv = -1;
			goto done;
		}
		sc->params.disksize = _4btol(rdcap->addr) + 1ll;
		sc->params.secsize = _4btol(rdcap->length);
		CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return rv;
}
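/*
 * READ CAPACITY(16) reports a 64-bit LBA and, via the TPE bit in the
 * lowest_aligned field, whether the device is thin provisioned.
 */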
int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
	struct scsi_read_cap_data_16 *rdcap;
	int rv;

	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return -1;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = -1;
		goto done;
	}

	rv = scsi_read_cap_16(sc->sc_link, rdcap, flags);
	if (rv == 0) {
		if (_8btol(rdcap->addr) == 0) {
			rv = -1;
			goto done;
		}
		sc->params.disksize = _8btol(rdcap->addr) + 1ll;
		sc->params.secsize = _4btol(rdcap->length);
		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
			SET(sc->flags, SDF_THIN);
		else
			CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return rv;
}

int
sd_read_cap(struct sd_softc *sc, int flags)
{
	int rv;

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	/*
	 * Post-SPC2 (i.e. post-SCSI-3) devices can start with 16 byte
	 * read capacity commands. Older devices start with the 10 byte
	 * version and move up to the 16 byte version if the device
	 * says it has more sectors than can be reported via the 10 byte
	 * read capacity.
	 */
	if (SID_ANSII_REV(&sc->sc_link->inqdata) > SCSI_REV_SPC2) {
		rv = sd_read_cap_16(sc, flags);
		if (rv != 0)
			rv = sd_read_cap_10(sc, flags);
	} else {
		rv = sd_read_cap_10(sc, flags);
		if (rv == 0 && sc->params.disksize == 0x100000000ll)
			rv = sd_read_cap_16(sc, flags);
	}

	return rv;
}

int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	u_int8_t *pages;
	size_t len = 0;
	int i, rv, score = 0;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return ENOMEM;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);

	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return ENOMEM;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	if (score < 2)
		rv = EOPNOTSUPP;

done:
	dma_free(pg, sizeof(*pg) + len);
	return rv;
}
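/*
 * The BLOCK LIMITS VPD page supplies the UNMAP limits that are saved in
 * sc->params for use by thin provisioning.
 */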
int
sd_vpd_block_limits(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_limits *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return ENOMEM;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_LIMITS, flags);
	if (rv != 0)
		goto done;

	if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
		sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
		sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
	} else
		rv = EOPNOTSUPP;

done:
	dma_free(pg, sizeof(*pg));
	return rv;
}

int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_thin *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return ENOMEM;

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_THIN, flags);
	if (rv != 0)
		goto done;

#ifdef notyet
	if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
		sc->sc_delete = sd_unmap;
	else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
		sc->sc_delete = sd_write_same_16;
		sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
	} else
		rv = EOPNOTSUPP;
#endif /* notyet */

done:
	dma_free(pg, sizeof(*pg));
	return rv;
}

int
sd_thin_params(struct sd_softc *sc, int flags)
{
	int rv;

	rv = sd_thin_pages(sc, flags);
	if (rv != 0)
		return rv;

	rv = sd_vpd_block_limits(sc, flags);
	if (rv != 0)
		return rv;

	rv = sd_vpd_thin(sc, flags);
	if (rv != 0)
		return rv;

	return 0;
}

/*
 * Fill out the disk parameter structure. Return 0 if the structure is
 * correctly filled in, otherwise return -1.
 *
 * The caller is responsible for clearing the SDEV_MEDIA_LOADED flag if the
 * structure cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, int flags)
{
	struct disk_parms dp;
	struct scsi_link *link = sc->sc_link;
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	int big, err = 0;

	if (sd_read_cap(sc, flags) != 0)
		return -1;

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/*
		 * We don't know the unmap limits, so we can't use thin
		 * provisioning.
		 */
		CLR(sc->flags, SDF_THIN);
	}

	/*
	 * Work on a copy of the values initialized by sd_read_cap() and
	 * sd_thin_params().
	 */
	dp = sc->params;

	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	if (ISSET(sc->flags, SDF_DYING))
		goto die;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info. The only thing USB devices will ask for.
	 *
	 * page0 == NULL is a valid situation.
	 */
	err = scsi_do_mode_sense(link, 0, buf, (void **)&page0, 1,
	    flags | SCSI_SILENT, &big);
	if (ISSET(sc->flags, SDF_DYING))
		goto die;
	if (err == 0) {
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else
			CLR(link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * sd_read_cap() worked.
	 */
	if (ISSET(link->flags, SDEV_UMASS) && dp.disksize > 0)
		goto validate;

	switch (link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY, buf,
		    (void **)&reduced, sizeof(*reduced), flags | SCSI_SILENT,
		    &big);
		if (err == 0) {
			scsi_parse_blkdesc(link, buf, big, NULL, NULL,
			    &dp.secsize);
			if (reduced != NULL) {
				if (dp.disksize == 0)
					dp.disksize = _5btol(reduced->sectors);
				if (dp.secsize == 0)
					dp.secsize = _2btol(reduced->bytes_s);
			}
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		err = 0;
		if (!ISSET(link->flags, SDEV_ATAPI) ||
		    !ISSET(link->flags, SDEV_REMOVABLE))
			err = scsi_do_mode_sense(link, PAGE_RIGID_GEOMETRY, buf,
			    (void **)&rigid, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, &big);
		if (err == 0) {
			scsi_parse_blkdesc(link, buf, big, NULL, NULL,
			    &dp.secsize);
			if (rigid != NULL) {
				dp.heads = rigid->nheads;
				dp.cyls = _3btol(rigid->ncyl);
				if (dp.heads * dp.cyls > 0)
					dp.sectors = dp.disksize / (dp.heads *
					    dp.cyls);
			}
		} else {
			if (ISSET(sc->flags, SDF_DYING))
				goto die;
			err = scsi_do_mode_sense(link, PAGE_FLEX_GEOMETRY, buf,
			    (void **)&flex, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, &big);
			if (err == 0) {
				scsi_parse_blkdesc(link, buf, big, NULL, NULL,
				    &dp.secsize);
				if (flex != NULL) {
					dp.sectors = flex->ph_sec_tr;
					dp.heads = flex->nheads;
					dp.cyls = _2btol(flex->ncyl);
					if (dp.secsize == 0)
						dp.secsize =
						    _2btol(flex->bytes_s);
					if (dp.disksize == 0)
						dp.disksize =
						    (u_int64_t)dp.cyls *
						    dp.heads * dp.sectors;
				}
			}
		}
		break;
	}

validate:
	if (buf) {
		dma_free(buf, sizeof(*buf));
		buf = NULL;
	}

	if (dp.disksize == 0)
		goto die;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp.secsize) {
	case 0:
		dp.secsize = DEV_BSIZE;
		break;
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#x\n", dp.secsize));
		return -1;
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	if (dp.disksize > 0xffffffff && (dp.heads * dp.sectors) < 0xffff) {
		dp.heads = 511;
		dp.sectors = 255;
		dp.cyls = 0;
	}

	/*
	 * Use standard geometry values for anything we still don't
	 * know.
	 */
	if (dp.heads == 0)
		dp.heads = 255;
	if (dp.sectors == 0)
		dp.sectors = 63;
	if (dp.cyls == 0) {
		dp.cyls = dp.disksize / (dp.heads * dp.sectors);
		if (dp.cyls == 0) {
			/* Put everything into one cylinder. */
			dp.heads = dp.cyls = 1;
			dp.sectors = dp.disksize;
		}
	}

#ifdef SCSIDEBUG
	if (dp.disksize != (u_int64_t)dp.cyls * dp.heads * dp.sectors) {
		sc_print_addr(sc->sc_link);
		printf("disksize (%llu) != cyls (%u) * heads (%u) * "
		    "sectors/track (%u) (%llu)\n", dp.disksize, dp.cyls,
		    dp.heads, dp.sectors,
		    (u_int64_t)dp.cyls * dp.heads * dp.sectors);
	}
#endif /* SCSIDEBUG */

	sc->params = dp;
	return 0;

die:
	dma_free(buf, sizeof(*buf));
	return -1;
}

int
sd_flush(struct sd_softc *sc, int flags)
{
	struct scsi_link *link;
	struct scsi_xfer *xs;
	struct scsi_synchronize_cache *cmd;
	int error;

	if (ISSET(sc->flags, SDF_DYING))
		return ENXIO;
	link = sc->sc_link;

	if (ISSET(link->quirks, SDEV_NOSYNCCACHE))
		return 0;

	/*
	 * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
	 * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
	 * that the command is not supported by the device.
	 */

	xs = scsi_xs_get(link, flags);
	if (xs == NULL) {
		SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
		return EIO;
	}

	cmd = (struct scsi_synchronize_cache *)xs->cmd;
	cmd->opcode = SYNCHRONIZE_CACHE;

	xs->cmdlen = sizeof(*cmd);
	xs->timeout = 100000;
	SET(xs->flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	error = scsi_xs_sync(xs);

	scsi_xs_put(xs);

	if (error)
		SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
	else
		CLR(sc->flags, SDF_DIRTY);

	return error;
}