/*	$NetBSD: sd.c,v 1.219 2004/06/28 20:24:16 martin Exp $	*/

/*-
 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@dialix.oz.au)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 *
 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.219 2004/06/28 20:24:16 martin Exp $");

#include "opt_scsi.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/sdvar.h>

#define	SDUNIT(dev)			DISKUNIT(dev)
#define	SDPART(dev)			DISKPART(dev)
#define	SDMINOR(unit, part)		DISKMINOR(unit, part)
#define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)

#define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))

int	sdlock __P((struct sd_softc *));
void	sdunlock __P((struct sd_softc *));
void	sdminphys __P((struct buf *));
void	sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
void	sdgetdisklabel __P((struct sd_softc *));
void	sdstart __P((struct scsipi_periph *));
void	sddone __P((struct scsipi_xfer *));
void	sd_shutdown __P((void *));
int	sd_reassign_blocks __P((struct sd_softc *, u_long));
int	sd_interpret_sense __P((struct scsipi_xfer *));

int	sd_mode_sense __P((struct sd_softc *, u_int8_t, void *, size_t, int,
	    int, int *));
int	sd_mode_select __P((struct sd_softc *, u_int8_t, void *, size_t, int,
	    int));
int	sd_get_simplifiedparms __P((struct sd_softc *, struct disk_parms *,
	    int));
int	sd_get_capacity __P((struct sd_softc *, struct disk_parms *, int));
int	sd_get_parms __P((struct sd_softc *, struct disk_parms *, int));
int	sd_get_parms_page4 __P((struct sd_softc *, struct disk_parms *,
	    int));
int	sd_get_parms_page5 __P((struct sd_softc *, struct disk_parms *,
	    int));

int	sd_flush __P((struct sd_softc *, int));
int	sd_getcache __P((struct sd_softc *, int *));
int	sd_setcache __P((struct sd_softc *, int));

int	sdmatch __P((struct device *, struct cfdata *, void *));
void	sdattach __P((struct device *, struct device *, void *));
int	sdactivate __P((struct device *, enum devact));
int	sddetach __P((struct device *, int));

CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
    sdactivate);

extern struct cfdriver sd_cd;

const struct scsipi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "", "", ""},
	{T_DIRECT, T_REMOV,
	 "", "", ""},
	{T_OPTICAL, T_FIXED,
	 "", "", ""},
	{T_OPTICAL, T_REMOV,
	 "", "", ""},
	{T_SIMPLE_DIRECT, T_FIXED,
	 "", "", ""},
	{T_SIMPLE_DIRECT, T_REMOV,
	 "", "", ""},
};

dev_type_open(sdopen);
dev_type_close(sdclose);
dev_type_read(sdread);
dev_type_write(sdwrite);
dev_type_ioctl(sdioctl);
dev_type_strategy(sdstrategy);
dev_type_dump(sddump);
dev_type_size(sdsize);

const struct bdevsw sd_bdevsw = {
	sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
};

const struct cdevsw sd_cdevsw = {
	sdopen, sdclose, sdread, sdwrite, sdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

struct dkdriver sddkdriver = { sdstrategy };

const struct scsipi_periphsw sd_switch = {
	sd_interpret_sense,	/* check our error handler first */
	sdstart,		/* have a queue, served by this */
	NULL,			/* have no async handler */
	sddone,			/* deal with stats at interrupt time */
};

struct sd_mode_sense_data {
	/*
	 * XXX
	 * We are not going to parse this as-is -- it just has to be large
	 * enough.
	 */
	union {
		struct scsipi_mode_header small;
		struct scsipi_mode_header_big big;
	} header;
	struct scsi_blk_desc blk_desc;
	union scsi_disk_pages pages;
};

/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 */
int
sdmatch(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct scsipibus_attach_args *sa = aux;
	int priority;

	(void)scsipi_inqmatch(&sa->sa_inqbuf,
	    (caddr_t)sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
	    sizeof(sd_patterns[0]), &priority);

	return (priority);
}

/*
 * Attach routine common to atapi & scsi.
 */
void
sdattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sd_softc *sd = (void *)self;
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;
	int error, result;
	struct disk_parms *dp = &sd->params;
	char pbuf[9];

	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));

	sd->type = (sa->sa_inqbuf.type & SID_TYPE);
	if (sd->type == T_SIMPLE_DIRECT)
		periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;

	if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
	    periph->periph_version == 0)
		sd->flags |= SDF_ANCIENT;

	bufq_alloc(&sd->buf_queue,
	    BUFQ_DISK_DEFAULT_STRAT()|BUFQ_SORT_RAWBLOCK);

	/*
	 * Store information needed to contact our base driver
	 */
	sd->sc_periph = periph;

	periph->periph_dev = &sd->sc_dev;
	periph->periph_switch = &sd_switch;

	/*
	 * Increase our openings to the maximum-per-periph
	 * supported by the adapter.  This will either be
	 * clamped down or grown by the adapter if necessary.
	 */
	periph->periph_openings =
	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
	periph->periph_flags |= PERIPH_GROW_OPENINGS;

	/*
	 * Initialize and attach the disk structure.
	 */
	sd->sc_dk.dk_driver = &sddkdriver;
	sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
	disk_attach(&sd->sc_dk);

	/*
	 * Use the subdriver to request information regarding the drive.
	 */
	aprint_naive("\n");
	aprint_normal("\n");

	error = scsipi_test_unit_ready(periph,
	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
	aprint_normal("%s: ", sd->sc_dev.dv_xname);
	switch (result) {
	case SDGP_RESULT_OK:
		format_bytes(pbuf, sizeof(pbuf),
		    (u_int64_t)dp->disksize * dp->blksize);
		aprint_normal(
		"%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
		    (unsigned long long)dp->disksize);
		break;

	case SDGP_RESULT_OFFLINE:
		aprint_normal("drive offline");
		break;

	case SDGP_RESULT_UNFORMATTED:
		aprint_normal("unformatted media");
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result from get_parms");
		break;
#endif
	}
	aprint_normal("\n");

	/*
	 * Establish a shutdown hook so that we can ensure that
	 * our data has actually made it onto the platter at
	 * shutdown time.  Note that this relies on the fact
	 * that the shutdown hook code puts us at the head of
	 * the list (thus guaranteeing that our hook runs before
	 * our ancestors').
	 */
	if ((sd->sc_sdhook =
	    shutdownhook_establish(sd_shutdown, sd)) == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sd->sc_dev.dv_xname);

#if NRND > 0
	/*
	 * attach the device into the random source list
	 */
	rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif
}

int
sdactivate(self, act)
	struct device *self;
	enum devact act;
{
	int rv = 0;

	switch (act) {
	case DVACT_ACTIVATE:
		rv = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		/*
		 * Nothing to do; we key off the device's DVF_ACTIVE.
		 */
		break;
	}
	return (rv);
}

int
sddetach(self, flags)
	struct device *self;
	int flags;
{
	struct sd_softc *sd = (struct sd_softc *) self;
	struct buf *bp;
	int s, bmaj, cmaj, i, mn;

	/* locate the major number */
	bmaj = bdevsw_lookup_major(&sd_bdevsw);
	cmaj = cdevsw_lookup_major(&sd_cdevsw);

	s = splbio();

	/* Kill off any queued buffers. */
	while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}

	bufq_free(&sd->buf_queue);

	/* Kill off any pending commands. */
	scsipi_kill_pending(sd->sc_periph);

	splx(s);

	/* Nuke the vnodes for any open instances */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = SDMINOR(self->dv_unit, i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Detach from the disk list. */
	disk_detach(&sd->sc_dk);

	/* Get rid of the shutdown hook. */
	shutdownhook_disestablish(sd->sc_sdhook);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sd->rnd_source);
#endif

	return (0);
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 */
int
sdlock(sd)
	struct sd_softc *sd;
{
	int error;

	while ((sd->flags & SDF_LOCKED) != 0) {
		sd->flags |= SDF_WANTED;
		if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
			return (error);
	}
	sd->flags |= SDF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
void
sdunlock(sd)
	struct sd_softc *sd;
{

	sd->flags &= ~SDF_LOCKED;
	if ((sd->flags & SDF_WANTED) != 0) {
		sd->flags &= ~SDF_WANTED;
		wakeup(sd);
	}
}

/*
 * open the device.  Make sure the partition info is as up-to-date as can be.
 */
int
sdopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	struct scsipi_adapter *adapt;
	int unit, part;
	int error;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (ENXIO);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (ENXIO);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	adapt = periph->periph_channel->chan_adapter;
	part = SDPART(dev);

	SC_DEBUG(periph, SCSIPI_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if (sd->sc_dk.dk_openmask == 0 &&
	    (error = scsipi_adapter_addref(adapt)) != 0)
		return (error);

	if ((error = sdlock(sd)) != 0)
		goto bad4;

	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partitions.
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
		    (part != RAW_PART || fmt != S_IFCHR)) {
			error = EIO;
			goto bad3;
		}
	} else {
		int silent;

		if (part == RAW_PART && fmt == S_IFCHR)
			silent = XS_CTL_SILENT;
		else
			silent = 0;

		/* Check that it is still responding and ok. */
		error = scsipi_test_unit_ready(periph,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
		    silent);

		/*
		 * Start the pack spinning if necessary.  Always allow the
		 * raw partition to be opened, for raw IOCTLs.  Data transfers
		 * will check for SDEV_MEDIA_LOADED.
		 */
		if (error == EIO) {
			int error2;

			error2 = scsipi_start(periph, SSS_START, silent);
			switch (error2) {
			case 0:
				error = 0;
				break;
			case EIO:
			case EINVAL:
				break;
			default:
				error = error2;
				break;
			}
		}
		if (error) {
			if (silent)
				goto out;
			goto bad3;
		}

		periph->periph_flags |= PERIPH_OPEN;

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			/* Lock the pack in. */
			error = scsipi_prevent(periph, PR_PREVENT,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
			if (error)
				goto bad;
		}

		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			int param_error;
			periph->periph_flags |= PERIPH_MEDIA_LOADED;

			/*
			 * Load the physical device parameters.
			 *
			 * Note that if media is present but unformatted,
			 * we allow the open (so that it can be formatted!).
			 * The drive should refuse real I/O, if the media is
			 * unformatted.
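			 * (sd_get_parms() reports unformatted media as
			 * SDGP_RESULT_UNFORMATTED; see sd_get_capacity()
			 * below, which probes such media with READ FORMAT
			 * CAPACITIES.)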
			 */
			if ((param_error = sd_get_parms(sd, &sd->params, 0))
			     == SDGP_RESULT_OFFLINE) {
				error = ENXIO;
				goto bad2;
			}
			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));

			/* Load the partition info if not already loaded. */
			if (param_error == 0) {
				sdgetdisklabel(sd);
				SC_DEBUG(periph, SCSIPI_DB3,
				    ("Disklabel loaded "));
			}
		}
	}

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= sd->sc_dk.dk_label->d_npartitions ||
	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

out:	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
	sdunlock(sd);
	return (0);

bad2:
	periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

bad:
	if (sd->sc_dk.dk_openmask == 0) {
		if (periph->periph_flags & PERIPH_REMOVABLE)
			scsipi_prevent(periph, PR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		periph->periph_flags &= ~PERIPH_OPEN;
	}

bad3:
	sdunlock(sd);
bad4:
	if (sd->sc_dk.dk_openmask == 0)
		scsipi_adapter_delref(adapt);
	return (error);
}

/*
 * close the device.  Only called if we are the LAST occurrence of an open
 * device.  Convenient now but usually a pain.
 */
int
sdclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int part = SDPART(dev);
	int error;

	if ((error = sdlock(sd)) != 0)
		return (error);

	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	if (sd->sc_dk.dk_openmask == 0) {
		/*
		 * If the disk cache needs flushing, and the disk supports
		 * it, do it now.
		 */
		if ((sd->flags & SDF_DIRTY) != 0) {
			if (sd_flush(sd, 0)) {
				printf("%s: cache synchronization failed\n",
				    sd->sc_dev.dv_xname);
				sd->flags &= ~SDF_FLUSHING;
			} else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		}

		if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

		scsipi_wait_drain(periph);

		if (periph->periph_flags & PERIPH_REMOVABLE)
			scsipi_prevent(periph, PR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY);
		periph->periph_flags &= ~PERIPH_OPEN;

		scsipi_wait_drain(periph);

		scsipi_adapter_delref(adapt);
	}

	sdunlock(sd);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
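 * (The block number is converted to the device's logical block size and
 * offset to the start of the partition before the buf is queued; the
 * resulting absolute block number is stored in bp->b_rawblkno.)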
 */
void
sdstrategy(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct disklabel *lp;
	daddr_t blkno;
	int s;
	boolean_t sector_aligned;

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
	    ("%ld bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
	    (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
		if (periph->periph_flags & PERIPH_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	lp = sd->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks, offset must not be
	 * negative.
	 */
	if (lp->d_secsize == DEV_BSIZE) {
		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
	} else {
		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
	}
	if (!sector_aligned || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer.  If error, process.
	 * If end of partition, just return.
	 */
	if (SDPART(bp->b_dev) == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    sd->params.disksize512) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&sd->sc_dk, bp,
		    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
			goto done;
	}

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (SDPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk.
	 *
	 * XXX Only do disksort() if the current operating mode does not
	 * XXX include tagged queueing.
	 */
	BUFQ_PUT(&sd->buf_queue, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	sdstart(sd->sc_periph);

	splx(s);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy.  If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf.  The transfer request will call scsipi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * must be called at the correct (highish) spl level
 * sdstart() is called at splbio from sdstrategy and scsipi_done
 */
void
sdstart(periph)
	struct scsipi_periph *periph;
{
	struct sd_softc *sd = (void *)periph->periph_dev;
	struct disklabel *lp = sd->sc_dk.dk_label;
	struct buf *bp = 0;
	struct scsipi_rw_big cmd_big;
	struct scsi_rw cmd_small;
	struct scsipi_generic *cmdp;
	int nblks, cmdlen, error, flags;

	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
	/*
	 * Check if the device has room for another command
	 */
	while (periph->periph_active < periph->periph_openings) {
		/*
		 * there is excess capacity, but a special command waits.
		 * It'll need the adapter as soon as we clear out of the
		 * way and let it run (user level wait).
		 */
		if (periph->periph_flags & PERIPH_WAITING) {
			periph->periph_flags &= ~PERIPH_WAITING;
			wakeup((caddr_t)periph);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		if ((bp = BUFQ_GET(&sd->buf_queue)) == NULL)
			return;

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command.
		 */

		if (lp->d_secsize == DEV_BSIZE)
			nblks = bp->b_bcount >> DEV_BSHIFT;
		else
			nblks = howmany(bp->b_bcount, lp->d_secsize);

		/*
		 * Fill out the scsi command.  If the transfer will
		 * fit in a "small" cdb, use it.
		 */
		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
		    ((nblks & 0xff) == nblks) &&
		    !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
			/*
			 * We can fit in a small cdb.
			 */
			memset(&cmd_small, 0, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
			_lto3b(bp->b_rawblkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsipi_generic *)&cmd_small;
		} else {
			/*
			 * Need a large cdb.
			 */
			memset(&cmd_big, 0, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(bp->b_rawblkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsipi_generic *)&cmd_big;
		}

		/* Instrumentation. */
		disk_busy(&sd->sc_dk);

		/*
		 * Mark the disk dirty so that the cache will be
		 * flushed on close.
		 */
		if ((bp->b_flags & B_READ) == 0)
			sd->flags |= SDF_DIRTY;

		/*
		 * Figure out what flags to use.
		 */
		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
		if (bp->b_flags & B_READ)
			flags |= XS_CTL_DATA_IN;
		else
			flags |= XS_CTL_DATA_OUT;

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsipi_command(periph, cmdp, cmdlen,
		    (u_char *)bp->b_data, bp->b_bcount,
		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
		if (error) {
			disk_unbusy(&sd->sc_dk, 0, 0);
			printf("%s: not queued, error %d\n",
			    sd->sc_dev.dv_xname, error);
		}
	}
}

void
sddone(xs)
	struct scsipi_xfer *xs;
{
	struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;

	if (sd->flags & SDF_FLUSHING) {
		/* Flush completed, no longer dirty. */
		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}

	if (xs->bp != NULL) {
		disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid,
		    (xs->bp->b_flags & B_READ));
#if NRND > 0
		rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
#endif
	}
}

void
sdminphys(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	long max;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0.  However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0.  A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if ((sd->flags & SDF_ANCIENT) &&
	    ((sd->sc_periph->periph_flags &
	    (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
		max = sd->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
}

int
sdread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{

	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
}

int
sdwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{

	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
}

/*
 * Perform special action on behalf of the user
 * Knows about the internals of this device
 */
int
sdioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	int part = SDPART(dev);
	int error = 0;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel *newlabel = NULL;
#endif

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));

	/*
	 * If the device is not valid, some IOCTLs can still be
	 * handled on the raw partition.  Check this here.
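	 * (The permitted commands are the cases listed in the switch
	 * below; anything else fails with ENODEV if the device is not
	 * open, or EIO otherwise.)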
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
		switch (cmd) {
		case DIOCKLABEL:
		case DIOCWLABEL:
		case DIOCLOCK:
		case DIOCEJECT:
		case ODIOCEJECT:
		case DIOCGCACHE:
		case DIOCSCACHE:
		case SCIOCIDENTIFY:
		case OSCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			if (part == RAW_PART)
				break;
		/* FALLTHROUGH */
		default:
			if ((periph->periph_flags & PERIPH_OPEN) == 0)
				return (ENODEV);
			else
				return (EIO);
		}
	}

	switch (cmd) {
	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sd->sc_dk.dk_label);
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
		if (newlabel == NULL)
			return EIO;
		memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
		if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
			memcpy(addr, newlabel, sizeof (struct olddisklabel));
		else
			error = ENOTTY;
		free(newlabel, M_TEMP);
		return error;
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sd->sc_dk.dk_label->d_partitions[part];
		return (0);

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

		if ((flag & FWRITE) == 0)
			return (EBADF);

#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
			if (newlabel == NULL)
				return EIO;
			memset(newlabel, 0, sizeof *newlabel);
			memcpy(newlabel, addr, sizeof (struct olddisklabel));
			lp = newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((error = sdlock(sd)) != 0)
			goto bad;
		sd->flags |= SDF_LABELLING;

		error = setdisklabel(sd->sc_dk.dk_label,
		    lp, /*sd->sc_dk.dk_openmask : */0,
		    sd->sc_dk.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			    )
				error = writedisklabel(SDLABELDEV(dev),
				    sdstrategy, sd->sc_dk.dk_label,
				    sd->sc_dk.dk_cpulabel);
		}

		sd->flags &= ~SDF_LABELLING;
		sdunlock(sd);
bad:
#ifdef __HAVE_OLD_DISKLABEL
		if (newlabel != NULL)
			free(newlabel, M_TEMP);
#endif
		return (error);
	}

	case DIOCKLABEL:
		if (*(int *)addr)
			periph->periph_flags |= PERIPH_KEEP_LABEL;
		else
			periph->periph_flags &= ~PERIPH_KEEP_LABEL;
		return (0);

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sd->flags |= SDF_WLABEL;
		else
			sd->flags &= ~SDF_WLABEL;
		return (0);

	case DIOCLOCK:
		return (scsipi_prevent(periph,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));

	case DIOCEJECT:
		if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
			return (ENOTTY);
		if (*(int *)addr == 0) {
			/*
			 * Don't force eject: check that we are the only
			 * partition open.  If so, unlock it.
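			 * (A non-zero argument skips this check and falls
			 * straight through to the unconditional eject
			 * below.)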
			 */
			if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
			    sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
			    sd->sc_dk.dk_openmask) {
				error = scsipi_prevent(periph, PR_ALLOW,
				    XS_CTL_IGNORE_NOT_READY);
				if (error)
					return (error);
			} else {
				return (EBUSY);
			}
		}
		/* FALLTHROUGH */
	case ODIOCEJECT:
		return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
		    ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));

	case DIOCGDEFLABEL:
		sdgetdefaultlabel(sd, (struct disklabel *)addr);
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
		if (newlabel == NULL)
			return EIO;
		sdgetdefaultlabel(sd, newlabel);
		if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
			memcpy(addr, newlabel, sizeof (struct olddisklabel));
		else
			error = ENOTTY;
		free(newlabel, M_TEMP);
		return error;
#endif

	case DIOCGCACHE:
		return (sd_getcache(sd, (int *) addr));

	case DIOCSCACHE:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		return (sd_setcache(sd, *(int *) addr));

	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
			error = sd_flush(sd, 0);
			if (error)
				sd->flags &= ~SDF_FLUSHING;
			else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		} else
			error = 0;
		return (error);

	default:
		if (part != RAW_PART)
			return (ENOTTY);
		return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
	}

#ifdef DIAGNOSTIC
	panic("sdioctl: impossible");
#endif
}

void
sdgetdefaultlabel(sd, lp)
	struct sd_softc *sd;
	struct disklabel *lp;
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sd->params.blksize;
	lp->d_ntracks = sd->params.heads;
	lp->d_nsectors = sd->params.sectors;
	lp->d_ncylinders = sd->params.cyls;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	switch (scsipi_periph_bustype(sd->sc_periph)) {
	case SCSIPI_BUSTYPE_SCSI:
		lp->d_type = DTYPE_SCSI;
		break;
	case SCSIPI_BUSTYPE_ATAPI:
		lp->d_type = DTYPE_ATAPI;
		break;
	}
	/*
	 * XXX
	 * We could probe the mode pages to figure out what kind of disc it is.
	 * Is this worthwhile?
	 */
	strncpy(lp->d_typename, "mydisk", 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = sd->params.disksize;
	lp->d_rpm = sd->params.rot_rate;
	lp->d_interleave = 1;
	lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
	    D_REMOVABLE : 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}


/*
 * Load the label information on the named device
 */
void
sdgetdisklabel(sd)
	struct sd_softc *sd;
{
	struct disklabel *lp = sd->sc_dk.dk_label;
	const char *errstring;

	memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	sdgetdefaultlabel(sd, lp);

	if (lp->d_secpercyl == 0) {
		lp->d_secpercyl = 100;
		/* as long as it's not 0 - readdisklabel divides by it (?) */
	}

	/*
	 * Call the generic disklabel extraction routine
	 */
	errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
	    sdstrategy, lp, sd->sc_dk.dk_cpulabel);
	if (errstring) {
		printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
		return;
	}
}

void
sd_shutdown(arg)
	void *arg;
{
	struct sd_softc *sd = arg;

	/*
	 * If the disk cache needs to be flushed, and the disk supports
	 * it, flush it.  We're cold at this point, so we poll for
	 * completion.
	 */
	if ((sd->flags & SDF_DIRTY) != 0) {
		if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
			printf("%s: cache synchronization failed\n",
			    sd->sc_dev.dv_xname);
			sd->flags &= ~SDF_FLUSHING;
		} else
			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}
}

/*
 * Check Errors
 */
int
sd_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
	struct sd_softc *sd = (void *)periph->periph_dev;
	int s, error, retval = EJUSTRETURN;

	/*
	 * If the periph is already recovering, just do the normal
	 * error processing.
	 */
	if (periph->periph_flags & PERIPH_RECOVERING)
		return (retval);

	/*
	 * If the device is not open yet, let the generic code handle it.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (retval);

	/*
	 * If it isn't an extended or extended/deferred error, let
	 * the generic code handle it.
	 */
	if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
	    (sense->error_code & SSD_ERRCODE) != 0x71)
		return (retval);

	if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
	    sense->add_sense_code == 0x4) {
		if (sense->add_sense_code_qual == 0x01) {
			/*
			 * Unit In The Process Of Becoming Ready.
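			 * (ASC 0x04, ASCQ 0x01: freeze the periph and let
			 * the callout below thaw it and retry after five
			 * seconds.)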
			 */
			printf("%s: waiting for pack to spin up...\n",
			    sd->sc_dev.dv_xname);
			if (!callout_pending(&periph->periph_callout))
				scsipi_periph_freeze(periph, 1);
			callout_reset(&periph->periph_callout,
			    5 * hz, scsipi_periph_timed_thaw, periph);
			retval = ERESTART;
		} else if (sense->add_sense_code_qual == 0x02) {
			printf("%s: pack is stopped, restarting...\n",
			    sd->sc_dev.dv_xname);
			s = splbio();
			periph->periph_flags |= PERIPH_RECOVERING;
			splx(s);
			error = scsipi_start(periph, SSS_START,
			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
			if (error) {
				printf("%s: unable to restart pack\n",
				    sd->sc_dev.dv_xname);
				retval = error;
			} else
				retval = ERESTART;
			s = splbio();
			periph->periph_flags &= ~PERIPH_RECOVERING;
			splx(s);
		}
	}
	if ((sense->flags & SSD_KEY) == SKEY_MEDIUM_ERROR &&
	    sense->add_sense_code == 0x31 &&
	    sense->add_sense_code_qual == 0x00) { /* maybe for any asq ? */
		/* Medium Format Corrupted */
		retval = EFTYPE;
	}
	return (retval);
}


int
sdsize(dev)
	dev_t dev;
{
	struct sd_softc *sd;
	int part, unit, omask;
	int size;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (-1);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (-1);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (-1);

	part = SDPART(dev);
	omask = sd->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		size = -1;
	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	return (size);
}

/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
static struct scsipi_xfer sx;
static int sddoingadump;

/*
 * dump all of physical memory into the partition specified, starting
 * at offset 'dumplo' into the partition.
 */
int
sddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	struct sd_softc *sd;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int unit, part;
	int sectorsize;		/* size of a disk sector */
	int nsects;		/* number of sectors in partition */
	int sectoff;		/* sector offset of partition */
	int totwrt;		/* total number of sectors left to write */
	int nwrt;		/* current number of sectors to write */
	struct scsipi_rw_big cmd;	/* write command */
	struct scsipi_xfer *xs;	/* ... convenience */
	struct scsipi_periph *periph;
	struct scsipi_channel *chan;

	/* Check if recursive dump; if so, punt. */
	if (sddoingadump)
		return (EFAULT);

	/* Mark as active early. */
	sddoingadump = 1;

	unit = SDUNIT(dev);	/* Decompose unit & partition. */
	part = SDPART(dev);

	/* Check for acceptable drive number. */
	if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
		return (ENXIO);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	chan = periph->periph_channel;

	/* Make sure it was initialized. */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (ENXIO);

	/* Convert to disk sectors.  Request must be a multiple of size. */
	lp = sd->sc_dk.dk_label;
	sectorsize = lp->d_secsize;
	if ((size % sectorsize) != 0)
		return (EFAULT);
	totwrt = size / sectorsize;
	blkno = dbtob(blkno) / sectorsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + totwrt) > nsects))
		return (EINVAL);

	/* Offset block number to start of partition. */
	blkno += sectoff;

	xs = &sx;

	while (totwrt > 0) {
		nwrt = totwrt;		/* XXX */
#ifndef	SD_DUMP_NOT_TRUSTED
		/*
		 * Fill out the scsi command
		 */
		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = WRITE_BIG;
		_lto4b(blkno, cmd.addr);
		_lto2b(nwrt, cmd.length);
		/*
		 * Fill out the scsipi_xfer structure
		 *    Note: we cannot sleep as we may be an interrupt
		 * don't use scsipi_command() as it may want to wait
		 * for an xs.
		 */
		memset(xs, 0, sizeof(sx));
		xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
		    XS_CTL_DATA_OUT;
		xs->xs_status = 0;
		xs->xs_periph = periph;
		xs->xs_retries = SDRETRIES;
		xs->timeout = 10000;	/* 10000 millisecs for a disk ! */
		xs->cmd = (struct scsipi_generic *)&cmd;
		xs->cmdlen = sizeof(cmd);
		xs->resid = nwrt * sectorsize;
		xs->error = XS_NOERROR;
		xs->bp = 0;
		xs->data = va;
		xs->datalen = nwrt * sectorsize;

		/*
		 * Pass all this info to the scsi driver.
		 */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
		if ((xs->xs_status & XS_STS_DONE) == 0 ||
		    xs->error != XS_NOERROR)
			return (EIO);
#else	/* SD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first... */
		printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
		delay(500 * 1000);	/* half a second */
#endif	/* SD_DUMP_NOT_TRUSTED */

		/* update block count */
		totwrt -= nwrt;
		blkno += nwrt;
		va += sectorsize * nwrt;
	}
	sddoingadump = 0;
	return (0);
}

int
sd_mode_sense(sd, byte2, sense, size, page, flags, big)
	struct sd_softc *sd;
	u_int8_t byte2;
	void *sense;
	size_t size;
	int page, flags;
	int *big;
{

	if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
	    !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
		*big = 1;
		return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
		    size + sizeof(struct scsipi_mode_header_big),
		    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
	} else {
		*big = 0;
		return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
		    size + sizeof(struct scsipi_mode_header),
		    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
	}
}

int
sd_mode_select(sd, byte2, sense, size, flags, big)
	struct sd_softc *sd;
	u_int8_t byte2;
	void *sense;
	size_t size;
	int flags, big;
{

	if (big) {
		struct scsipi_mode_header_big *header = sense;

		_lto2b(0, header->data_length);
		return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
		    size + sizeof(struct scsipi_mode_header_big),
		    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
	} else {
		struct scsipi_mode_header *header = sense;

		header->data_length = 0;
		return scsipi_mode_select(sd->sc_periph, byte2, sense,
		    size + sizeof(struct scsipi_mode_header),
		    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
	}
}

int
sd_get_simplifiedparms(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	struct {
		struct scsipi_mode_header header;
		/* no block descriptor */
		u_int8_t pg_code;	/* page code (should be 6) */
		u_int8_t pg_length;	/* page length (should be 11) */
		u_int8_t wcd;		/* bit0: cache disable */
		u_int8_t lbs[2];	/* logical block size */
		u_int8_t size[5];	/* number of log. blocks */
		u_int8_t pp;		/* power/performance */
		u_int8_t flags;
		u_int8_t resvd;
	} scsipi_sense;
	u_int64_t sectors;
	int error;

	/*
	 * scsipi_size (ie "read capacity") and mode sense page 6
	 * give the same information.  Do both for now, and check
	 * for consistency.
	 * XXX probably differs for removable media
	 */
	dp->blksize = 512;
	if ((sectors = scsipi_size(sd->sc_periph, flags)) == 0)
		return (SDGP_RESULT_OFFLINE);		/* XXX? */

	error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
	    &scsipi_sense.header, sizeof(scsipi_sense),
	    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);

	if (error != 0)
		return (SDGP_RESULT_OFFLINE);		/* XXX? */

	dp->blksize = _2btol(scsipi_sense.lbs);
	if (dp->blksize == 0)
		dp->blksize = 512;

	/*
	 * Create a pseudo-geometry.
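	 * (64 heads and 32 sectors per track, the same fallback geometry
	 * that sd_get_parms() fabricates when no geometry page is
	 * available.)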
	 */
	dp->heads = 64;
	dp->sectors = 32;
	dp->cyls = sectors / (dp->heads * dp->sectors);
	dp->disksize = _5btol(scsipi_sense.size);
	if (dp->disksize <= UINT32_MAX && dp->disksize != sectors) {
		printf("RBC size: mode sense=%llu, get cap=%llu\n",
		    (unsigned long long)dp->disksize,
		    (unsigned long long)sectors);
		dp->disksize = sectors;
	}
	dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;

	return (SDGP_RESULT_OK);
}

/*
 * Get the scsi driver to send a full inquiry to the device and use the
 * results to fill out the disk parameter structure.
 */
int
sd_get_capacity(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	u_int64_t sectors;
	int error;
#if 0
	int i;
	u_int8_t *p;
#endif

	dp->disksize = sectors = scsipi_size(sd->sc_periph, flags);
	if (sectors == 0) {
		struct scsipi_read_format_capacities cmd;
		struct {
			struct scsipi_capacity_list_header header;
			struct scsipi_capacity_descriptor desc;
		} __attribute__((packed)) data;

		memset(&cmd, 0, sizeof(cmd));
		memset(&data, 0, sizeof(data));
		cmd.opcode = READ_FORMAT_CAPACITIES;
		_lto2b(sizeof(data), cmd.length);

		error = scsipi_command(sd->sc_periph, (void *)&cmd, sizeof(cmd),
		    (void *)&data, sizeof(data), SDRETRIES, 20000, NULL,
		    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK);
		if (error == EFTYPE) {
			/* Medium Format Corrupted, handle as not formatted */
			return (SDGP_RESULT_UNFORMATTED);
		}
		if (error || data.header.length == 0)
			return (SDGP_RESULT_OFFLINE);

#if 0
		printf("rfc: length=%d\n", data.header.length);
		printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
#endif
		switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
		case SCSIPI_CAP_DESC_CODE_RESERVED:
		case SCSIPI_CAP_DESC_CODE_FORMATTED:
			break;

		case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
			return (SDGP_RESULT_UNFORMATTED);

		case SCSIPI_CAP_DESC_CODE_NONE:
			return (SDGP_RESULT_OFFLINE);
		}

		dp->disksize = sectors = _4btol(data.desc.nblks);
		if (sectors == 0)
			return (SDGP_RESULT_OFFLINE);		/* XXX? */

		dp->blksize = _3btol(data.desc.blklen);
		if (dp->blksize == 0)
			dp->blksize = 512;
	} else {
		struct sd_mode_sense_data scsipi_sense;
		int big, bsize;
		struct scsi_blk_desc *bdesc;

		memset(&scsipi_sense, 0, sizeof(scsipi_sense));
		error = sd_mode_sense(sd, 0, &scsipi_sense,
		    sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
		dp->blksize = 512;
		if (!error) {
			if (big) {
				bdesc = (void *)(&scsipi_sense.header.big + 1);
				bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
			} else {
				bdesc = (void *)(&scsipi_sense.header.small + 1);
				bsize = scsipi_sense.header.small.blk_desc_len;
			}

#if 0
			printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
			printf("page 0 bsize=%d\n", bsize);
			printf("page 0 ok\n");
#endif

			if (bsize >= 8) {
				dp->blksize = _3btol(bdesc->blklen);
				if (dp->blksize == 0)
					dp->blksize = 512;
			}
		}
	}

	dp->disksize512 = (sectors * dp->blksize) / DEV_BSIZE;
	return (0);
}

int
sd_get_parms_page4(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	struct sd_mode_sense_data scsipi_sense;
	int error;
	int big, poffset, byte2;
	union scsi_disk_pages *pages;
#if 0
	int i;
	u_int8_t *p;
#endif

	byte2 = SMS_DBD;
again:
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, byte2, &scsipi_sense,
	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
	    sizeof(scsipi_sense.pages.rigid_geometry), 4,
	    flags | XS_CTL_SILENT, &big);
	if (error) {
		if (byte2 == SMS_DBD) {
			/* No result; try once more with DBD off */
			byte2 = 0;
			goto again;
		}
		return (error);
	}

	if (big) {
		poffset = sizeof scsipi_sense.header.big;
		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
	} else {
		poffset = sizeof scsipi_sense.header.small;
		poffset += scsipi_sense.header.small.blk_desc_len;
	}

	pages = (void *)((u_long)&scsipi_sense + poffset);
#if 0
	printf("page 4 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
	printf("page 4 pg_code=%d sense=%p/%p\n", pages->rigid_geometry.pg_code, &scsipi_sense, pages);
#endif

	if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
		return (ERESTART);

	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
	    ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
	    _3btol(pages->rigid_geometry.ncyl),
	    pages->rigid_geometry.nheads,
	    _2btol(pages->rigid_geometry.st_cyl_wp),
	    _2btol(pages->rigid_geometry.st_cyl_rwc),
	    _2btol(pages->rigid_geometry.land_zone)));

	/*
	 * KLUDGE!! (for zone recorded disks)
	 * give a number of sectors so that sec * trks * cyls
	 * is <= disk_size
	 * can lead to wasted space! THINK ABOUT THIS !
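	 * (dp->sectors below is the integer quotient
	 * disksize / (heads * cyls), so any remainder is simply not
	 * represented in the fabricated geometry.)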
	 */
	dp->heads = pages->rigid_geometry.nheads;
	dp->cyls = _3btol(pages->rigid_geometry.ncyl);
	if (dp->heads == 0 || dp->cyls == 0)
		return (ERESTART);
	dp->sectors = dp->disksize / (dp->heads * dp->cyls);	/* XXX */

	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
	if (dp->rot_rate == 0)
		dp->rot_rate = 3600;

#if 0
	printf("page 4 ok\n");
#endif
	return (0);
}

int
sd_get_parms_page5(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	struct sd_mode_sense_data scsipi_sense;
	int error;
	int big, poffset, byte2;
	union scsi_disk_pages *pages;
#if 0
	int i;
	u_int8_t *p;
#endif

	byte2 = SMS_DBD;
again:
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, byte2, &scsipi_sense,
	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
	    sizeof(scsipi_sense.pages.flex_geometry), 5,
	    flags | XS_CTL_SILENT, &big);
	if (error) {
		if (byte2 == SMS_DBD) {
			/* No result; try once more with DBD off */
			byte2 = 0;
			goto again;
		}
		return (error);
	}

	if (big) {
		poffset = sizeof scsipi_sense.header.big;
		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
	} else {
		poffset = sizeof scsipi_sense.header.small;
		poffset += scsipi_sense.header.small.blk_desc_len;
	}

	pages = (void *)((u_long)&scsipi_sense + poffset);
#if 0
	printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
	printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages);
#endif

	if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
		return (ERESTART);

	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
	    ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
	    _3btol(pages->flex_geometry.ncyl),
	    pages->flex_geometry.nheads,
	    pages->flex_geometry.ph_sec_tr,
	    _2btol(pages->flex_geometry.bytes_s)));

	dp->heads = pages->flex_geometry.nheads;
	dp->cyls = _2btol(pages->flex_geometry.ncyl);
	dp->sectors = pages->flex_geometry.ph_sec_tr;
	if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
		return (ERESTART);

	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
	if (dp->rot_rate == 0)
		dp->rot_rate = 3600;

#if 0
	printf("page 5 ok\n");
#endif
	return (0);
}

int
sd_get_parms(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	int error;

	/*
	 * If offline, the SDEV_MEDIA_LOADED flag will be
	 * cleared by the caller if necessary.
	 */
	if (sd->type == T_SIMPLE_DIRECT)
		return (sd_get_simplifiedparms(sd, dp, flags));

	error = sd_get_capacity(sd, dp, flags);
	if (error)
		return (error);

	if (sd->type == T_OPTICAL)
		goto page0;

	if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
		if (!sd_get_parms_page5(sd, dp, flags) ||
		    !sd_get_parms_page4(sd, dp, flags))
			return (SDGP_RESULT_OK);
	} else {
		if (!sd_get_parms_page4(sd, dp, flags) ||
		    !sd_get_parms_page5(sd, dp, flags))
			return (SDGP_RESULT_OK);
	}

page0:
	printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
	/* Try calling driver's method for figuring out geometry. */
	if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
	    !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
		(sd->sc_periph, dp, dp->disksize)) {
		/*
		 * Use adaptec standard fictitious geometry
		 * this depends on which controller (e.g. 1542C is
		 * different. but we have to put SOMETHING here..)
		 */
		dp->heads = 64;
		dp->sectors = 32;
		dp->cyls = dp->disksize / (64 * 32);
	}
	dp->rot_rate = 3600;
	return (SDGP_RESULT_OK);
}

int
sd_flush(sd, flags)
	struct sd_softc *sd;
	int flags;
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsi_synchronize_cache cmd;

	/*
	 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
	 * We issue with address 0 length 0, which should be
	 * interpreted by the device as "all remaining blocks
	 * starting at address 0".  We ignore ILLEGAL REQUEST
	 * in the event that the command is not supported by
	 * the device, and poll for completion so that we know
	 * that the cache has actually been flushed.
	 *
	 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
	 * command, as indicated by our quirks flags.
	 *
	 * XXX What about older devices?
	 */
	if (periph->periph_version < 2 ||
	    (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
		return (0);

	sd->flags |= SDF_FLUSHING;
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_SYNCHRONIZE_CACHE;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
}

int
sd_getcache(sd, bitsp)
	struct sd_softc *sd;
	int *bitsp;
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct sd_mode_sense_data scsipi_sense;
	int error, bits = 0;
	int big;
	union scsi_disk_pages *pages;

	if (periph->periph_version < 2)
		return (EOPNOTSUPP);

	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
	if (error)
		return (error);

	if (big)
		pages = (void *)(&scsipi_sense.header.big + 1);
	else
		pages = (void *)(&scsipi_sense.header.small + 1);

	if ((pages->caching_params.flags & CACHING_RCD) == 0)
		bits |= DKCACHE_READ;
	if (pages->caching_params.flags & CACHING_WCE)
		bits |= DKCACHE_WRITE;
	if (pages->caching_params.pg_code & PGCODE_PS)
		bits |= DKCACHE_SAVE;

	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params),
	    SMS_PAGE_CTRL_CHANGEABLE|8, 0, &big);
	if (error == 0) {
		if (big)
			pages = (void *)(&scsipi_sense.header.big + 1);
		else
			pages = (void *)(&scsipi_sense.header.small + 1);

		if (pages->caching_params.flags & CACHING_RCD)
			bits |= DKCACHE_RCHANGE;
		if (pages->caching_params.flags & CACHING_WCE)
			bits |= DKCACHE_WCHANGE;
	}

	*bitsp = bits;

	return (0);
}

int
sd_setcache(sd, bits)
	struct sd_softc *sd;
	int bits;
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct sd_mode_sense_data scsipi_sense;
	int error;
	uint8_t oflags, byte2 = 0;
	int big;
	union scsi_disk_pages *pages;

	if (periph->periph_version < 2)
		return (EOPNOTSUPP);

	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
	if (error)
		return (error);

	if (big)
		pages = (void *)(&scsipi_sense.header.big + 1);
	else
		pages = (void *)(&scsipi_sense.header.small + 1);

	oflags = pages->caching_params.flags;

	if (bits & DKCACHE_READ)
		pages->caching_params.flags &= ~CACHING_RCD;
	else
		pages->caching_params.flags |= CACHING_RCD;

	if (bits & DKCACHE_WRITE)
		pages->caching_params.flags |= CACHING_WCE;
	else
		pages->caching_params.flags &= ~CACHING_WCE;

	if (oflags == pages->caching_params.flags)
		return (0);

	pages->caching_params.pg_code &= PGCODE_MASK;

	if (bits & DKCACHE_SAVE)
		byte2 |= SMS_SP;

	return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
	    sizeof(struct scsipi_mode_page_header) +
	    pages->caching_params.pg_length, 0, big));
}