/*	$NetBSD: sd.c,v 1.189 2002/10/18 14:31:15 junyoung Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@dialix.oz.au)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 *
 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.189 2002/10/18 14:31:15 junyoung Exp $");

#include "opt_scsi.h"
#include "opt_bufq.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/sdvar.h>

#include "sd.h"		/* NSD_SCSIBUS and NSD_ATAPIBUS come from here */

#define	SDUNIT(dev)			DISKUNIT(dev)
#define	SDPART(dev)			DISKPART(dev)
#define	SDMINOR(unit, part)		DISKMINOR(unit, part)
#define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)

#define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))

int	sdlock __P((struct sd_softc *));
void	sdunlock __P((struct sd_softc *));
void	sdminphys __P((struct buf *));
void	sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
void	sdgetdisklabel __P((struct sd_softc *));
void	sdstart __P((struct scsipi_periph *));
void	sddone __P((struct scsipi_xfer *));
void	sd_shutdown __P((void *));
int	sd_reassign_blocks __P((struct sd_softc *, u_long));
int	sd_interpret_sense __P((struct scsipi_xfer *));

extern struct cfdriver sd_cd;

dev_type_open(sdopen);
dev_type_close(sdclose);
dev_type_read(sdread);
dev_type_write(sdwrite);
dev_type_ioctl(sdioctl);
dev_type_strategy(sdstrategy);
dev_type_dump(sddump);
dev_type_size(sdsize);

const struct bdevsw sd_bdevsw = {
	sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
};

const struct cdevsw sd_cdevsw = {
	sdopen, sdclose, sdread, sdwrite, sdioctl,
	nostop, notty, nopoll, nommap, D_DISK
};

struct dkdriver sddkdriver = { sdstrategy };

const struct scsipi_periphsw sd_switch = {
	sd_interpret_sense,	/* check our error handler first */
	sdstart,		/* have a queue, served by this */
	NULL,			/* have no async handler */
	sddone,			/* deal with stats at interrupt time */
};

/*
 * Attach routine common to atapi & scsi.
 */
void
sdattach(parent, sd, periph, ops)
	struct device *parent;
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	const struct sd_ops *ops;
{
	int error, result;
	struct disk_parms *dp = &sd->params;
	char pbuf[9];

	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));

#ifdef NEW_BUFQ_STRATEGY
	bufq_alloc(&sd->buf_queue, BUFQ_READ_PRIO|BUFQ_SORT_RAWBLOCK);
#else
	bufq_alloc(&sd->buf_queue, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
#endif
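
	/*
	 * Illustrative note (not in the original source): both strategies
	 * sort requests by bp->b_rawblkno (BUFQ_SORT_RAWBLOCK).  As I
	 * understand the bufq framework, BUFQ_DISKSORT is the traditional
	 * one-way elevator sort, while BUFQ_READ_PRIO keeps reads on a
	 * separate queue and serves them ahead of writes.
	 */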

	/*
	 * Store information needed to contact our base driver
	 */
	sd->sc_periph = periph;
	sd->sc_ops = ops;

	periph->periph_dev = &sd->sc_dev;
	periph->periph_switch = &sd_switch;

	/*
	 * Increase our openings to the maximum-per-periph
	 * supported by the adapter.  This will either be
	 * clamped down or grown by the adapter if necessary.
	 */
	periph->periph_openings =
	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
	periph->periph_flags |= PERIPH_GROW_OPENINGS;

	/*
	 * Initialize and attach the disk structure.
	 */
	sd->sc_dk.dk_driver = &sddkdriver;
	sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
	disk_attach(&sd->sc_dk);

	/*
	 * Use the subdriver to request information regarding the drive.
	 */
	printf("\n");

	error = scsipi_start(periph, SSS_START,
	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = (*sd->sc_ops->sdo_get_parms)(sd, &sd->params,
		    XS_CTL_DISCOVERY);
	printf("%s: ", sd->sc_dev.dv_xname);
	switch (result) {
	case SDGP_RESULT_OK:
		format_bytes(pbuf, sizeof(pbuf),
		    (u_int64_t)dp->disksize * dp->blksize);
		printf(
		    "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %lu sectors",
		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
		    dp->disksize);
		break;

	case SDGP_RESULT_OFFLINE:
		printf("drive offline");
		break;

	case SDGP_RESULT_UNFORMATTED:
		printf("unformatted media");
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result from get_parms");
		break;
#endif
	}
	printf("\n");

	/*
	 * Establish a shutdown hook so that we can ensure that
	 * our data has actually made it onto the platter at
	 * shutdown time.  Note that this relies on the fact
	 * that the shutdown hook code puts us at the head of
	 * the list (thus guaranteeing that our hook runs before
	 * our ancestors').
	 */
	if ((sd->sc_sdhook =
	    shutdownhook_establish(sd_shutdown, sd)) == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sd->sc_dev.dv_xname);

#if NRND > 0
	/*
	 * attach the device into the random source list
	 */
	rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif
}

int
sdactivate(self, act)
	struct device *self;
	enum devact act;
{
	int rv = 0;

	switch (act) {
	case DVACT_ACTIVATE:
		rv = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		/*
		 * Nothing to do; we key off the device's DVF_ACTIVE.
		 */
		break;
	}
	return (rv);
}

int
sddetach(self, flags)
	struct device *self;
	int flags;
{
	struct sd_softc *sd = (struct sd_softc *) self;
	struct buf *bp;
	int s, bmaj, cmaj, i, mn;

	/* locate the major number */
	bmaj = bdevsw_lookup_major(&sd_bdevsw);
	cmaj = cdevsw_lookup_major(&sd_cdevsw);

	s = splbio();

	/* Kill off any queued buffers. */
	while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}

	bufq_free(&sd->buf_queue);

	/* Kill off any pending commands. */
	scsipi_kill_pending(sd->sc_periph);

	splx(s);

	/* Nuke the vnodes for any open instances */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = SDMINOR(self->dv_unit, i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}
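
	/*
	 * Illustrative note (not in the original source): SDMINOR() is just
	 * DISKMINOR(), i.e. unit * MAXPARTITIONS + part, so on a port with
	 * MAXPARTITIONS == 8 the loop above revokes block and character
	 * minors 8*unit through 8*unit+7 for this unit.
	 */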

	/* Detach from the disk list. */
	disk_detach(&sd->sc_dk);

	/* Get rid of the shutdown hook. */
	shutdownhook_disestablish(sd->sc_sdhook);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sd->rnd_source);
#endif

	return (0);
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 */
int
sdlock(sd)
	struct sd_softc *sd;
{
	int error;

	while ((sd->flags & SDF_LOCKED) != 0) {
		sd->flags |= SDF_WANTED;
		if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
			return (error);
	}
	sd->flags |= SDF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
void
sdunlock(sd)
	struct sd_softc *sd;
{

	sd->flags &= ~SDF_LOCKED;
	if ((sd->flags & SDF_WANTED) != 0) {
		sd->flags &= ~SDF_WANTED;
		wakeup(sd);
	}
}

/*
 * open the device. Make sure the partition info is as up-to-date as can be.
 */
int
sdopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	struct scsipi_adapter *adapt;
	int unit, part;
	int error;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (ENXIO);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (ENXIO);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	adapt = periph->periph_channel->chan_adapter;
	part = SDPART(dev);

	SC_DEBUG(periph, SCSIPI_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if (sd->sc_dk.dk_openmask == 0 &&
	    (error = scsipi_adapter_addref(adapt)) != 0)
		return (error);

	if ((error = sdlock(sd)) != 0)
		goto bad4;

	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partitions.
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
		    (part != RAW_PART || fmt != S_IFCHR)) {
			error = EIO;
			goto bad3;
		}
	} else {
		/* Check that it is still responding and ok. */
		error = scsipi_test_unit_ready(periph,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
		    XS_CTL_IGNORE_NOT_READY);
		if (error)
			goto bad3;

		/*
		 * Start the pack spinning if necessary. Always allow the
		 * raw partition to be opened, for raw IOCTLs. Data transfers
		 * will check for SDEV_MEDIA_LOADED.
		 */
		error = scsipi_start(periph, SSS_START,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT);
		if (error) {
			if (part != RAW_PART || fmt != S_IFCHR)
				goto bad3;
			else
				goto out;
		}

		periph->periph_flags |= PERIPH_OPEN;

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			/* Lock the pack in. */
			error = scsipi_prevent(periph, PR_PREVENT,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
			if (error)
				goto bad;
		}

		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			periph->periph_flags |= PERIPH_MEDIA_LOADED;

			/*
			 * Load the physical device parameters.
			 *
			 * Note that if media is present but unformatted,
			 * we allow the open (so that it can be formatted!).
			 * The drive should refuse real I/O, if the media is
			 * unformatted.
			 */
			if ((*sd->sc_ops->sdo_get_parms)(sd, &sd->params,
			    0) == SDGP_RESULT_OFFLINE) {
				error = ENXIO;
				goto bad2;
			}
			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));

			/* Load the partition info if not already loaded. */
			sdgetdisklabel(sd);
			SC_DEBUG(periph, SCSIPI_DB3, ("Disklabel loaded "));
		}
	}

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= sd->sc_dk.dk_label->d_npartitions ||
	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

out:	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
	sdunlock(sd);
	return (0);

bad2:
	periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

bad:
	if (sd->sc_dk.dk_openmask == 0) {
		scsipi_prevent(periph, PR_ALLOW,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
		periph->periph_flags &= ~PERIPH_OPEN;
	}

bad3:
	sdunlock(sd);
bad4:
	if (sd->sc_dk.dk_openmask == 0)
		scsipi_adapter_delref(adapt);
	return (error);
}

/*
 * close the device.  Only called if we are the LAST occurrence of an open
 * device.  Convenient now but usually a pain.
 */
int
sdclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int part = SDPART(dev);
	int error;

	if ((error = sdlock(sd)) != 0)
		return (error);

	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	if (sd->sc_dk.dk_openmask == 0) {
		/*
		 * If the disk cache needs flushing, and the disk supports
		 * it, do it now.
		 */
		if ((sd->flags & SDF_DIRTY) != 0 &&
		    sd->sc_ops->sdo_flush != NULL) {
			if ((*sd->sc_ops->sdo_flush)(sd, 0)) {
				printf("%s: cache synchronization failed\n",
				    sd->sc_dev.dv_xname);
				sd->flags &= ~SDF_FLUSHING;
			} else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		}

		if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

		scsipi_wait_drain(periph);

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			scsipi_prevent(periph, PR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY);
		}
		periph->periph_flags &= ~PERIPH_OPEN;

		scsipi_wait_drain(periph);

		scsipi_adapter_delref(adapt);
	}

	sdunlock(sd);
	return (0);
}
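
/*
 * Illustrative note (not in the original source): dk_copenmask and
 * dk_bopenmask each carry one bit per partition, so opening the character
 * device for partition 0 sets bit 0 of dk_copenmask, and dk_openmask is
 * simply the union of the two.  The cache flush and PR_ALLOW above
 * therefore happen only on the last close of the unit.
 */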

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct disklabel *lp;
	daddr_t blkno;
	int s;
	boolean_t sector_aligned;

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
	    ("%ld bytes @ blk %d\n", bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
	    (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
		if (periph->periph_flags & PERIPH_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	lp = sd->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks, offset must not be
	 * negative.
	 */
	if (lp->d_secsize == DEV_BSIZE) {
		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
	} else {
		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
	}
	if (!sector_aligned || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer.  If error, process.
	 * If end of partition, just return.
	 */
	if (SDPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(bp, lp,
	    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (SDPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk.
	 *
	 * XXX Only do disksort() if the current operating mode does not
	 * XXX include tagged queueing.
	 */
	BUFQ_PUT(&sd->buf_queue, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	sdstart(sd->sc_periph);

	splx(s);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
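
/*
 * Worked example (illustrative, not in the original source): b_blkno is
 * expressed in DEV_BSIZE (512-byte) units.  On a drive with 2048-byte
 * sectors the conversion above divides by 2048/512 == 4, so a request at
 * b_blkno 100 lands on logical block 25; with 512-byte sectors b_blkno is
 * used unchanged.  The partition offset is then added, except for RAW_PART,
 * which spans the whole unit.
 */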

/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy.  If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf.  The transfer request will call scsipi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy).
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * must be called at the correct (highish) spl level
 * sdstart() is called at splbio from sdstrategy and scsipi_done
 */
void
sdstart(periph)
	struct scsipi_periph *periph;
{
	struct sd_softc *sd = (void *)periph->periph_dev;
	struct disklabel *lp = sd->sc_dk.dk_label;
	struct buf *bp = 0;
	struct scsipi_rw_big cmd_big;
#if NSD_SCSIBUS > 0
	struct scsi_rw cmd_small;
#endif
	struct scsipi_generic *cmdp;
	int nblks, cmdlen, error, flags;

	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
	/*
	 * Check if the device has room for another command
	 */
	while (periph->periph_active < periph->periph_openings) {
		/*
		 * There is excess capacity, but a special command waits.
		 * It'll need the adapter as soon as we clear out of the
		 * way and let it run (user level wait).
		 */
		if (periph->periph_flags & PERIPH_WAITING) {
			periph->periph_flags &= ~PERIPH_WAITING;
			wakeup((caddr_t)periph);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		if ((bp = BUFQ_GET(&sd->buf_queue)) == NULL)
			return;

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened.
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command.
		 */

		if (lp->d_secsize == DEV_BSIZE)
			nblks = bp->b_bcount >> DEV_BSHIFT;
		else
			nblks = howmany(bp->b_bcount, lp->d_secsize);

#if NSD_SCSIBUS > 0
		/*
		 * Fill out the scsi command.  If the transfer will
		 * fit in a "small" cdb, use it.
		 */
		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
		    ((nblks & 0xff) == nblks) &&
		    !(periph->periph_quirks & PQUIRK_ONLYBIG) &&
		    scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_SCSI) {
			/*
			 * We can fit in a small cdb.
			 */
			memset(&cmd_small, 0, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
			_lto3b(bp->b_rawblkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsipi_generic *)&cmd_small;
		} else
#endif /* NSD_SCSIBUS > 0 */
		{
			/*
			 * Need a large cdb.
			 */
			memset(&cmd_big, 0, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(bp->b_rawblkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsipi_generic *)&cmd_big;
		}
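
		/*
		 * Illustrative note (not in the original source): the 6-byte
		 * READ/WRITE cdb carries a 21-bit LBA and an 8-bit transfer
		 * length, hence the 0x1fffff and 0xff masks above.  With
		 * 512-byte sectors that limits the small cdb to the first
		 * 1 GB of the disk and to 255 blocks per transfer; anything
		 * beyond that falls through to the 10-byte READ/WRITE cdb.
		 */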

		/* Instrumentation. */
		disk_busy(&sd->sc_dk);

		/*
		 * Mark the disk dirty so that the cache will be
		 * flushed on close.
		 */
		if ((bp->b_flags & B_READ) == 0)
			sd->flags |= SDF_DIRTY;

		/*
		 * Figure out what flags to use.
		 */
		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
		if (bp->b_flags & B_READ)
			flags |= XS_CTL_DATA_IN;
		else
			flags |= XS_CTL_DATA_OUT;

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsipi_command(periph, cmdp, cmdlen,
		    (u_char *)bp->b_data, bp->b_bcount,
		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
		if (error) {
			disk_unbusy(&sd->sc_dk, 0);
			printf("%s: not queued, error %d\n",
			    sd->sc_dev.dv_xname, error);
		}
	}
}

void
sddone(xs)
	struct scsipi_xfer *xs;
{
	struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;

	if (sd->flags & SDF_FLUSHING) {
		/* Flush completed, no longer dirty. */
		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}

	if (xs->bp != NULL) {
		disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid);
#if NRND > 0
		rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
#endif
	}
}

void
sdminphys(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	long max;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0.  However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0.  A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
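	/*
	 * Worked example (illustrative, not in the original source): with
	 * the usual 512-byte sector size the clamp below caps a transfer at
	 * 255 * 512 = 130560 bytes (just under 128 KB); larger sectors raise
	 * the cap proportionally.
	 */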
	if ((sd->flags & SDF_ANCIENT) &&
	    ((sd->sc_periph->periph_flags &
	    (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
		max = sd->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	(*sd->sc_periph->periph_channel->chan_adapter->adapt_minphys)(bp);
}

int
sdread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{

	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
}

int
sdwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{

	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
}

/*
 * Perform special action on behalf of the user
 * Knows about the internals of this device
 */
int
sdioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	int part = SDPART(dev);
	int error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));

	/*
	 * If the device is not valid, some IOCTLs can still be
	 * handled on the raw partition. Check this here.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
		switch (cmd) {
		case DIOCKLABEL:
		case DIOCWLABEL:
		case DIOCLOCK:
		case DIOCEJECT:
		case ODIOCEJECT:
		case DIOCGCACHE:
		case DIOCSCACHE:
		case SCIOCIDENTIFY:
		case OSCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			if (part == RAW_PART)
				break;
			/* FALLTHROUGH */
		default:
			if ((periph->periph_flags & PERIPH_OPEN) == 0)
				return (ENODEV);
			else
				return (EIO);
		}
	}

	switch (cmd) {
	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sd->sc_dk.dk_label);
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sd->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sd->sc_dk.dk_label->d_partitions[part];
		return (0);

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		if ((error = sdlock(sd)) != 0)
			return (error);
		sd->flags |= SDF_LABELLING;

		error = setdisklabel(sd->sc_dk.dk_label,
		    lp, /*sd->sc_dk.dk_openmask : */0,
		    sd->sc_dk.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(SDLABELDEV(dev),
				    sdstrategy, sd->sc_dk.dk_label,
				    sd->sc_dk.dk_cpulabel);
		}

		sd->flags &= ~SDF_LABELLING;
		sdunlock(sd);
		return (error);
	}

	case DIOCKLABEL:
		if (*(int *)addr)
			periph->periph_flags |= PERIPH_KEEP_LABEL;
		else
			periph->periph_flags &= ~PERIPH_KEEP_LABEL;
		return (0);

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sd->flags |= SDF_WLABEL;
		else
			sd->flags &= ~SDF_WLABEL;
		return (0);

	case DIOCLOCK:
		return (scsipi_prevent(periph,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));

	case DIOCEJECT:
		if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
			return (ENOTTY);
		if (*(int *)addr == 0) {
			/*
			 * Don't force eject: check that we are the only
			 * partition open. If so, unlock it.
			 */
			if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
			    sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
			    sd->sc_dk.dk_openmask) {
				error = scsipi_prevent(periph, PR_ALLOW,
				    XS_CTL_IGNORE_NOT_READY);
				if (error)
					return (error);
			} else {
				return (EBUSY);
			}
		}
		/* FALLTHROUGH */
	case ODIOCEJECT:
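		/*
		 * Illustrative note (not in the original source): as I read
		 * it, SSS_STOP|SSS_LOEJ asks scsipi_start() to issue a
		 * START STOP UNIT with the start bit clear and the
		 * load/eject bit set, i.e. spin the unit down and eject
		 * the medium.
		 */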
		return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
		    ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));

	case DIOCGDEFLABEL:
		sdgetdefaultlabel(sd, (struct disklabel *)addr);
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		sdgetdefaultlabel(sd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return (0);
#endif

	case DIOCGCACHE:
		if (sd->sc_ops->sdo_getcache != NULL)
			return ((*sd->sc_ops->sdo_getcache)(sd, (int *) addr));

		/* Not supported on this device. */
		*(int *) addr = 0;
		return (0);

	case DIOCSCACHE:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (sd->sc_ops->sdo_setcache != NULL)
			return ((*sd->sc_ops->sdo_setcache)(sd, *(int *) addr));

		/* Not supported on this device. */
		return (EOPNOTSUPP);

	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writeable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0) &&
		    sd->sc_ops->sdo_flush != NULL) {
			error = (*sd->sc_ops->sdo_flush)(sd, 0);
			if (error)
				sd->flags &= ~SDF_FLUSHING;
			else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		} else
			error = 0;
		return (error);

	default:
		if (part != RAW_PART)
			return (ENOTTY);
		return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
	}

#ifdef DIAGNOSTIC
	panic("sdioctl: impossible");
#endif
}

void
sdgetdefaultlabel(sd, lp)
	struct sd_softc *sd;
	struct disklabel *lp;
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sd->params.blksize;
	lp->d_ntracks = sd->params.heads;
	lp->d_nsectors = sd->params.sectors;
	lp->d_ncylinders = sd->params.cyls;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	switch (scsipi_periph_bustype(sd->sc_periph)) {
#if NSD_SCSIBUS > 0
	case SCSIPI_BUSTYPE_SCSI:
		lp->d_type = DTYPE_SCSI;
		break;
#endif
#if NSD_ATAPIBUS > 0
	case SCSIPI_BUSTYPE_ATAPI:
		lp->d_type = DTYPE_ATAPI;
		break;
#endif
	}
	strncpy(lp->d_typename, sd->name, 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = sd->params.disksize;
	lp->d_rpm = sd->params.rot_rate;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}
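
/*
 * Worked example (illustrative, not in the original source): the raw
 * partition size above multiplies the device sector count by
 * d_secsize / DEV_BSIZE, i.e. it is expressed in DEV_BSIZE (512-byte)
 * blocks.  A drive reporting 1000000 sectors of 2048 bytes therefore ends
 * up with p_size = 1000000 * (2048 / 512) = 4000000.
 */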

/*
 * Load the label information on the named device
 */
void
sdgetdisklabel(sd)
	struct sd_softc *sd;
{
	struct disklabel *lp = sd->sc_dk.dk_label;
	char *errstring;

	memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	sdgetdefaultlabel(sd, lp);

	if (lp->d_secpercyl == 0) {
		lp->d_secpercyl = 100;
		/* as long as it's not 0 - readdisklabel divides by it (?) */
	}

	/*
	 * Call the generic disklabel extraction routine
	 */
	errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
	    sdstrategy, lp, sd->sc_dk.dk_cpulabel);
	if (errstring) {
		printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
		return;
	}
}

void
sd_shutdown(arg)
	void *arg;
{
	struct sd_softc *sd = arg;

	/*
	 * If the disk cache needs to be flushed, and the disk supports
	 * it, flush it.  We're cold at this point, so we poll for
	 * completion.
	 */
	if ((sd->flags & SDF_DIRTY) != 0 && sd->sc_ops->sdo_flush != NULL) {
		if ((*sd->sc_ops->sdo_flush)(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
			printf("%s: cache synchronization failed\n",
			    sd->sc_dev.dv_xname);
			sd->flags &= ~SDF_FLUSHING;
		} else
			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}
}

/*
 * Tell the device to map out a defective block
 */
int
sd_reassign_blocks(sd, blkno)
	struct sd_softc *sd;
	u_long blkno;
{
	struct scsi_reassign_blocks scsipi_cmd;
	struct scsi_reassign_blocks_data rbdata;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	memset(&rbdata, 0, sizeof(rbdata));
	scsipi_cmd.opcode = SCSI_REASSIGN_BLOCKS;

	_lto2b(sizeof(rbdata.defect_descriptor[0]), rbdata.length);
	_lto4b(blkno, rbdata.defect_descriptor[0].dlbaddr);

	return (scsipi_command(sd->sc_periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *)&rbdata, sizeof(rbdata), SDRETRIES, 5000, NULL,
	    XS_CTL_DATA_OUT | XS_CTL_DATA_ONSTACK));
}

/*
 * Check Errors
 */
int
sd_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
	struct sd_softc *sd = (void *)periph->periph_dev;
	int s, error, retval = EJUSTRETURN;

	/*
	 * If the periph is already recovering, just do the normal
	 * error processing.
	 */
	if (periph->periph_flags & PERIPH_RECOVERING)
		return (retval);

	/*
	 * If the device is not open yet, let the generic code handle it.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (retval);

	/*
	 * If it isn't an extended or extended/deferred error, let
	 * the generic code handle it.
	 */
	if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
	    (sense->error_code & SSD_ERRCODE) != 0x71)
		return (retval);

	if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
	    sense->add_sense_code == 0x4) {
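		/*
		 * Illustrative note (not in the original source): sense key
		 * NOT READY with ASC 0x04 means "logical unit not ready";
		 * the ASCQ then distinguishes 0x01 ("in process of becoming
		 * ready") from 0x02 ("initializing command required"), which
		 * is why the two cases below are handled differently.
		 */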
		if (sense->add_sense_code_qual == 0x01) {
			/*
			 * Unit In The Process Of Becoming Ready.
			 */
			printf("%s: waiting for pack to spin up...\n",
			    sd->sc_dev.dv_xname);
			if (!callout_active(&periph->periph_callout))
				scsipi_periph_freeze(periph, 1);
			callout_reset(&periph->periph_callout,
			    5 * hz, scsipi_periph_timed_thaw, periph);
			retval = ERESTART;
		} else if ((sense->add_sense_code_qual == 0x2) &&
		    (periph->periph_quirks & PQUIRK_NOSTARTUNIT) == 0) {
			printf("%s: pack is stopped, restarting...\n",
			    sd->sc_dev.dv_xname);
			s = splbio();
			periph->periph_flags |= PERIPH_RECOVERING;
			splx(s);
			error = scsipi_start(periph, SSS_START,
			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
			if (error) {
				printf("%s: unable to restart pack\n",
				    sd->sc_dev.dv_xname);
				retval = error;
			} else
				retval = ERESTART;
			s = splbio();
			periph->periph_flags &= ~PERIPH_RECOVERING;
			splx(s);
		}
	}
	return (retval);
}


int
sdsize(dev)
	dev_t dev;
{
	struct sd_softc *sd;
	int part, unit, omask;
	int size;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (-1);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (-1);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (-1);

	part = SDPART(dev);
	omask = sd->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		size = -1;
	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	return (size);
}

/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
static struct scsipi_xfer sx;
static int sddoingadump;

/*
 * dump all of physical memory into the partition specified, starting
 * at offset 'dumplo' into the partition.
 */
int
sddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	struct sd_softc *sd;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int unit, part;
	int sectorsize;		/* size of a disk sector */
	int nsects;		/* number of sectors in partition */
	int sectoff;		/* sector offset of partition */
	int totwrt;		/* total number of sectors left to write */
	int nwrt;		/* current number of sectors to write */
	struct scsipi_rw_big cmd;	/* write command */
	struct scsipi_xfer *xs;	/* ... convenience */
	struct scsipi_periph *periph;
	struct scsipi_channel *chan;

	/* Check if recursive dump; if so, punt. */
	if (sddoingadump)
		return (EFAULT);

	/* Mark as active early. */
	sddoingadump = 1;

	unit = SDUNIT(dev);	/* Decompose unit & partition. */
	part = SDPART(dev);

	/* Check for acceptable drive number. */
	if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
		return (ENXIO);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	chan = periph->periph_channel;

	/* Make sure it was initialized. */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (ENXIO);

	/* Convert to disk sectors.  Request must be a multiple of size. */
	lp = sd->sc_dk.dk_label;
	sectorsize = lp->d_secsize;
	if ((size % sectorsize) != 0)
		return (EFAULT);
	totwrt = size / sectorsize;
	blkno = dbtob(blkno) / sectorsize;	/* blkno in DEV_BSIZE units */
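	/*
	 * Worked example (illustrative, not in the original source): the
	 * incoming blkno is in DEV_BSIZE (512-byte) units and dbtob()
	 * converts it to a byte offset, so with 2048-byte sectors a blkno
	 * of 8 becomes byte 4096, i.e. device sector 2.
	 */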

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + totwrt) > nsects))
		return (EINVAL);

	/* Offset block number to start of partition. */
	blkno += sectoff;

	xs = &sx;

	while (totwrt > 0) {
		nwrt = totwrt;		/* XXX */
#ifndef	SD_DUMP_NOT_TRUSTED
		/*
		 * Fill out the scsi command
		 */
		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = WRITE_BIG;
		_lto4b(blkno, cmd.addr);
		_lto2b(nwrt, cmd.length);
		/*
		 * Fill out the scsipi_xfer structure
		 *    Note: we cannot sleep as we may be an interrupt
		 * don't use scsipi_command() as it may want to wait
		 * for an xs.
		 */
		memset(xs, 0, sizeof(sx));
		xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
		    XS_CTL_DATA_OUT;
		xs->xs_status = 0;
		xs->xs_periph = periph;
		xs->xs_retries = SDRETRIES;
		xs->timeout = 10000;	/* 10000 millisecs for a disk ! */
		xs->cmd = (struct scsipi_generic *)&cmd;
		xs->cmdlen = sizeof(cmd);
		xs->resid = nwrt * sectorsize;
		xs->error = XS_NOERROR;
		xs->bp = 0;
		xs->data = va;
		xs->datalen = nwrt * sectorsize;

		/*
		 * Pass all this info to the scsi driver.
		 */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
		if ((xs->xs_status & XS_STS_DONE) == 0 ||
		    xs->error != XS_NOERROR)
			return (EIO);
#else	/* SD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first... */
		printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
		delay(500 * 1000);	/* half a second */
#endif	/* SD_DUMP_NOT_TRUSTED */

		/* update block count */
		totwrt -= nwrt;
		blkno += nwrt;
		va += sectorsize * nwrt;
	}
	sddoingadump = 0;
	return (0);
}