/*	$NetBSD: sd.c,v 1.178 2001/07/18 18:21:05 thorpej Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@dialix.oz.au)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 *
 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
 */

#include "opt_scsi.h"
#include "rnd.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/sdvar.h>

#include "sd.h"		/* NSD_SCSIBUS and NSD_ATAPIBUS come from here */

#define	SDUNIT(dev)			DISKUNIT(dev)
#define	SDPART(dev)			DISKPART(dev)
#define	SDMINOR(unit, part)		DISKMINOR(unit, part)
#define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)

#define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))

int	sdlock __P((struct sd_softc *));
void	sdunlock __P((struct sd_softc *));
void	sdminphys __P((struct buf *));
void	sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
void	sdgetdisklabel __P((struct sd_softc *));
void	sdstart __P((struct scsipi_periph *));
void	sddone __P((struct scsipi_xfer *));
void	sd_shutdown __P((void *));
int	sd_reassign_blocks __P((struct sd_softc *, u_long));
int	sd_interpret_sense __P((struct scsipi_xfer *));

extern struct cfdriver sd_cd;

struct dkdriver sddkdriver = { sdstrategy };

const struct scsipi_periphsw sd_switch = {
	sd_interpret_sense,	/* check our error handler first */
	sdstart,		/* have a queue, served by this */
	NULL,			/* have no async handler */
	sddone,			/* deal with stats at interrupt time */
};

/*
 * Attach routine common to atapi & scsi.
 */
void
sdattach(parent, sd, periph, ops)
	struct device *parent;
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	const struct sd_ops *ops;
{
	int error, result;
	struct disk_parms *dp = &sd->params;
	char pbuf[9];

	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));

	BUFQ_INIT(&sd->buf_queue);

	/*
	 * Store information needed to contact our base driver
	 */
	sd->sc_periph = periph;
	sd->sc_ops = ops;

	periph->periph_dev = &sd->sc_dev;
	periph->periph_switch = &sd_switch;

	/*
	 * Increase our openings to the maximum-per-periph
	 * supported by the adapter.  This will either be
	 * clamped down or grown by the adapter if necessary.
	 */
	periph->periph_openings =
	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
	periph->periph_flags |= PERIPH_GROW_OPENINGS;

	/*
	 * Initialize and attach the disk structure.
	 */
	sd->sc_dk.dk_driver = &sddkdriver;
	sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
	disk_attach(&sd->sc_dk);

#ifdef __BROKEN_DK_ESTABLISH
	dk_establish(&sd->sc_dk, &sd->sc_dev);		/* XXX */
#endif

	/*
	 * Use the subdriver to request information regarding the drive.
	 */
	printf("\n");

	error = scsipi_start(periph, SSS_START,
	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = (*sd->sc_ops->sdo_get_parms)(sd, &sd->params,
		    XS_CTL_DISCOVERY);
	printf("%s: ", sd->sc_dev.dv_xname);
	switch (result) {
	case SDGP_RESULT_OK:
		format_bytes(pbuf, sizeof(pbuf),
		    (u_int64_t)dp->disksize * dp->blksize);
		printf(
		    "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %ld sectors",
		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
		    dp->disksize);
		break;

	case SDGP_RESULT_OFFLINE:
		printf("drive offline");
		break;

	case SDGP_RESULT_UNFORMATTED:
		printf("unformatted media");
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result from get_parms");
		break;
#endif
	}
	printf("\n");

	/*
	 * Establish a shutdown hook so that we can ensure that
	 * our data has actually made it onto the platter at
	 * shutdown time.  Note that this relies on the fact
	 * that the shutdown hook code puts us at the head of
	 * the list (thus guaranteeing that our hook runs before
	 * our ancestors').
	 */
	if ((sd->sc_sdhook =
	    shutdownhook_establish(sd_shutdown, sd)) == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sd->sc_dev.dv_xname);

#if NRND > 0
	/*
	 * attach the device into the random source list
	 */
	rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif
}

int
sdactivate(self, act)
	struct device *self;
	enum devact act;
{
	int rv = 0;

	switch (act) {
	case DVACT_ACTIVATE:
		rv = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		/*
		 * Nothing to do; we key off the device's DVF_ACTIVE.
		 */
		break;
	}
	return (rv);
}

int
sddetach(self, flags)
	struct device *self;
	int flags;
{
	struct sd_softc *sd = (struct sd_softc *) self;
	struct buf *bp;
	int s, bmaj, cmaj, i, mn;

	/* locate the major number */
	for (bmaj = 0; bmaj <= nblkdev; bmaj++)
		if (bdevsw[bmaj].d_open == sdopen)
			break;
	for (cmaj = 0; cmaj <= nchrdev; cmaj++)
		if (cdevsw[cmaj].d_open == sdopen)
			break;

	s = splbio();

	/* Kill off any queued buffers. */
	while ((bp = BUFQ_FIRST(&sd->buf_queue)) != NULL) {
		BUFQ_REMOVE(&sd->buf_queue, bp);
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}

	/* Kill off any pending commands. */
	scsipi_kill_pending(sd->sc_periph);

	splx(s);

	/* Nuke the vnodes for any open instances */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = SDMINOR(self->dv_unit, i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Detach from the disk list. */
	disk_detach(&sd->sc_dk);

	/* Get rid of the shutdown hook. */
	shutdownhook_disestablish(sd->sc_sdhook);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sd->rnd_source);
#endif

	return (0);
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 */
int
sdlock(sd)
	struct sd_softc *sd;
{
	int error;

	while ((sd->flags & SDF_LOCKED) != 0) {
		sd->flags |= SDF_WANTED;
		if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
			return (error);
	}
	sd->flags |= SDF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
void
sdunlock(sd)
	struct sd_softc *sd;
{

	sd->flags &= ~SDF_LOCKED;
	if ((sd->flags & SDF_WANTED) != 0) {
		sd->flags &= ~SDF_WANTED;
		wakeup(sd);
	}
}

/*
 * Open the device.  Make sure the partition info is as up-to-date as can be.
 */
int
sdopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	struct scsipi_adapter *adapt;
	int unit, part;
	int error;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (ENXIO);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (ENXIO);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	adapt = periph->periph_channel->chan_adapter;
	part = SDPART(dev);

	SC_DEBUG(periph, SCSIPI_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if (sd->sc_dk.dk_openmask == 0 &&
	    (error = scsipi_adapter_addref(adapt)) != 0)
		return (error);

	if ((error = sdlock(sd)) != 0)
		goto bad4;

	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partitions.
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
		    (part != RAW_PART || fmt != S_IFCHR)) {
			error = EIO;
			goto bad3;
		}
	} else {
		/* Check that it is still responding and ok. */
		error = scsipi_test_unit_ready(periph,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
		    XS_CTL_IGNORE_NOT_READY);
		if (error)
			goto bad3;

		/*
		 * Start the pack spinning if necessary.  Always allow the
		 * raw partition to be opened, for raw IOCTLs.  Data transfers
		 * will check for SDEV_MEDIA_LOADED.
		 */
		error = scsipi_start(periph, SSS_START,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT);
		if (error) {
			if (part != RAW_PART || fmt != S_IFCHR)
				goto bad3;
			else
				goto out;
		}

		periph->periph_flags |= PERIPH_OPEN;

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			/* Lock the pack in. */
			error = scsipi_prevent(periph, PR_PREVENT,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
			if (error)
				goto bad;
		}

		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			periph->periph_flags |= PERIPH_MEDIA_LOADED;

			/*
			 * Load the physical device parameters.
			 *
			 * Note that if media is present but unformatted,
			 * we allow the open (so that it can be formatted!).
			 * The drive should refuse real I/O, if the media is
			 * unformatted.
			 */
			if ((*sd->sc_ops->sdo_get_parms)(sd, &sd->params,
			    0) == SDGP_RESULT_OFFLINE) {
				error = ENXIO;
				goto bad2;
			}
			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));

			/* Load the partition info if not already loaded. */
			sdgetdisklabel(sd);
			SC_DEBUG(periph, SCSIPI_DB3, ("Disklabel loaded "));
		}
	}

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= sd->sc_dk.dk_label->d_npartitions ||
	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

out:	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
	sdunlock(sd);
	return (0);

bad2:
	periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

bad:
	if (sd->sc_dk.dk_openmask == 0) {
		scsipi_prevent(periph, PR_ALLOW,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
		periph->periph_flags &= ~PERIPH_OPEN;
	}

bad3:
	sdunlock(sd);
bad4:
	if (sd->sc_dk.dk_openmask == 0)
		scsipi_adapter_delref(adapt);
	return (error);
}

/*
 * Close the device.  Only called if we are the LAST occurrence of an open
 * device.  Convenient now but usually a pain.
 */
int
sdclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int part = SDPART(dev);
	int error;

	if ((error = sdlock(sd)) != 0)
		return (error);

	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	if (sd->sc_dk.dk_openmask == 0) {
		/*
		 * If the disk cache needs flushing, and the disk supports
		 * it, do it now.
		 */
		if ((sd->flags & SDF_DIRTY) != 0 &&
		    sd->sc_ops->sdo_flush != NULL) {
			if ((*sd->sc_ops->sdo_flush)(sd, 0)) {
				printf("%s: cache synchronization failed\n",
				    sd->sc_dev.dv_xname);
				sd->flags &= ~SDF_FLUSHING;
			} else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		}

		if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

		scsipi_wait_drain(periph);

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			scsipi_prevent(periph, PR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY);
		}
		periph->periph_flags &= ~PERIPH_OPEN;

		scsipi_wait_drain(periph);

		scsipi_adapter_delref(adapt);
	}

	sdunlock(sd);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct disklabel *lp;
	daddr_t blkno;
	int s;
	boolean_t sector_aligned;

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
	    ("%ld bytes @ blk %d\n", bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
	    (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
		if (periph->periph_flags & PERIPH_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	lp = sd->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks, and the offset
	 * must not be negative.
	 */
	if (lp->d_secsize == DEV_BSIZE) {
		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
	} else {
		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
	}
	if (!sector_aligned || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer.  If error, process.
	 * If end of partition, just return.
	 */
	if (SDPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(bp, lp,
	    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (SDPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk.
	 *
	 * XXX Only do disksort() if the current operating mode does not
	 * XXX include tagged queueing.
	 */
	disksort_blkno(&sd->buf_queue, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	sdstart(sd->sc_periph);

	splx(s);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy.  If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf.  The transfer request will call scsipi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * must be called at the correct (highish) spl level
 * sdstart() is called at splbio from sdstrategy and scsipi_done
 */
void
sdstart(periph)
	struct scsipi_periph *periph;
{
	struct sd_softc *sd = (void *)periph->periph_dev;
	struct disklabel *lp = sd->sc_dk.dk_label;
	struct buf *bp = 0;
	struct scsipi_rw_big cmd_big;
#if NSD_SCSIBUS > 0
	struct scsi_rw cmd_small;
#endif
	struct scsipi_generic *cmdp;
	int nblks, cmdlen, error, flags;

	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
	/*
	 * Check if the device has room for another command
	 */
	while (periph->periph_active < periph->periph_openings) {
		/*
		 * There is excess capacity, but a special (user-level)
		 * command is waiting; it will need the adapter as soon
		 * as we clear out of the way and let it run.
		 */
		if (periph->periph_flags & PERIPH_WAITING) {
			periph->periph_flags &= ~PERIPH_WAITING;
			wakeup((caddr_t)periph);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		if ((bp = BUFQ_FIRST(&sd->buf_queue)) == NULL)
			return;
		BUFQ_REMOVE(&sd->buf_queue, bp);

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command.
		 */

		if (lp->d_secsize == DEV_BSIZE)
			nblks = bp->b_bcount >> DEV_BSHIFT;
		else
			nblks = howmany(bp->b_bcount, lp->d_secsize);

#if NSD_SCSIBUS > 0
		/*
		 * Fill out the scsi command.  If the transfer will
		 * fit in a "small" cdb, use it.
		 */
		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
		    ((nblks & 0xff) == nblks) &&
		    !(periph->periph_quirks & PQUIRK_ONLYBIG) &&
		    scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_SCSI) {
			/*
			 * We can fit in a small cdb.
			 */
			memset(&cmd_small, 0, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
			_lto3b(bp->b_rawblkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsipi_generic *)&cmd_small;
		} else
#endif
		{
			/*
			 * Need a large cdb.
			 */
			memset(&cmd_big, 0, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(bp->b_rawblkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsipi_generic *)&cmd_big;
		}

		/* Instrumentation. */
		disk_busy(&sd->sc_dk);

		/*
		 * Mark the disk dirty so that the cache will be
		 * flushed on close.
		 */
		if ((bp->b_flags & B_READ) == 0)
			sd->flags |= SDF_DIRTY;

		/*
		 * Figure out what flags to use.
		 */
		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC;
		if (bp->b_flags & B_READ)
			flags |= XS_CTL_DATA_IN;
		else
			flags |= XS_CTL_DATA_OUT;
		if (bp->b_flags & B_ORDERED)
			flags |= XS_CTL_ORDERED_TAG;
		else
			flags |= XS_CTL_SIMPLE_TAG;

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsipi_command(periph, cmdp, cmdlen,
		    (u_char *)bp->b_data, bp->b_bcount,
		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
		if (error) {
			disk_unbusy(&sd->sc_dk, 0);
			printf("%s: not queued, error %d\n",
			    sd->sc_dev.dv_xname, error);
		}
	}
}

void
sddone(xs)
	struct scsipi_xfer *xs;
{
	struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;

	if (sd->flags & SDF_FLUSHING) {
		/* Flush completed, no longer dirty. */
		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}

	if (xs->bp != NULL) {
		disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid);
#if NRND > 0
		rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
#endif
	}
}

void
sdminphys(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	long max;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0.  However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0.  A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if ((sd->flags & SDF_ANCIENT) &&
	    ((sd->sc_periph->periph_flags &
	    (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
		max = sd->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	(*sd->sc_periph->periph_channel->chan_adapter->adapt_minphys)(bp);
}

int
sdread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{

	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
}

int
sdwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{

	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
}

/*
 * Perform special action on behalf of the user
 * Knows about the internals of this device
 */
int
sdioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	int part = SDPART(dev);
	int error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));

	/*
	 * If the device is not valid, some IOCTLs can still be
	 * handled on the raw partition.  Check this here.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
		switch (cmd) {
		case DIOCKLABEL:
		case DIOCWLABEL:
		case DIOCLOCK:
		case DIOCEJECT:
		case ODIOCEJECT:
		case SCIOCIDENTIFY:
		case OSCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			if (part == RAW_PART)
				break;
		/* FALLTHROUGH */
		default:
			if ((periph->periph_flags & PERIPH_OPEN) == 0)
				return (ENODEV);
			else
				return (EIO);
		}
	}

	switch (cmd) {
	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sd->sc_dk.dk_label);
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sd->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sd->sc_dk.dk_label->d_partitions[part];
		return (0);

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		if ((error = sdlock(sd)) != 0)
			return (error);
		sd->flags |= SDF_LABELLING;

		error = setdisklabel(sd->sc_dk.dk_label,
		    lp, /*sd->sc_dk.dk_openmask : */0,
		    sd->sc_dk.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			    )
				error = writedisklabel(SDLABELDEV(dev),
				    sdstrategy, sd->sc_dk.dk_label,
				    sd->sc_dk.dk_cpulabel);
		}

		sd->flags &= ~SDF_LABELLING;
		sdunlock(sd);
		return (error);
	}

	case DIOCKLABEL:
		if (*(int *)addr)
			periph->periph_flags |= PERIPH_KEEP_LABEL;
		else
			periph->periph_flags &= ~PERIPH_KEEP_LABEL;
		return (0);

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sd->flags |= SDF_WLABEL;
		else
			sd->flags &= ~SDF_WLABEL;
		return (0);

	case DIOCLOCK:
		return (scsipi_prevent(periph,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));

	case DIOCEJECT:
		if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
			return (ENOTTY);
		if (*(int *)addr == 0) {
			/*
			 * Don't force eject: check that we are the only
			 * partition open.  If so, unlock it.
			 */
			if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
			    sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
			    sd->sc_dk.dk_openmask) {
				error = scsipi_prevent(periph, PR_ALLOW,
				    XS_CTL_IGNORE_NOT_READY);
				if (error)
					return (error);
			} else {
				return (EBUSY);
			}
		}
		/* FALLTHROUGH */
	case ODIOCEJECT:
		return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
		    ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));

	case DIOCGDEFLABEL:
		sdgetdefaultlabel(sd, (struct disklabel *)addr);
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		sdgetdefaultlabel(sd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return (0);
#endif

	default:
		if (part != RAW_PART)
			return (ENOTTY);
		return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
	}

#ifdef DIAGNOSTIC
	panic("sdioctl: impossible");
#endif
}

void
sdgetdefaultlabel(sd, lp)
	struct sd_softc *sd;
	struct disklabel *lp;
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sd->params.blksize;
	lp->d_ntracks = sd->params.heads;
	lp->d_nsectors = sd->params.sectors;
	lp->d_ncylinders = sd->params.cyls;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	switch (scsipi_periph_bustype(sd->sc_periph)) {
#if NSD_SCSIBUS > 0
	case SCSIPI_BUSTYPE_SCSI:
		lp->d_type = DTYPE_SCSI;
		break;
#endif
#if NSD_ATAPIBUS > 0
	case SCSIPI_BUSTYPE_ATAPI:
		lp->d_type = DTYPE_ATAPI;
		break;
#endif
	}
	strncpy(lp->d_typename, sd->name, 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = sd->params.disksize;
	lp->d_rpm = sd->params.rot_rate;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}


/*
 * Load the label information on the named device
 */
void
sdgetdisklabel(sd)
	struct sd_softc *sd;
{
	struct disklabel *lp = sd->sc_dk.dk_label;
	char *errstring;

	memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	sdgetdefaultlabel(sd, lp);

	if (lp->d_secpercyl == 0) {
		lp->d_secpercyl = 100;
		/* as long as it's not 0 - readdisklabel divides by it (?) */
	}

	/*
	 * Call the generic disklabel extraction routine
	 */
	errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
	    sdstrategy, lp, sd->sc_dk.dk_cpulabel);
	if (errstring) {
		printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
		return;
	}
}

void
sd_shutdown(arg)
	void *arg;
{
	struct sd_softc *sd = arg;

	/*
	 * If the disk cache needs to be flushed, and the disk supports
	 * it, flush it.  We're cold at this point, so we poll for
	 * completion.
	 */
	if ((sd->flags & SDF_DIRTY) != 0 && sd->sc_ops->sdo_flush != NULL) {
		if ((*sd->sc_ops->sdo_flush)(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
			printf("%s: cache synchronization failed\n",
			    sd->sc_dev.dv_xname);
			sd->flags &= ~SDF_FLUSHING;
		} else
			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}
}

/*
 * Tell the device to map out a defective block
 */
int
sd_reassign_blocks(sd, blkno)
	struct sd_softc *sd;
	u_long blkno;
{
	struct scsi_reassign_blocks scsipi_cmd;
	struct scsi_reassign_blocks_data rbdata;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	memset(&rbdata, 0, sizeof(rbdata));
	scsipi_cmd.opcode = SCSI_REASSIGN_BLOCKS;

	_lto2b(sizeof(rbdata.defect_descriptor[0]), rbdata.length);
	_lto4b(blkno, rbdata.defect_descriptor[0].dlbaddr);

	return (scsipi_command(sd->sc_periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *)&rbdata, sizeof(rbdata), SDRETRIES, 5000, NULL,
	    XS_CTL_DATA_OUT | XS_CTL_DATA_ONSTACK));
}

/*
 * Check Errors
 */
int
sd_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
	struct sd_softc *sd = (void *)periph->periph_dev;
	int s, error, retval = EJUSTRETURN;

	/*
	 * If the periph is already recovering, just do the normal
	 * error processing.
	 */
	if (periph->periph_flags & PERIPH_RECOVERING)
		return (retval);

	/*
	 * If the device is not open yet, let the generic code handle it.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (retval);

	/*
	 * If it isn't an extended or extended/deferred error, let
	 * the generic code handle it.
	 */
	if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
	    (sense->error_code & SSD_ERRCODE) != 0x71)
		return (retval);

	if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
	    sense->add_sense_code == 0x4) {
		if (sense->add_sense_code_qual == 0x01) {
			/*
			 * Unit In The Process Of Becoming Ready.
			 */
			printf("%s: waiting for pack to spin up...\n",
			    sd->sc_dev.dv_xname);
			scsipi_periph_freeze(periph, 1);
			callout_reset(&periph->periph_callout,
			    5 * hz, scsipi_periph_timed_thaw, periph);
			retval = ERESTART;
		} else if ((sense->add_sense_code_qual == 0x2) &&
		    (periph->periph_quirks & PQUIRK_NOSTARTUNIT) == 0) {
			printf("%s: pack is stopped, restarting...\n",
			    sd->sc_dev.dv_xname);
			s = splbio();
			periph->periph_flags |= PERIPH_RECOVERING;
			splx(s);
			error = scsipi_start(periph, SSS_START,
			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
			if (error) {
				printf("%s: unable to restart pack\n",
				    sd->sc_dev.dv_xname);
				retval = error;
			} else
				retval = ERESTART;
			s = splbio();
			periph->periph_flags &= ~PERIPH_RECOVERING;
			splx(s);
		}
	}
	return (retval);
}


int
sdsize(dev)
	dev_t dev;
{
	struct sd_softc *sd;
	int part, unit, omask;
	int size;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (-1);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (-1);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (-1);

	part = SDPART(dev);
	omask = sd->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		size = -1;
	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	return (size);
}

/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
static struct scsipi_xfer sx;
static int sddoingadump;

/*
 * dump all of physical memory into the partition specified, starting
 * at offset 'dumplo' into the partition.
 */
int
sddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	struct sd_softc *sd;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int unit, part;
	int sectorsize;		/* size of a disk sector */
	int nsects;		/* number of sectors in partition */
	int sectoff;		/* sector offset of partition */
	int totwrt;		/* total number of sectors left to write */
	int nwrt;		/* current number of sectors to write */
	struct scsipi_rw_big cmd;	/* write command */
	struct scsipi_xfer *xs;	/* ... convenience */
	struct scsipi_periph *periph;
	struct scsipi_channel *chan;

	/* Check if recursive dump; if so, punt. */
	if (sddoingadump)
		return (EFAULT);

	/* Mark as active early. */
	sddoingadump = 1;

	unit = SDUNIT(dev);	/* Decompose unit & partition. */
	part = SDPART(dev);

	/* Check for acceptable drive number. */
	if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
		return (ENXIO);

	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	chan = periph->periph_channel;

	/* Make sure it was initialized. */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (ENXIO);

	/* Convert to disk sectors.  Request must be a multiple of size. */
	lp = sd->sc_dk.dk_label;
	sectorsize = lp->d_secsize;
	if ((size % sectorsize) != 0)
		return (EFAULT);
	totwrt = size / sectorsize;
	blkno = dbtob(blkno) / sectorsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + totwrt) > nsects))
		return (EINVAL);

	/* Offset block number to start of partition. */
	blkno += sectoff;

	xs = &sx;

	while (totwrt > 0) {
		nwrt = totwrt;		/* XXX */
#ifndef	SD_DUMP_NOT_TRUSTED
		/*
		 * Fill out the scsi command
		 */
		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = WRITE_BIG;
		_lto4b(blkno, cmd.addr);
		_lto2b(nwrt, cmd.length);
		/*
		 * Fill out the scsipi_xfer structure
		 *    Note: we cannot sleep as we may be an interrupt
		 * don't use scsipi_command() as it may want to wait
		 * for an xs.
		 */
		memset(xs, 0, sizeof(sx));
		xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
		    XS_CTL_DATA_OUT;
		xs->xs_status = 0;
		xs->xs_periph = periph;
		xs->xs_retries = SDRETRIES;
		xs->timeout = 10000;	/* 10000 millisecs for a disk ! */
		xs->cmd = (struct scsipi_generic *)&cmd;
		xs->cmdlen = sizeof(cmd);
		xs->resid = nwrt * sectorsize;
		xs->error = XS_NOERROR;
		xs->bp = 0;
		xs->data = va;
		xs->datalen = nwrt * sectorsize;

		/*
		 * Pass all this info to the scsi driver.
		 */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
		if ((xs->xs_status & XS_STS_DONE) == 0 ||
		    xs->error != XS_NOERROR)
			return (EIO);
#else	/* SD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first... */
		printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
		delay(500 * 1000);	/* half a second */
#endif	/* SD_DUMP_NOT_TRUSTED */

		/* update block count */
		totwrt -= nwrt;
		blkno += nwrt;
		va += sectorsize * nwrt;
	}
	sddoingadump = 0;
	return (0);
}
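
/*
 * Illustrative sketch only; this is not part of the original driver and
 * is kept under "#if 0" so it is never compiled.  It restates the
 * block-number conversion sdstrategy() performs before queueing a
 * transfer: b_blkno is always expressed in DEV_BSIZE units, so it must
 * be rescaled when the device's logical block size differs.  The
 * function name and the lone "secsize" parameter are assumptions made
 * for this example.
 */
#if 0
static daddr_t
sd_example_rescale_blkno(daddr_t blkno, u_long secsize)
{

	if (secsize == DEV_BSIZE)	/* common case: no rescaling needed */
		return (blkno);
	if (secsize > DEV_BSIZE)	/* e.g. 2048-byte device blocks */
		return (blkno / (secsize / DEV_BSIZE));
	/* device blocks smaller than DEV_BSIZE */
	return (blkno * (DEV_BSIZE / secsize));
}
#endif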