/*	$NetBSD: ld.c,v 1.6 2001/01/08 06:57:21 itojun Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/ldvar.h>

static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void	ldgetdisklabel(struct ld_softc *);
static int	ldlock(struct ld_softc *);
static void	ldminphys(struct buf *bp);
static void	ldshutdown(void *);
static int	ldstart(struct ld_softc *, struct buf *);
static void	ldunlock(struct ld_softc *);

extern struct cfdriver ld_cd;

static struct dkdriver lddkdriver = { ldstrategy };
static void *ld_sdh;

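/*
 * Usage note (a summary drawn from the fields this file references; see
 * ldvar.h and the parent controller drivers for the authoritative list):
 * before calling ldattach(), the parent driver is expected to have filled
 * in the geometry and transfer limits (sc_secsize, sc_nheads, sc_nsectors,
 * sc_ncylinders, sc_secperunit, sc_maxxfer, sc_maxqueuecnt), the sc_start,
 * sc_dump and sc_flush callbacks, and to have set LDF_ENABLED in sc_flags
 * if the volume is usable.
 */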

void
ldattach(struct ld_softc *sc)
{
	char buf[9];

	/* Initialise and attach the disk structure. */
	sc->sc_dk.dk_driver = &lddkdriver;
	sc->sc_dk.dk_name = sc->sc_dv.dv_xname;
	disk_attach(&sc->sc_dk);

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		printf("%s: disabled\n", sc->sc_dv.dv_xname);
		return;
	}
	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	format_bytes(buf, sizeof(buf), (u_int64_t)sc->sc_secperunit *
	    sc->sc_secsize);
	printf("%s: %s, %d cyl, %d head, %d sec, %d bytes/sect x %d sectors\n",
	    sc->sc_dv.dv_xname, buf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

#if NRND > 0
	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	/* Set the `shutdownhook'. */
	if (ld_sdh == NULL)
		ld_sdh = shutdownhook_establish(ldshutdown, NULL);
	BUFQ_INIT(&sc->sc_bufq);
}

int
lddrain(struct ld_softc *sc, int flags)
{
	int s;

	if ((flags & DETACH_FORCE) == 0 && sc->sc_dk.dk_openmask != 0)
		return (EBUSY);

	s = splbio();
	sc->sc_flags |= LDF_DRAIN;
	splx(s);
	return (0);
}

void
lddetach(struct ld_softc *sc)
{
	struct buf *bp;
	int s, bmaj, cmaj, mn;

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 30 * hz);

	/* Locate the major numbers. */
	for (bmaj = 0; bmaj <= nblkdev; bmaj++)
		if (bdevsw[bmaj].d_open == ldopen)
			break;
	for (cmaj = 0; cmaj <= nchrdev; cmaj++)
		if (cdevsw[cmaj].d_open == ldopen)
			break;

	/* Kill off any queued buffers. */
	s = splbio();
	while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
		BUFQ_REMOVE(&sc->sc_bufq, bp);
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	splx(s);

	/* Nuke the vnodes for any open instances. */
	mn = DISKUNIT(sc->sc_dv.dv_unit);
	vdevgone(bmaj, mn, mn + (MAXPARTITIONS - 1), VBLK);
	vdevgone(cmaj, mn, mn + (MAXPARTITIONS - 1), VCHR);

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);
#endif

	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
}

static void
ldshutdown(void *cookie)
{
	struct ld_softc *sc;
	int i;

	for (i = 0; i < ld_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&ld_cd, i)) == NULL)
			continue;
		if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
	}
}

int
ldopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct ld_softc *sc;
	int unit, part;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);
	ldlock(sc);

	if (sc->sc_dk.dk_openmask == 0)
		ldgetdisklabel(sc);

	/* Check that the partition exists. */
	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		ldunlock(sc);
		return (ENXIO);
	}

	/* Record this open in the per-partition open masks. */
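	/*
	 * One bit per partition: partition 0 maps to 0x01, partition 2 to
	 * 0x04, and so on (illustrative values of the 1 << part shift).
	 * dk_openmask is the union of the character and block masks, and is
	 * what the rest of the driver tests to decide whether the unit is
	 * open at all.
	 */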
	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	ldunlock(sc);
	return (0);
}

int
ldclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct ld_softc *sc;
	int part, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup(&ld_cd, unit);
	ldlock(sc);

	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0 && sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);

	ldunlock(sc);
	return (0);
}

int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

int
ldioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p)
{
	struct ld_softc *sc;
	int part, unit, error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct disklabel *lp;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup(&ld_cd, unit);
	error = 0;

	switch (cmd) {
	case DIOCGDINFO:
		memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sc->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof(struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[part];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:

		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		if ((error = ldlock(sc)) != 0)
			return (error);
		sc->sc_flags |= LDF_LABELLING;

		error = setdisklabel(sc->sc_dk.dk_label,
		    lp, /*sc->sc_dk.dk_openmask : */0,
		    sc->sc_dk.dk_cpulabel);
		if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
		    || cmd == ODIOCWDINFO
#endif
		    ))
			error = writedisklabel(
			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
			    ldstrategy, sc->sc_dk.dk_label,
			    sc->sc_dk.dk_cpulabel);

		sc->sc_flags &= ~LDF_LABELLING;
		ldunlock(sc);
		break;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_WLABEL;
		else
			sc->sc_flags &= ~LDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		ldgetdefaultlabel(sc, (struct disklabel *)addr);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		ldgetdefaultlabel(sc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

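/*
 * For reference, a userland consumer reads the label handled above with
 * the DIOCGDINFO ioctl.  A minimal, illustrative sketch only (the device
 * name and the fields printed are examples; the raw partition letter
 * varies by port):
 *
 *	struct disklabel dl;
 *	int fd;
 *
 *	fd = open("/dev/rld0d", O_RDONLY);
 *	if (fd != -1 && ioctl(fd, DIOCGDINFO, &dl) == 0)
 *		printf("%u sectors of %u bytes\n",
 *		    dl.d_secperunit, dl.d_secsize);
 */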

void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	int s;

	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));

	s = splbio();
	if (sc->sc_queuecnt == sc->sc_maxqueuecnt) {
		BUFQ_INSERT_TAIL(&sc->sc_bufq, bp);
		splx(s);
		return;
	}
	splx(s);
	ldstart(sc, bp);
}

static int
ldstart(struct ld_softc *sc, struct buf *bp)
{
	struct disklabel *lp;
	int part, s, rv;

	if ((sc->sc_flags & LDF_DRAIN) != 0) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	part = DISKPART(bp->b_dev);
	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return (-1);
	}

	/*
	 * If it's a null transfer, return.
	 */
	if (bp->b_bcount == 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part != RAW_PART &&
	    bounds_check_with_label(bp, lp,
	    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	/*
	 * Convert the logical block number to a physical one and put it in
	 * terms of the device's logical block size.
	 */
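	/*
	 * Worked example with illustrative values: DEV_BSIZE is 512, so a
	 * 2048-byte sector size gives b_rawblkno = b_blkno / 4, while a
	 * hypothetical 256-byte sector size would give
	 * b_rawblkno = b_blkno * 2.  With 512-byte sectors the two numbers
	 * are identical.
	 */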
	if (lp->d_secsize >= DEV_BSIZE)
		bp->b_rawblkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		bp->b_rawblkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		bp->b_rawblkno += lp->d_partitions[part].p_offset;

	s = splbio();
	disk_busy(&sc->sc_dk);
	sc->sc_queuecnt++;
	splx(s);

	if ((rv = (*sc->sc_start)(sc, bp)) != 0) {
		bp->b_error = rv;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		s = splbio();
		lddone(sc, bp);
		splx(s);
	}

	return (0);
}

void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if ((bp->b_flags & B_ERROR) != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid);
#if NRND > 0
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
#endif
	biodone(bp);
	if (--sc->sc_queuecnt == 0 && (sc->sc_flags & LDF_DRAIN) != 0)
		wakeup(&sc->sc_queuecnt);

	while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
		BUFQ_REMOVE(&sc->sc_bufq, bp);
		if (!ldstart(sc, bp))
			break;
	}
}

int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	int part, unit, omask, size;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENODEV);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);

	return (size);
}

/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
	const char *errstring;

	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

	/* Call the generic disklabel extraction routine. */
	errstring = readdisklabel(MAKEDISKDEV(0, sc->sc_dv.dv_unit, RAW_PART),
	    ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
	if (errstring != NULL)
		printf("%s: %s\n", sc->sc_dv.dv_xname, errstring);
}

/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sc->sc_secsize;
	lp->d_ntracks = sc->sc_nheads;
	lp->d_nsectors = sc->sc_nsectors;
	lp->d_ncylinders = sc->sc_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_type = DTYPE_LD;
	strcpy(lp->d_typename, "unknown");
	strcpy(lp->d_packname, "fictitious");
	lp->d_secperunit = sc->sc_secperunit;
	lp->d_rpm = 7200;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX Several drivers do this; it should be abstracted and made MP-safe.
 */
static int
ldlock(struct ld_softc *sc)
{
	int error;

	while ((sc->sc_flags & LDF_LKHELD) != 0) {
		sc->sc_flags |= LDF_LKWANTED;
		if ((error = tsleep(sc, PRIBIO | PCATCH, "ldlck", 0)) != 0)
			return (error);
	}
	sc->sc_flags |= LDF_LKHELD;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
static void
ldunlock(struct ld_softc *sc)
{

	sc->sc_flags &= ~LDF_LKHELD;
	if ((sc->sc_flags & LDF_LKWANTED) != 0) {
		sc->sc_flags &= ~LDF_LKWANTED;
		wakeup(sc);
	}
}

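/*
 * One way to abstract the flag-based lock above, sketched only (this driver
 * does not do this, and ld_softc has no such field): keep a struct lock,
 * say "sc_lock", in the softc and use the lockmgr(9) primitives from
 * <sys/lock.h>, which is already included:
 *
 *	lockinit(&sc->sc_lock, PRIBIO, "ldlck", 0, 0);		in ldattach()
 *	error = lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL);	for ldlock()
 *	(void)lockmgr(&sc->sc_lock, LK_RELEASE, NULL);		for ldunlock()
 */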
583 */ 584 static int 585 ldlock(struct ld_softc *sc) 586 { 587 int error; 588 589 while ((sc->sc_flags & LDF_LKHELD) != 0) { 590 sc->sc_flags |= LDF_LKWANTED; 591 if ((error = tsleep(sc, PRIBIO | PCATCH, "ldlck", 0)) != 0) 592 return (error); 593 } 594 sc->sc_flags |= LDF_LKHELD; 595 return (0); 596 } 597 598 /* 599 * Unlock and wake up any waiters. 600 */ 601 static void 602 ldunlock(struct ld_softc *sc) 603 { 604 605 sc->sc_flags &= ~LDF_LKHELD; 606 if ((sc->sc_flags & LDF_LKWANTED) != 0) { 607 sc->sc_flags &= ~LDF_LKWANTED; 608 wakeup(sc); 609 } 610 } 611 612 /* 613 * Take a dump. 614 */ 615 int 616 lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size) 617 { 618 struct ld_softc *sc; 619 struct disklabel *lp; 620 int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv; 621 static int dumping; 622 623 unit = DISKUNIT(dev); 624 if ((sc = device_lookup(&ld_cd, unit)) == NULL) 625 return (ENXIO); 626 if ((sc->sc_flags & LDF_ENABLED) == 0) 627 return (ENODEV); 628 if (sc->sc_dump == NULL) 629 return (ENXIO); 630 631 /* Check if recursive dump; if so, punt. */ 632 if (dumping) 633 return (EFAULT); 634 dumping = 1; 635 636 /* Convert to disk sectors. Request must be a multiple of size. */ 637 part = DISKPART(dev); 638 lp = sc->sc_dk.dk_label; 639 if ((size % lp->d_secsize) != 0) 640 return (EFAULT); 641 towrt = size / lp->d_secsize; 642 blkno = dbtob(blkno) / lp->d_secsize; /* blkno in DEV_BSIZE units */ 643 644 nsects = lp->d_partitions[part].p_size; 645 sectoff = lp->d_partitions[part].p_offset; 646 647 /* Check transfer bounds against partition size. */ 648 if ((blkno < 0) || ((blkno + towrt) > nsects)) 649 return (EINVAL); 650 651 /* Offset block number to start of partition. */ 652 blkno += sectoff; 653 654 /* Start dumping and return when done. */ 655 maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1; 656 while (towrt > 0) { 657 nblk = min(maxblkcnt, towrt); 658 659 if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) 660 return (rv); 661 662 towrt -= nblk; 663 blkno += nblk; 664 va += nblk * sc->sc_secsize; 665 } 666 667 dumping = 0; 668 return (0); 669 } 670 671 /* 672 * Adjust the size of a transfer. 673 */ 674 static void 675 ldminphys(struct buf *bp) 676 { 677 struct ld_softc *sc; 678 679 sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev)); 680 681 if (bp->b_bcount > sc->sc_maxxfer) 682 bp->b_bcount = sc->sc_maxxfer; 683 minphys(bp); 684 } 685