/*	$NetBSD: ld.c,v 1.82 2015/04/13 16:33:23 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.82 2015/04/13 16:33:23 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/rndsource.h>

#include <dev/ldvar.h>

#include <prop/proplib.h>

static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void	ldgetdisklabel(struct ld_softc *);
static void	ldminphys(struct buf *bp);
static bool	ld_suspend(device_t, const pmf_qual_t *);
static bool	ld_shutdown(device_t, int);
static void	ldstart(struct ld_softc *, struct buf *);
static void	ld_set_geometry(struct ld_softc *);
static void	ld_config_interrupts(device_t);
static int	ldlastclose(device_t);

extern struct cfdriver ld_cd;

static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);

const struct bdevsw ld_bdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_ioctl = ldioctl,
	.d_dump = lddump,
	.d_psize = ldsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw ld_cdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_read = ldread,
	.d_write = ldwrite,
	.d_ioctl = ldioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static struct dkdriver lddkdriver = { ldstrategy, ldminphys };

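/*
 * Attach the logical disk described by the (already initialised) softc:
 * set up the disk(9) structure, synthesize a geometry if the controller
 * did not supply one, hook up the entropy source and power handlers,
 * and defer wedge discovery until interrupts are enabled.
 */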
void
ldattach(struct ld_softc *sc)
{
	char tbuf[9];

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		aprint_normal_dev(sc->sc_dv, "disabled\n");
		return;
	}

	/* Initialise and attach the disk structure. */
	disk_init(&sc->sc_dk, device_xname(sc->sc_dv), &lddkdriver);
	disk_attach(&sc->sc_dk);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0) {
		uint64_t ncyl;

		if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
			sc->sc_nheads = 16;
		else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
			sc->sc_nheads = 32;
		else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
			sc->sc_nheads = 64;
		else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
			sc->sc_nheads = 128;
		else
			sc->sc_nheads = 255;

		sc->sc_nsectors = 63;
		sc->sc_ncylinders = INT_MAX;
		ncyl = sc->sc_secperunit /
		    (sc->sc_nheads * sc->sc_nsectors);
		if (ncyl < INT_MAX)
			sc->sc_ncylinders = (int)ncyl;
	}

	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
	    sc->sc_secsize);
	aprint_normal_dev(sc->sc_dv, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %"PRIu64" sectors\n",
	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);
	sc->sc_disksize512 = sc->sc_secperunit * sc->sc_secsize / DEV_BSIZE;

	ld_set_geometry(sc);

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);

	/* Register with PMF */
	if (!pmf_device_register1(sc->sc_dv, ld_suspend, NULL, ld_shutdown))
		aprint_error_dev(sc->sc_dv,
		    "couldn't establish power handler\n");

	bufq_alloc(&sc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}

int
ldadjqparam(struct ld_softc *sc, int xmax)
{
	int s;

	s = splbio();
	sc->sc_maxqueuecnt = xmax;
	splx(s);

	return (0);
}

int
ldbegindetach(struct ld_softc *sc, int flags)
{
	int s, rv = 0;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	rv = disk_begindetach(&sc->sc_dk, ldlastclose, sc->sc_dv, flags);

	if (rv != 0)
		return rv;

	s = splbio();
	sc->sc_maxqueuecnt = 0;
	sc->sc_flags |= LDF_DETACH;
	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 0);
		if (rv)
			break;
	}
	splx(s);

	return (rv);
}

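/*
 * Second stage of detach: wait for outstanding commands, drain and free
 * the buffer queue, revoke any open vnodes, tear down the disk(9) state
 * and unhook the entropy source and power handlers.
 */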
void
ldenddetach(struct ld_softc *sc)
{
	int s, bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
			printf("%s: not drained\n", device_xname(sc->sc_dv));

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(sc->sc_bufq);
	splx(s);

	bufq_free(sc->sc_bufq);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(sc->sc_dv), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&sc->sc_dk);

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);
	disk_destroy(&sc->sc_dk);

	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);

	/* Deregister with PMF */
	pmf_device_deregister(sc->sc_dv);

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc, 0) != 0)
			aprint_error_dev(sc->sc_dv, "unable to flush cache\n");
#endif
	mutex_destroy(&sc->sc_mutex);
}

/* ARGSUSED */
static bool
ld_suspend(device_t dev, const pmf_qual_t *qual)
{
	return ld_shutdown(dev, 0);
}

/* ARGSUSED */
static bool
ld_shutdown(device_t dev, int flags)
{
	struct ld_softc *sc = device_private(dev);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, LDFL_POLL) != 0) {
		printf("%s: unable to flush cache\n", device_xname(dev));
		return false;
	}

	return true;
}

/* ARGSUSED */
static int
ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	int error, unit, part;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	mutex_enter(&sc->sc_dk.dk_openlock);

	if (sc->sc_dk.dk_openmask == 0) {
		/* Load the partition info if not already loaded. */
		if ((sc->sc_flags & LDF_VLABEL) == 0)
			ldgetdisklabel(sc);
	}

	/* Check that the partition exists. */
	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad1;
	}

	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	error = 0;
 bad1:
	mutex_exit(&sc->sc_dk.dk_openlock);
	return (error);
}

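/*
 * Called when the last partition is closed: flush the controller's cache
 * and, unless the label has been locked with DIOCKLABEL, forget the
 * in-core disklabel.
 */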
static int
ldlastclose(device_t self)
{
	struct ld_softc *sc = device_private(self);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, 0) != 0)
		aprint_error_dev(self, "unable to flush cache\n");
	if ((sc->sc_flags & LDF_KLABEL) == 0)
		sc->sc_flags &= ~LDF_VLABEL;

	return 0;
}

/* ARGSUSED */
static int
ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	int part, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup_private(&ld_cd, unit);

	mutex_enter(&sc->sc_dk.dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0)
		ldlastclose(sc->sc_dv);

	mutex_exit(&sc->sc_dk.dk_openlock);
	return (0);
}

/* ARGSUSED */
static int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

/* ARGSUSED */
static int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

/* ARGSUSED */
static int
ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
{
	struct ld_softc *sc;
	int unit, error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct disklabel *lp;

	unit = DISKUNIT(dev);
	sc = device_lookup_private(&ld_cd, unit);

	error = disk_ioctl(&sc->sc_dk, dev, cmd, addr, flag, l);
	if (error != EPASSTHROUGH)
		return (error);

	error = 0;
	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:

		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		mutex_enter(&sc->sc_dk.dk_openlock);
		sc->sc_flags |= LDF_LABELLING;

		error = setdisklabel(sc->sc_dk.dk_label,
		    lp, /*sc->sc_dk.dk_openmask : */0,
		    sc->sc_dk.dk_cpulabel);
		if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
		    || cmd == ODIOCWDINFO
#endif
		    ))
			error = writedisklabel(
			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
			    ldstrategy, sc->sc_dk.dk_label,
			    sc->sc_dk.dk_cpulabel);

		sc->sc_flags &= ~LDF_LABELLING;
		mutex_exit(&sc->sc_dk.dk_openlock);
		break;

	case DIOCKLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_KLABEL;
		else
			sc->sc_flags &= ~LDF_KLABEL;
		break;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_WLABEL;
		else
			sc->sc_flags &= ~LDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		ldgetdefaultlabel(sc, (struct disklabel *)addr);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		ldgetdefaultlabel(sc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else if (sc->sc_flush)
			error = (*sc->sc_flush)(sc, 0);
		else
			error = 0;	/* XXX Error out instead? */
		break;

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)addr;

		mutex_enter(&sc->sc_mutex);
		strlcpy(dks->dks_name, bufq_getstrategyname(sc->sc_bufq),
		    sizeof(dks->dks_name));
		mutex_exit(&sc->sc_mutex);
		dks->dks_paramlen = 0;

		return 0;
	    }
	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)addr;
		struct bufq_state *new_bufq, *old_bufq;

		if ((flag & FWRITE) == 0)
			return EPERM;

		if (dks->dks_param != NULL)
			return EINVAL;

		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new_bufq, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error)
			return error;

		mutex_enter(&sc->sc_mutex);
		old_bufq = sc->sc_bufq;
		bufq_move(new_bufq, old_bufq);
		sc->sc_bufq = new_bufq;
		mutex_exit(&sc->sc_mutex);
		bufq_free(old_bufq);

		return 0;
	    }
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

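/*
 * Read/write strategy routine: validate the request, bounds-check it
 * against the partition (or the raw device), convert the block number
 * to an absolute sector address in the device's logical block size and
 * queue it via ldstart().
 */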
static void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	daddr_t blkno;
	int s, part;

	sc = device_lookup_private(&ld_cd, DISKUNIT(bp->b_dev));
	part = DISKPART(bp->b_dev);

	if ((sc->sc_flags & LDF_DETACH) != 0) {
		bp->b_error = EIO;
		goto done;
	}

	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    sc->sc_disksize512) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&sc->sc_dk, bp,
		    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[part].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();
	ldstart(sc, bp);
	splx(s);
	return;

 done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

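/*
 * Feed queued transfers to the controller back-end until the queue is
 * empty or the back-end is saturated.  Called with a new buffer from
 * ldstrategy(), or with bp == NULL from lddone() to restart the queue.
 */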
static void
ldstart(struct ld_softc *sc, struct buf *bp)
{
	int error;

	mutex_enter(&sc->sc_mutex);

	if (bp != NULL)
		bufq_put(sc->sc_bufq, bp);

	while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
		/* See if there is work to do. */
		if ((bp = bufq_peek(sc->sc_bufq)) == NULL)
			break;

		disk_busy(&sc->sc_dk);
		sc->sc_queuecnt++;

		if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
			/*
			 * The back-end is running the job; remove it from
			 * the queue.
			 */
			(void) bufq_get(sc->sc_bufq);
		} else {
			disk_unbusy(&sc->sc_dk, 0, (bp->b_flags & B_READ));
			sc->sc_queuecnt--;
			if (error == EAGAIN) {
				/*
				 * Temporary resource shortage in the
				 * back-end; just defer the job until
				 * later.
				 *
				 * XXX We might consider a watchdog timer
				 * XXX to make sure we are kicked into action.
				 */
				break;
			} else {
				(void) bufq_get(sc->sc_bufq);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				mutex_exit(&sc->sc_mutex);
				biodone(bp);
				mutex_enter(&sc->sc_mutex);
			}
		}
	}

	mutex_exit(&sc->sc_mutex);
}

void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if (bp->b_error != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
	biodone(bp);

	mutex_enter(&sc->sc_mutex);
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			wakeup(&sc->sc_queuecnt);
		}
		mutex_exit(&sc->sc_mutex);
		ldstart(sc, NULL);
	} else
		mutex_exit(&sc->sc_mutex);
}

static int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	int part, unit, omask, size;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENODEV);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);

	return (size);
}

/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
	const char *errstring;

	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

	/* Call the generic disklabel extraction routine. */
	errstring = readdisklabel(MAKEDISKDEV(0, device_unit(sc->sc_dv),
	    RAW_PART), ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
	if (errstring != NULL)
		printf("%s: %s\n", device_xname(sc->sc_dv), errstring);

	/* In-core label now valid. */
	sc->sc_flags |= LDF_VLABEL;
}

/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sc->sc_secsize;
	lp->d_ntracks = sc->sc_nheads;
	lp->d_nsectors = sc->sc_nsectors;
	lp->d_ncylinders = sc->sc_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_type = DKTYPE_LD;
	strlcpy(lp->d_typename, "unknown", sizeof(lp->d_typename));
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	if (sc->sc_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = sc->sc_secperunit;
	lp->d_rpm = 7200;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Take a dump.
 */
static int
lddump(dev_t dev, daddr_t blkno, void *vav, size_t size)
{
	char *va = vav;
	struct ld_softc *sc;
	struct disklabel *lp;
	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
	static int dumping;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	if (sc->sc_dump == NULL)
		return (ENXIO);

	/* Check if recursive dump; if so, punt. */
	if (dumping)
		return (EFAULT);
	dumping = 1;

	/* Convert to disk sectors.  Request must be a multiple of size. */
	part = DISKPART(dev);
	lp = sc->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0)
		return (EFAULT);
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects))
		return (EINVAL);

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0)
			return (rv);

		towrt -= nblk;
		blkno += nblk;
		va += nblk * sc->sc_secsize;
	}

	dumping = 0;
	return (0);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	struct ld_softc *sc;

	sc = device_lookup_private(&ld_cd, DISKUNIT(bp->b_dev));

	if (bp->b_bcount > sc->sc_maxxfer)
		bp->b_bcount = sc->sc_maxxfer;
	minphys(bp);
}

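/*
 * Publish the geometry supplied (or synthesized) by the attachment to
 * the generic disk layer.
 */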
static void
ld_set_geometry(struct ld_softc *ld)
{
	struct disk_geom *dg = &ld->sc_dk.dk_geom;

	memset(dg, 0, sizeof(*dg));

	dg->dg_secperunit = ld->sc_secperunit;
	dg->dg_secsize = ld->sc_secsize;
	dg->dg_nsectors = ld->sc_nsectors;
	dg->dg_ntracks = ld->sc_nheads;
	dg->dg_ncylinders = ld->sc_ncylinders;

	disk_set_info(ld->sc_dv, &ld->sc_dk, NULL);
}

static void
ld_config_interrupts(device_t d)
{
	struct ld_softc *sc = device_private(d);
	dkwedge_discover(&sc->sc_dk);
}