/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 2004 Matthew Dillon.
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
 * $DragonFly: src/sys/kern/subr_disk.c,v 1.12 2004/06/02 19:31:02 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/diskslice.h>
#include <sys/disk.h>
#include <sys/malloc.h>
#include <machine/md_var.h>
#include <sys/ctype.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>
#include <sys/buf2.h>

static MALLOC_DEFINE(M_DISK, "disk", "disk data");

static d_strategy_t diskstrategy;
static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_psize_t diskpsize;
static d_clone_t diskclone;
static int disk_putport(lwkt_port_t port, lwkt_msg_t msg);

static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);

/*
 * Create a slice and unit managed disk.
 *
 * Our port layer will be responsible for assigning pblkno and handling
 * high level partition operations, then forwarding the requests to the
 * raw device.
 *
 * The raw device (based on rawsw) is returned to the caller, NOT the
 * slice and unit managed cdev.  The caller typically sets various
 * driver parameters and IO limits on the returned rawdev which we must
 * inherit when our managed device is opened.
 */
dev_t
disk_create(int unit, struct disk *dp, int flags, struct cdevsw *rawsw)
{
	dev_t rawdev;
	struct cdevsw *devsw;

	/*
	 * Create the raw backing device
	 */
	compile_devsw(rawsw);
	rawdev = make_dev(rawsw, dkmakeminor(unit, WHOLE_DISK_SLICE, RAW_PART),
			  UID_ROOT, GID_OPERATOR, 0640,
			  "%s%d", rawsw->d_name, unit);

	/*
	 * Initialize our intercept port
	 */
	bzero(dp, sizeof(*dp));
	lwkt_initport(&dp->d_port, NULL);
	dp->d_port.mp_putport = disk_putport;
	dp->d_rawsw = rawsw;

	/*
	 * We install a custom cdevsw rather than the passed cdevsw,
	 * and save our disk structure in d_data so we can get at it easily
	 * without any complex cloning code.
	 */
	devsw = cdevsw_add_override(rawdev, dkunitmask(), dkmakeunit(unit));
	devsw->d_port = &dp->d_port;
	devsw->d_data = dp;
	devsw->d_clone = diskclone;
	dp->d_devsw = devsw;
	dp->d_rawdev = rawdev;
	dp->d_cdev = make_dev(devsw,
			      dkmakeminor(unit, WHOLE_DISK_SLICE, RAW_PART),
			      UID_ROOT, GID_OPERATOR, 0640,
			      "%s%d", devsw->d_name, unit);

	dp->d_dsflags = flags;
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	return (dp->d_rawdev);
}

/*
 * This routine is called when an adapter detaches.  The higher level
 * managed disk device is destroyed while the lower level raw device is
 * released.
 */
void
disk_destroy(struct disk *disk)
{
	if (disk->d_devsw) {
		cdevsw_remove(disk->d_devsw, dkunitmask(), dkunit(disk->d_cdev));
		LIST_REMOVE(disk, d_list);
	}
	if (disk->d_rawsw)
		destroy_all_dev(disk->d_rawsw, dkunitmask(), dkunit(disk->d_rawdev));
	bzero(disk, sizeof(*disk));
}

int
disk_dumpcheck(dev_t dev, u_int *count, u_int *blkno, u_int *secsize)
{
	struct disk *dp;
	struct disklabel *dl;
	u_int boff;

	dp = dev->si_disk;
	if (!dp)
		return (ENXIO);
	if (!dp->d_slice)
		return (ENXIO);
	dl = dsgetlabel(dev, dp->d_slice);
	if (!dl)
		return (ENXIO);
	*count = Maxmem * (PAGE_SIZE / dl->d_secsize);
	if (dumplo <= LABELSECTOR ||
	    (dumplo + *count > dl->d_partitions[dkpart(dev)].p_size))
		return (EINVAL);
	boff = dl->d_partitions[dkpart(dev)].p_offset +
	       dp->d_slice->dss_slices[dkslice(dev)].ds_offset;
	*blkno = boff + dumplo;
	*secsize = dl->d_secsize;
	return (0);
}

void
disk_invalidate(struct disk *disk)
{
	if (disk->d_slice)
		dsgone(&disk->d_slice);
}

struct disk *
disk_enumerate(struct disk *disk)
{
	if (!disk)
		return (LIST_FIRST(&disklist));
	else
		return (LIST_NEXT(disk, d_list));
}

static
int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	struct disk *disk;
	int error, first;

	disk = NULL;
	first = 1;

	while ((disk = disk_enumerate(disk))) {
		if (!first) {
			error = SYSCTL_OUT(req, " ", 1);
			if (error)
				return error;
		} else {
			first = 0;
		}
		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
				   strlen(disk->d_rawdev->si_name));
		if (error)
			return error;
	}
	error = SYSCTL_OUT(req, "", 1);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, 0, NULL,
	    sysctl_disks, "A", "names of available disks");

/*
 * The port intercept functions
 */
static
int
disk_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
	struct disk *disk = (struct disk *)port;
	cdevallmsg_t msg = (cdevallmsg_t)lmsg;
	int error;

	switch(msg->am_lmsg.ms_cmd.cm_op) {
	case CDEV_CMD_OPEN:
		error = diskopen(
			    msg->am_open.msg.dev,
			    msg->am_open.oflags,
			    msg->am_open.devtype,
			    msg->am_open.td);
		break;
	case CDEV_CMD_CLOSE:
		error = diskclose(
			    msg->am_close.msg.dev,
			    msg->am_close.fflag,
			    msg->am_close.devtype,
			    msg->am_close.td);
		break;
	case CDEV_CMD_IOCTL:
		error = diskioctl(
			    msg->am_ioctl.msg.dev,
			    msg->am_ioctl.cmd,
			    msg->am_ioctl.data,
			    msg->am_ioctl.fflag,
			    msg->am_ioctl.td);
		break;
	case CDEV_CMD_STRATEGY:
		diskstrategy(msg->am_strategy.bp);
		error = 0;
		break;
	case CDEV_CMD_PSIZE:
		msg->am_psize.result = diskpsize(msg->am_psize.msg.dev);
		error = 0;	/* XXX */
		break;
	case CDEV_CMD_READ:
		error = physio(msg->am_read.msg.dev,
			       msg->am_read.uio, msg->am_read.ioflag);
		break;
	case CDEV_CMD_WRITE:
		error = physio(msg->am_write.msg.dev,
			       msg->am_write.uio, msg->am_write.ioflag);
		break;
	case CDEV_CMD_POLL:
	case CDEV_CMD_KQFILTER:
		error = ENODEV;
		break;
	case CDEV_CMD_MMAP:
		error = -1;
		break;
	case CDEV_CMD_DUMP:
		error = disk_dumpcheck(msg->am_dump.msg.dev,
				       &msg->am_dump.count,
				       &msg->am_dump.blkno,
				       &msg->am_dump.secsize);
		if (error == 0) {
			msg->am_dump.msg.dev = disk->d_rawdev;
			error = lwkt_forwardmsg(disk->d_rawdev->si_port,
						&msg->am_dump.msg.msg);
			printf("error2 %d\n", error);
		}
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return(error);
}

/*
 * When new device entries are instantiated, make sure they inherit our
 * si_disk structure and block and iosize limits from the raw device.
 *
 * This routine is always called synchronously in the context of the
 * client.
 *
 * XXX The various io and block size constraints are not always initialized
 * properly by devices.
 */
static
int
diskclone(dev_t dev)
{
	struct disk *dp;

	dp = dev->si_devsw->d_data;
	KKASSERT(dp != NULL);
	dev->si_disk = dp;
	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
	return(0);
}

/*
 * Open a disk device or partition.
 */
static
int
diskopen(dev_t dev, int oflags, int devtype, struct thread *td)
{
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 */
	error = 0;
	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);

	/*
	 * Deal with open races
	 */
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error)
			return (error);
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, oflags, devtype, td);
	}

	/*
	 * Inherit properties from the underlying device now that it is
	 * open.
	 */
	diskclone(dev);

	if (error)
		goto out;

	error = dsopen(dev, devtype, dp->d_dsflags, &dp->d_slice, &dp->d_label);

	if (!dsisopen(dp->d_slice))
		dev_dclose(dp->d_rawdev, oflags, devtype, td);
out:
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}

	return(error);
}

/*
 * Close a disk device or partition
 */
static
int
diskclose(dev_t dev, int fflag, int devtype, struct thread *td)
{
	struct disk *dp;
	int error;

	error = 0;
	dp = dev->si_disk;

	dsclose(dev, devtype, dp->d_slice);
	if (!dsisopen(dp->d_slice))
		error = dev_dclose(dp->d_rawdev, fflag, devtype, td);
	return (error);
}

/*
 * Execute strategy routine
 */
static
void
diskstrategy(struct buf *bp)
{
	struct disk *dp;

	dp = bp->b_dev->si_disk;

	if (dp == NULL) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	KKASSERT(bp->b_dev->si_disk == dp);

	if (dscheck(bp, dp->d_slice) <= 0) {
		biodone(bp);
		return;
	}
	bp->b_dev = dp->d_rawdev;
	dev_dstrategy(dp->d_rawdev, bp);
}

/*
 * First execute the ioctl on the disk device, and if it isn't supported
 * try running it on the backing device.
 */
static
int
diskioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct disk *dp;
	int error;

	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);

	error = dsioctl(dev, cmd, data, fflag, &dp->d_slice);
	if (error == ENOIOCTL)
		error = dev_dioctl(dp->d_rawdev, cmd, data, fflag, td);
	return (error);
}

/*
 * Return the size of the requested partition via the slice layer,
 * or -1 if no disk is associated with the device.
 */
static
int
diskpsize(dev_t dev)
{
	struct disk *dp;

	dp = dev->si_disk;
	if (dp == NULL)
		return (-1);
	return(dssize(dev, &dp->d_slice));
#if 0
	if (dp != dev->si_disk) {
		dev->si_drv1 = pdev->si_drv1;
		dev->si_drv2 = pdev->si_drv2;
		/* XXX: don't set bp->b_dev->si_disk (?) */
	}
#endif
}

SYSCTL_DECL(_debug_sizeof);

SYSCTL_INT(_debug_sizeof, OID_AUTO, disklabel, CTLFLAG_RD,
    0, sizeof(struct disklabel), "sizeof(struct disklabel)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
    0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
    0, sizeof(struct disk), "sizeof(struct disk)");


/*
 * Seek sort for disks.
 *
 * The buf_queue keeps two queues, sorted in ascending block order.  The first
 * queue holds those requests which are positioned after the current block
 * (in the first request); the second, which starts at queue->switch_point,
 * holds requests which came in after their block number was passed.  Thus
 * we implement a one way scan, retracting after reaching the end of the drive
 * to the first request on the second queue, at which time it becomes the
 * first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 */
void
bufqdisksort(struct buf_queue_head *bufq, struct buf *bp)
{
	struct buf *bq;
	struct buf *bn;
	struct buf *be;

	be = TAILQ_LAST(&bufq->queue, buf_queue);
	/*
	 * If the queue is empty or we are an
	 * ordered transaction, then it's easy.
	 */
	if ((bq = bufq_first(bufq)) == NULL ||
	    (bp->b_flags & B_ORDERED) != 0) {
		bufq_insert_tail(bufq, bp);
		return;
	} else if (bufq->insert_point != NULL) {

		/*
		 * A certain portion of the list is
		 * "locked" to preserve ordering, so
		 * we can only insert after the insert
		 * point.
		 */
		bq = bufq->insert_point;
	} else {

		/*
		 * If we lie before the last removed (currently active)
		 * request, and are not inserting ourselves into the
		 * "locked" portion of the list, then we must add ourselves
		 * to the second request list.
		 */
		if (bp->b_pblkno < bufq->last_pblkno) {

			bq = bufq->switch_point;
			/*
			 * If we are starting a new secondary list,
			 * then it's easy.
			 */
			if (bq == NULL) {
				bufq->switch_point = bp;
				bufq_insert_tail(bufq, bp);
				return;
			}
			/*
			 * If we lie ahead of the current switch point,
			 * insert us before the switch point and move
			 * the switch point.
			 */
			if (bp->b_pblkno < bq->b_pblkno) {
				bufq->switch_point = bp;
				TAILQ_INSERT_BEFORE(bq, bp, b_act);
				return;
			}
		} else {
			if (bufq->switch_point != NULL)
				be = TAILQ_PREV(bufq->switch_point,
						buf_queue, b_act);
			/*
			 * If we lie between last_pblkno and bq,
			 * insert before bq.
			 */
			if (bp->b_pblkno < bq->b_pblkno) {
				TAILQ_INSERT_BEFORE(bq, bp, b_act);
				return;
			}
		}
	}

	/*
	 * Request is at/after our current position in the list.
	 * Optimize for sequential I/O by seeing if we go at the tail.
	 */
	if (bp->b_pblkno > be->b_pblkno) {
		TAILQ_INSERT_AFTER(&bufq->queue, be, bp, b_act);
		return;
	}

	/* Otherwise, insertion sort */
	while ((bn = TAILQ_NEXT(bq, b_act)) != NULL) {

		/*
		 * We want to go after the current request if it is the end
		 * of the first request list, or if the next request is at a
		 * larger block number than our request.
		 */
		if (bn == bufq->switch_point
		 || bp->b_pblkno < bn->b_pblkno)
			break;
		bq = bn;
	}
	TAILQ_INSERT_AFTER(&bufq->queue, bq, bp, b_act);
}


/*
 * Attempt to read a disk label from a device using the indicated strategy
 * routine.  The label must be partly set up before this: secpercyl, secsize
 * and anything required in the strategy routine (e.g., dummy bounds for the
 * partition containing the label) must be filled in before calling us.
 * Returns NULL on success and an error string on failure.
 */
char *
readdisklabel(dev_t dev, struct disklabel *lp)
{
	struct buf *bp;
	struct disklabel *dlp;
	char *msg = NULL;

	bp = geteblk((int)lp->d_secsize);
	bp->b_dev = dev;
	bp->b_blkno = LABELSECTOR * ((int)lp->d_secsize/DEV_BSIZE);
	bp->b_bcount = lp->d_secsize;
	bp->b_flags &= ~B_INVAL;
	bp->b_flags |= B_READ;
	BUF_STRATEGY(bp, 1);
	if (biowait(bp))
		msg = "I/O error";
	else for (dlp = (struct disklabel *)bp->b_data;
	    dlp <= (struct disklabel *)((char *)bp->b_data +
	    lp->d_secsize - sizeof(*dlp));
	    dlp = (struct disklabel *)((char *)dlp + sizeof(long))) {
		if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC) {
			if (msg == NULL)
				msg = "no disk label";
		} else if (dlp->d_npartitions > MAXPARTITIONS ||
			   dkcksum(dlp) != 0)
			msg = "disk label corrupted";
		else {
			*lp = *dlp;
			msg = NULL;
			break;
		}
	}
	bp->b_flags |= B_INVAL | B_AGE;
	brelse(bp);
	return (msg);
}

/*
 * Check new disk label for sensibility before setting it.
 */
int
setdisklabel(struct disklabel *olp, struct disklabel *nlp, u_long openmask)
{
	int i;
	struct partition *opp, *npp;

	/*
	 * Check it is actually a disklabel we are looking at.
	 */
	if (nlp->d_magic != DISKMAGIC || nlp->d_magic2 != DISKMAGIC ||
	    dkcksum(nlp) != 0)
		return (EINVAL);
	/*
	 * For each partition that we think is open,
	 */
	while ((i = ffs((long)openmask)) != 0) {
		i--;
		/*
		 * Check it is not changing....
		 */
		openmask &= ~(1 << i);
		if (nlp->d_npartitions <= i)
			return (EBUSY);
		opp = &olp->d_partitions[i];
		npp = &nlp->d_partitions[i];
		if (npp->p_offset != opp->p_offset || npp->p_size < opp->p_size)
			return (EBUSY);
		/*
		 * Copy internally-set partition information
		 * if new label doesn't include it.
		 * XXX
		 * (If we are using it then we had better stay the same type)
		 * This is possibly dubious, as someone else noted (XXX)
		 */
		if (npp->p_fstype == FS_UNUSED && opp->p_fstype != FS_UNUSED) {
			npp->p_fstype = opp->p_fstype;
			npp->p_fsize = opp->p_fsize;
			npp->p_frag = opp->p_frag;
			npp->p_cpg = opp->p_cpg;
		}
	}
	nlp->d_checksum = 0;
	nlp->d_checksum = dkcksum(nlp);
	*olp = *nlp;
	return (0);
}

/*
 * Write disk label back to device after modification.
 */
int
writedisklabel(dev_t dev, struct disklabel *lp)
{
	struct buf *bp;
	struct disklabel *dlp;
	int error = 0;

	if (lp->d_partitions[RAW_PART].p_offset != 0)
		return (EXDEV);			/* not quite right */
	bp = geteblk((int)lp->d_secsize);
	bp->b_dev = dkmodpart(dev, RAW_PART);
	bp->b_blkno = LABELSECTOR * ((int)lp->d_secsize/DEV_BSIZE);
	bp->b_bcount = lp->d_secsize;
#if 1
	/*
	 * We read the label first to see if it's there,
	 * in which case we will put ours at the same offset into the block..
	 * (I think this is stupid [Julian])
	 * Note that you can't write a label out over a corrupted label!
	 * (also stupid.. how do you write the first one? by raw writes?)
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_flags |= B_READ;
	BUF_STRATEGY(bp, 1);
	error = biowait(bp);
	if (error)
		goto done;
	for (dlp = (struct disklabel *)bp->b_data;
	    dlp <= (struct disklabel *)
	      ((char *)bp->b_data + lp->d_secsize - sizeof(*dlp));
	    dlp = (struct disklabel *)((char *)dlp + sizeof(long))) {
		if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
		    dkcksum(dlp) == 0) {
			*dlp = *lp;
			bp->b_flags &= ~(B_DONE | B_READ);
			bp->b_flags |= B_WRITE;
			bp->b_dev = dkmodpart(dev, RAW_PART);
#ifdef __alpha__
			alpha_fix_srm_checksum(bp);
#endif
			BUF_STRATEGY(bp, 1);
			error = biowait(bp);
			goto done;
		}
	}
	error = ESRCH;
done:
#else
	bzero(bp->b_data, lp->d_secsize);
	dlp = (struct disklabel *)bp->b_data;
	*dlp = *lp;
	bp->b_flags &= ~B_INVAL;
	bp->b_flags |= B_WRITE;
	BUF_STRATEGY(bp, 1);
	error = biowait(bp);
#endif
	bp->b_flags |= B_INVAL | B_AGE;
	brelse(bp);
	return (error);
}

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be null from drivers that have not
 * been converted to use them.  The message is printed with printf
 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
 * The message should be completed (with at least a newline) with printf
 * or addlog, respectively.  There is no trailing space.
 */
void
diskerr(struct buf *bp, dev_t dev, char *what, int pri,
	int blkdone, struct disklabel *lp)
{
	int unit = dkunit(dev);
	int slice = dkslice(dev);
	int part = dkpart(dev);
	char partname[2];
	char *sname;
	daddr_t sn;

	sname = dsname(dev, unit, slice, part, partname);
	printf("%s%s: %s %sing fsbn ", sname, partname, what,
	       bp->b_flags & B_READ ? "read" : "writ");
"read" : "writ"); 787 sn = bp->b_blkno; 788 if (bp->b_bcount <= DEV_BSIZE) { 789 printf("%ld", (long)sn); 790 } else { 791 if (blkdone >= 0) { 792 sn += blkdone; 793 printf("%ld of ", (long)sn); 794 } 795 printf("%ld-%ld", (long)bp->b_blkno, 796 (long)(bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE)); 797 } 798 if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) { 799 #ifdef tahoe 800 sn *= DEV_BSIZE / lp->d_secsize; /* XXX */ 801 #endif 802 sn += lp->d_partitions[part].p_offset; 803 /* 804 * XXX should add slice offset and not print the slice, 805 * but we don't know the slice pointer. 806 * XXX should print bp->b_pblkno so that this will work 807 * independent of slices, labels and bad sector remapping, 808 * but some drivers don't set bp->b_pblkno. 809 */ 810 printf(" (%s bn %ld; cn %ld", sname, (long)sn, 811 (long)(sn / lp->d_secpercyl)); 812 sn %= (long)lp->d_secpercyl; 813 printf(" tn %ld sn %ld)", (long)(sn / lp->d_nsectors), 814 (long)(sn % lp->d_nsectors)); 815 } 816 } 817