/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 69 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 70 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 71 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 72 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 73 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 74 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 75 * SUCH DAMAGE. 76 * 77 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94 78 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $ 79 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $ 80 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $ 81 */ 82 83 #include <sys/param.h> 84 #include <sys/systm.h> 85 #include <sys/kernel.h> 86 #include <sys/proc.h> 87 #include <sys/sysctl.h> 88 #include <sys/buf.h> 89 #include <sys/conf.h> 90 #include <sys/disklabel.h> 91 #include <sys/disklabel32.h> 92 #include <sys/disklabel64.h> 93 #include <sys/diskslice.h> 94 #include <sys/diskmbr.h> 95 #include <sys/disk.h> 96 #include <sys/kerneldump.h> 97 #include <sys/malloc.h> 98 #include <sys/sysctl.h> 99 #include <machine/md_var.h> 100 #include <sys/ctype.h> 101 #include <sys/syslog.h> 102 #include <sys/device.h> 103 #include <sys/msgport.h> 104 #include <sys/msgport2.h> 105 #include <sys/buf2.h> 106 #include <sys/devfs.h> 107 #include <sys/thread.h> 108 #include <sys/thread2.h> 109 #include <sys/dsched.h> 110 #include <sys/queue.h> 111 #include <sys/lock.h> 112 113 static MALLOC_DEFINE(M_DISK, "disk", "disk data"); 114 static int disk_debug_enable = 0; 115 116 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t); 117 static void disk_msg_core(void *); 118 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe); 119 static void disk_probe(struct disk 
*dp, int reprobe); 120 static void _setdiskinfo(struct disk *disk, struct disk_info *info); 121 static void bioqwritereorder(struct bio_queue_head *bioq); 122 static void disk_cleanserial(char *serno); 123 124 static d_open_t diskopen; 125 static d_close_t diskclose; 126 static d_ioctl_t diskioctl; 127 static d_strategy_t diskstrategy; 128 static d_psize_t diskpsize; 129 static d_clone_t diskclone; 130 static d_dump_t diskdump; 131 132 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist); 133 static struct lwkt_token disklist_token; 134 135 static struct dev_ops disk_ops = { 136 { "disk", 0, D_DISK }, 137 .d_open = diskopen, 138 .d_close = diskclose, 139 .d_read = physread, 140 .d_write = physwrite, 141 .d_ioctl = diskioctl, 142 .d_strategy = diskstrategy, 143 .d_dump = diskdump, 144 .d_psize = diskpsize, 145 .d_clone = diskclone 146 }; 147 148 static struct objcache *disk_msg_cache; 149 150 struct objcache_malloc_args disk_msg_malloc_args = { 151 sizeof(struct disk_msg), M_DISK }; 152 153 static struct lwkt_port disk_dispose_port; 154 static struct lwkt_port disk_msg_port; 155 156 static int 157 disk_debug(int level, char *fmt, ...) 158 { 159 __va_list ap; 160 161 __va_start(ap, fmt); 162 if (level <= disk_debug_enable) 163 kvprintf(fmt, ap); 164 __va_end(ap); 165 166 return 0; 167 } 168 169 static int 170 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe) 171 { 172 struct disk_info *info = &dp->d_info; 173 struct diskslice *sp = &dp->d_slice->dss_slices[slice]; 174 disklabel_ops_t ops; 175 struct partinfo part; 176 const char *msg; 177 cdev_t ndev; 178 int sno; 179 u_int i; 180 181 disk_debug(2, 182 "disk_probe_slice (begin): %s (%s)\n", 183 dev->si_name, dp->d_cdev->si_name); 184 185 sno = slice ? 
slice - 1 : 0; 186 187 ops = &disklabel32_ops; 188 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info); 189 if (msg && !strcmp(msg, "no disk label")) { 190 ops = &disklabel64_ops; 191 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info); 192 } 193 if (msg == NULL) { 194 if (slice != WHOLE_DISK_SLICE) 195 ops->op_adjust_label_reserved(dp->d_slice, slice, sp); 196 else 197 sp->ds_reserved = 0; 198 199 sp->ds_ops = ops; 200 for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) { 201 ops->op_loadpartinfo(sp->ds_label, i, &part); 202 if (part.fstype) { 203 if (reprobe && 204 (ndev = devfs_find_device_by_name("%s%c", 205 dev->si_name, 'a' + i)) 206 ) { 207 /* 208 * Device already exists and 209 * is still valid. 210 */ 211 ndev->si_flags |= SI_REPROBE_TEST; 212 } else { 213 ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 214 dkmakeminor(dkunit(dp->d_cdev), 215 slice, i), 216 UID_ROOT, GID_OPERATOR, 0640, 217 "%s%c", dev->si_name, 'a'+ i); 218 ndev->si_disk = dp; 219 if (dp->d_info.d_serialno) { 220 make_dev_alias(ndev, 221 "serno/%s.s%d%c", 222 dp->d_info.d_serialno, 223 sno, 'a' + i); 224 } 225 ndev->si_flags |= SI_REPROBE_TEST; 226 } 227 } 228 } 229 } else if (info->d_dsflags & DSO_COMPATLABEL) { 230 msg = NULL; 231 if (sp->ds_size >= 0x100000000ULL) 232 ops = &disklabel64_ops; 233 else 234 ops = &disklabel32_ops; 235 sp->ds_label = ops->op_clone_label(info, sp); 236 } else { 237 if (sp->ds_type == DOSPTYP_386BSD || /* XXX */ 238 sp->ds_type == DOSPTYP_NETBSD || 239 sp->ds_type == DOSPTYP_OPENBSD) { 240 log(LOG_WARNING, "%s: cannot find label (%s)\n", 241 dev->si_name, msg); 242 } 243 } 244 245 if (msg == NULL) { 246 sp->ds_wlabel = FALSE; 247 } 248 249 return (msg ? EINVAL : 0); 250 } 251 252 /* 253 * This routine is only called for newly minted drives or to reprobe 254 * a drive with no open slices. disk_probe_slice() is called directly 255 * when reprobing partition changes within slices. 
256 */ 257 static void 258 disk_probe(struct disk *dp, int reprobe) 259 { 260 struct disk_info *info = &dp->d_info; 261 cdev_t dev = dp->d_cdev; 262 cdev_t ndev; 263 int error, i, sno; 264 struct diskslices *osp; 265 struct diskslice *sp; 266 267 KKASSERT (info->d_media_blksize != 0); 268 269 osp = dp->d_slice; 270 dp->d_slice = dsmakeslicestruct(BASE_SLICE, info); 271 disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name); 272 273 error = mbrinit(dev, info, &(dp->d_slice)); 274 if (error) { 275 dsgone(&osp); 276 return; 277 } 278 279 for (i = 0; i < dp->d_slice->dss_nslices; i++) { 280 /* 281 * Ignore the whole-disk slice, it has already been created. 282 */ 283 if (i == WHOLE_DISK_SLICE) 284 continue; 285 sp = &dp->d_slice->dss_slices[i]; 286 287 /* 288 * Handle s0. s0 is a compatibility slice if there are no 289 * other slices and it has not otherwise been set up, else 290 * we ignore it. 291 */ 292 if (i == COMPATIBILITY_SLICE) { 293 sno = 0; 294 if (sp->ds_type == 0 && 295 dp->d_slice->dss_nslices == BASE_SLICE) { 296 sp->ds_size = info->d_media_blocks; 297 sp->ds_reserved = 0; 298 } 299 } else { 300 sno = i - 1; 301 sp->ds_reserved = 0; 302 } 303 304 /* 305 * Ignore 0-length slices 306 */ 307 if (sp->ds_size == 0) 308 continue; 309 310 if (reprobe && 311 (ndev = devfs_find_device_by_name("%ss%d", 312 dev->si_name, sno))) { 313 /* 314 * Device already exists and is still valid 315 */ 316 ndev->si_flags |= SI_REPROBE_TEST; 317 } else { 318 /* 319 * Else create new device 320 */ 321 ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 322 dkmakewholeslice(dkunit(dev), i), 323 UID_ROOT, GID_OPERATOR, 0640, 324 "%ss%d", dev->si_name, sno); 325 if (dp->d_info.d_serialno) { 326 make_dev_alias(ndev, "serno/%s.s%d", 327 dp->d_info.d_serialno, sno); 328 } 329 ndev->si_disk = dp; 330 ndev->si_flags |= SI_REPROBE_TEST; 331 } 332 sp->ds_dev = ndev; 333 334 /* 335 * Probe appropriate slices for a disklabel 336 * 337 * XXX slice type 1 used by our gpt probe 
code. 338 * XXX slice type 0 used by mbr compat slice. 339 */ 340 if (sp->ds_type == DOSPTYP_386BSD || 341 sp->ds_type == DOSPTYP_NETBSD || 342 sp->ds_type == DOSPTYP_OPENBSD || 343 sp->ds_type == 0 || 344 sp->ds_type == 1) { 345 if (dp->d_slice->dss_first_bsd_slice == 0) 346 dp->d_slice->dss_first_bsd_slice = i; 347 disk_probe_slice(dp, ndev, i, reprobe); 348 } 349 } 350 dsgone(&osp); 351 disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name); 352 } 353 354 355 static void 356 disk_msg_core(void *arg) 357 { 358 struct disk *dp; 359 struct diskslice *sp; 360 disk_msg_t msg; 361 int run; 362 363 lwkt_initport_thread(&disk_msg_port, curthread); 364 wakeup(curthread); 365 run = 1; 366 367 while (run) { 368 msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0); 369 370 switch (msg->hdr.u.ms_result) { 371 case DISK_DISK_PROBE: 372 dp = (struct disk *)msg->load; 373 disk_debug(1, 374 "DISK_DISK_PROBE: %s\n", 375 dp->d_cdev->si_name); 376 disk_probe(dp, 0); 377 break; 378 case DISK_DISK_DESTROY: 379 dp = (struct disk *)msg->load; 380 disk_debug(1, 381 "DISK_DISK_DESTROY: %s\n", 382 dp->d_cdev->si_name); 383 devfs_destroy_subnames(dp->d_cdev->si_name); 384 devfs_destroy_dev(dp->d_cdev); 385 lwkt_gettoken(&disklist_token); 386 LIST_REMOVE(dp, d_list); 387 lwkt_reltoken(&disklist_token); 388 if (dp->d_info.d_serialno) { 389 kfree(dp->d_info.d_serialno, M_TEMP); 390 dp->d_info.d_serialno = NULL; 391 } 392 break; 393 case DISK_UNPROBE: 394 dp = (struct disk *)msg->load; 395 disk_debug(1, 396 "DISK_DISK_UNPROBE: %s\n", 397 dp->d_cdev->si_name); 398 devfs_destroy_subnames(dp->d_cdev->si_name); 399 break; 400 case DISK_SLICE_REPROBE: 401 dp = (struct disk *)msg->load; 402 sp = (struct diskslice *)msg->load2; 403 devfs_clr_subnames_flag(sp->ds_dev->si_name, 404 SI_REPROBE_TEST); 405 disk_debug(1, 406 "DISK_SLICE_REPROBE: %s\n", 407 sp->ds_dev->si_name); 408 disk_probe_slice(dp, sp->ds_dev, 409 dkslice(sp->ds_dev), 1); 410 devfs_destroy_subnames_without_flag( 411 
sp->ds_dev->si_name, SI_REPROBE_TEST); 412 break; 413 case DISK_DISK_REPROBE: 414 dp = (struct disk *)msg->load; 415 devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST); 416 disk_debug(1, 417 "DISK_DISK_REPROBE: %s\n", 418 dp->d_cdev->si_name); 419 disk_probe(dp, 1); 420 devfs_destroy_subnames_without_flag( 421 dp->d_cdev->si_name, SI_REPROBE_TEST); 422 break; 423 case DISK_SYNC: 424 disk_debug(1, "DISK_SYNC\n"); 425 break; 426 default: 427 devfs_debug(DEVFS_DEBUG_WARNING, 428 "disk_msg_core: unknown message " 429 "received at core\n"); 430 break; 431 } 432 lwkt_replymsg(&msg->hdr, 0); 433 } 434 lwkt_exit(); 435 } 436 437 438 /* 439 * Acts as a message drain. Any message that is replied to here gets 440 * destroyed and the memory freed. 441 */ 442 static void 443 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg) 444 { 445 objcache_put(disk_msg_cache, msg); 446 } 447 448 449 void 450 disk_msg_send(uint32_t cmd, void *load, void *load2) 451 { 452 disk_msg_t disk_msg; 453 lwkt_port_t port = &disk_msg_port; 454 455 disk_msg = objcache_get(disk_msg_cache, M_WAITOK); 456 457 lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0); 458 459 disk_msg->hdr.u.ms_result = cmd; 460 disk_msg->load = load; 461 disk_msg->load2 = load2; 462 KKASSERT(port); 463 lwkt_sendmsg(port, &disk_msg->hdr); 464 } 465 466 void 467 disk_msg_send_sync(uint32_t cmd, void *load, void *load2) 468 { 469 struct lwkt_port rep_port; 470 disk_msg_t disk_msg; 471 lwkt_port_t port; 472 473 disk_msg = objcache_get(disk_msg_cache, M_WAITOK); 474 port = &disk_msg_port; 475 476 /* XXX could probably use curthread's built-in msgport */ 477 lwkt_initport_thread(&rep_port, curthread); 478 lwkt_initmsg(&disk_msg->hdr, &rep_port, 0); 479 480 disk_msg->hdr.u.ms_result = cmd; 481 disk_msg->load = load; 482 disk_msg->load2 = load2; 483 484 lwkt_sendmsg(port, &disk_msg->hdr); 485 lwkt_waitmsg(&disk_msg->hdr, 0); 486 objcache_put(disk_msg_cache, disk_msg); 487 } 488 489 /* 490 * Create a raw device for the 
dev_ops template (which is returned). Also 491 * create a slice and unit managed disk and overload the user visible 492 * device space with it. 493 * 494 * NOTE: The returned raw device is NOT a slice and unit managed device. 495 * It is an actual raw device representing the raw disk as specified by 496 * the passed dev_ops. The disk layer not only returns such a raw device, 497 * it also uses it internally when passing (modified) commands through. 498 */ 499 cdev_t 500 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops) 501 { 502 return disk_create_named(NULL, unit, dp, raw_ops); 503 } 504 505 cdev_t 506 disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops) 507 { 508 cdev_t rawdev; 509 510 if (name == NULL) 511 name = raw_ops->head.name; 512 513 disk_debug(1, "disk_create (begin): %s%d\n", name, unit); 514 515 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit), 516 UID_ROOT, GID_OPERATOR, 0640, 517 "%s%d", name, unit); 518 519 bzero(dp, sizeof(*dp)); 520 521 dp->d_rawdev = rawdev; 522 dp->d_raw_ops = raw_ops; 523 dp->d_dev_ops = &disk_ops; 524 dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 525 dkmakewholedisk(unit), 526 UID_ROOT, GID_OPERATOR, 0640, 527 "%s%d", name, unit); 528 529 dp->d_cdev->si_disk = dp; 530 531 dsched_disk_create_callback(dp, name, unit); 532 533 lwkt_gettoken(&disklist_token); 534 LIST_INSERT_HEAD(&disklist, dp, d_list); 535 lwkt_reltoken(&disklist_token); 536 537 disk_debug(1, "disk_create (end): %s%d\n", name, unit); 538 539 return (dp->d_rawdev); 540 } 541 542 static void 543 _setdiskinfo(struct disk *disk, struct disk_info *info) 544 { 545 char *oldserialno; 546 547 oldserialno = disk->d_info.d_serialno; 548 bcopy(info, &disk->d_info, sizeof(disk->d_info)); 549 info = &disk->d_info; 550 551 disk_debug(1, 552 "_setdiskinfo: %s\n", 553 disk->d_cdev->si_name); 554 555 /* 556 * The serial number is duplicated so the caller can throw 557 * their copy away. 
558 */ 559 if (info->d_serialno && info->d_serialno[0]) { 560 info->d_serialno = kstrdup(info->d_serialno, M_TEMP); 561 disk_cleanserial(info->d_serialno); 562 if (disk->d_cdev) { 563 make_dev_alias(disk->d_cdev, "serno/%s", 564 info->d_serialno); 565 } 566 } else { 567 info->d_serialno = NULL; 568 } 569 if (oldserialno) 570 kfree(oldserialno, M_TEMP); 571 572 dsched_disk_update_callback(disk, info); 573 574 /* 575 * The caller may set d_media_size or d_media_blocks and we 576 * calculate the other. 577 */ 578 KKASSERT(info->d_media_size == 0 || info->d_media_blksize == 0); 579 if (info->d_media_size == 0 && info->d_media_blocks) { 580 info->d_media_size = (u_int64_t)info->d_media_blocks * 581 info->d_media_blksize; 582 } else if (info->d_media_size && info->d_media_blocks == 0 && 583 info->d_media_blksize) { 584 info->d_media_blocks = info->d_media_size / 585 info->d_media_blksize; 586 } 587 588 /* 589 * The si_* fields for rawdev are not set until after the 590 * disk_create() call, so someone using the cooked version 591 * of the raw device (i.e. da0s0) will not get the right 592 * si_iosize_max unless we fix it up here. 593 */ 594 if (disk->d_cdev && disk->d_rawdev && 595 disk->d_cdev->si_iosize_max == 0) { 596 disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max; 597 disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys; 598 disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best; 599 } 600 } 601 602 /* 603 * Disk drivers must call this routine when media parameters are available 604 * or have changed. 
605 */ 606 void 607 disk_setdiskinfo(struct disk *disk, struct disk_info *info) 608 { 609 _setdiskinfo(disk, info); 610 disk_msg_send(DISK_DISK_PROBE, disk, NULL); 611 disk_debug(1, 612 "disk_setdiskinfo: sent probe for %s\n", 613 disk->d_cdev->si_name); 614 } 615 616 void 617 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info) 618 { 619 _setdiskinfo(disk, info); 620 disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL); 621 disk_debug(1, 622 "disk_setdiskinfo_sync: sent probe for %s\n", 623 disk->d_cdev->si_name); 624 } 625 626 /* 627 * This routine is called when an adapter detaches. The higher level 628 * managed disk device is destroyed while the lower level raw device is 629 * released. 630 */ 631 void 632 disk_destroy(struct disk *disk) 633 { 634 dsched_disk_destroy_callback(disk); 635 disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL); 636 return; 637 } 638 639 int 640 disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno, u_int32_t *secsize) 641 { 642 struct partinfo pinfo; 643 int error; 644 645 bzero(&pinfo, sizeof(pinfo)); 646 error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0, 647 proc0.p_ucred, NULL); 648 if (error) 649 return (error); 650 651 if (pinfo.media_blksize == 0) 652 return (ENXIO); 653 654 if (blkno) /* XXX: make sure this reserved stuff is right */ 655 *blkno = pinfo.reserved_blocks + 656 pinfo.media_offset / pinfo.media_blksize; 657 if (secsize) 658 *secsize = pinfo.media_blksize; 659 if (size) 660 *size = (pinfo.media_blocks - pinfo.reserved_blocks); 661 662 return (0); 663 } 664 665 int 666 disk_dumpconf(cdev_t dev, u_int onoff) 667 { 668 struct dumperinfo di; 669 u_int64_t size, blkno; 670 u_int32_t secsize; 671 int error; 672 673 if (!onoff) 674 return set_dumper(NULL); 675 676 error = disk_dumpcheck(dev, &size, &blkno, &secsize); 677 678 if (error) 679 return ENXIO; 680 681 bzero(&di, sizeof(struct dumperinfo)); 682 di.dumper = diskdump; 683 di.priv = dev; 684 di.blocksize = secsize; 685 di.mediaoffset = blkno * 
DEV_BSIZE; 686 di.mediasize = size * DEV_BSIZE; 687 688 return set_dumper(&di); 689 } 690 691 void 692 disk_unprobe(struct disk *disk) 693 { 694 if (disk == NULL) 695 return; 696 697 disk_msg_send_sync(DISK_UNPROBE, disk, NULL); 698 } 699 700 void 701 disk_invalidate (struct disk *disk) 702 { 703 dsgone(&disk->d_slice); 704 } 705 706 struct disk * 707 disk_enumerate(struct disk *disk) 708 { 709 struct disk *dp; 710 711 lwkt_gettoken(&disklist_token); 712 if (!disk) 713 dp = (LIST_FIRST(&disklist)); 714 else 715 dp = (LIST_NEXT(disk, d_list)); 716 lwkt_reltoken(&disklist_token); 717 718 return dp; 719 } 720 721 static 722 int 723 sysctl_disks(SYSCTL_HANDLER_ARGS) 724 { 725 struct disk *disk; 726 int error, first; 727 728 disk = NULL; 729 first = 1; 730 731 while ((disk = disk_enumerate(disk))) { 732 if (!first) { 733 error = SYSCTL_OUT(req, " ", 1); 734 if (error) 735 return error; 736 } else { 737 first = 0; 738 } 739 error = SYSCTL_OUT(req, disk->d_rawdev->si_name, 740 strlen(disk->d_rawdev->si_name)); 741 if (error) 742 return error; 743 } 744 error = SYSCTL_OUT(req, "", 1); 745 return error; 746 } 747 748 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, 749 sysctl_disks, "A", "names of available disks"); 750 751 /* 752 * Open a disk device or partition. 753 */ 754 static 755 int 756 diskopen(struct dev_open_args *ap) 757 { 758 cdev_t dev = ap->a_head.a_dev; 759 struct disk *dp; 760 int error; 761 762 /* 763 * dp can't be NULL here XXX. 764 * 765 * d_slice will be NULL if setdiskinfo() has not been called yet. 766 * setdiskinfo() is typically called whether the disk is present 767 * or not (e.g. CD), but the base disk device is created first 768 * and there may be a race. 
769 */ 770 dp = dev->si_disk; 771 if (dp == NULL || dp->d_slice == NULL) 772 return (ENXIO); 773 error = 0; 774 775 /* 776 * Deal with open races 777 */ 778 while (dp->d_flags & DISKFLAG_LOCK) { 779 dp->d_flags |= DISKFLAG_WANTED; 780 error = tsleep(dp, PCATCH, "diskopen", hz); 781 if (error) 782 return (error); 783 } 784 dp->d_flags |= DISKFLAG_LOCK; 785 786 /* 787 * Open the underlying raw device. 788 */ 789 if (!dsisopen(dp->d_slice)) { 790 #if 0 791 if (!pdev->si_iosize_max) 792 pdev->si_iosize_max = dev->si_iosize_max; 793 #endif 794 error = dev_dopen(dp->d_rawdev, ap->a_oflags, 795 ap->a_devtype, ap->a_cred); 796 } 797 #if 0 798 /* 799 * Inherit properties from the underlying device now that it is 800 * open. 801 */ 802 dev_dclone(dev); 803 #endif 804 805 if (error) 806 goto out; 807 error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags, 808 &dp->d_slice, &dp->d_info); 809 if (!dsisopen(dp->d_slice)) { 810 dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype); 811 } 812 out: 813 dp->d_flags &= ~DISKFLAG_LOCK; 814 if (dp->d_flags & DISKFLAG_WANTED) { 815 dp->d_flags &= ~DISKFLAG_WANTED; 816 wakeup(dp); 817 } 818 819 return(error); 820 } 821 822 /* 823 * Close a disk device or partition 824 */ 825 static 826 int 827 diskclose(struct dev_close_args *ap) 828 { 829 cdev_t dev = ap->a_head.a_dev; 830 struct disk *dp; 831 int error; 832 833 error = 0; 834 dp = dev->si_disk; 835 836 dsclose(dev, ap->a_devtype, dp->d_slice); 837 if (!dsisopen(dp->d_slice)) { 838 error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype); 839 } 840 return (error); 841 } 842 843 /* 844 * First execute the ioctl on the disk device, and if it isn't supported 845 * try running it on the backing device. 
846 */ 847 static 848 int 849 diskioctl(struct dev_ioctl_args *ap) 850 { 851 cdev_t dev = ap->a_head.a_dev; 852 struct disk *dp; 853 int error; 854 u_int u; 855 856 dp = dev->si_disk; 857 if (dp == NULL) 858 return (ENXIO); 859 860 devfs_debug(DEVFS_DEBUG_DEBUG, 861 "diskioctl: cmd is: %lx (name: %s)\n", 862 ap->a_cmd, dev->si_name); 863 devfs_debug(DEVFS_DEBUG_DEBUG, 864 "diskioctl: &dp->d_slice is: %p, %p\n", 865 &dp->d_slice, dp->d_slice); 866 867 if (ap->a_cmd == DIOCGKERNELDUMP) { 868 u = *(u_int *)ap->a_data; 869 return disk_dumpconf(dev, u); 870 } 871 872 if (&dp->d_slice == NULL || dp->d_slice == NULL) { 873 error = ENOIOCTL; 874 } else { 875 error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag, 876 &dp->d_slice, &dp->d_info); 877 } 878 879 if (error == ENOIOCTL) { 880 error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data, 881 ap->a_fflag, ap->a_cred, NULL); 882 } 883 return (error); 884 } 885 886 /* 887 * Execute strategy routine 888 */ 889 static 890 int 891 diskstrategy(struct dev_strategy_args *ap) 892 { 893 cdev_t dev = ap->a_head.a_dev; 894 struct bio *bio = ap->a_bio; 895 struct bio *nbio; 896 struct disk *dp; 897 898 dp = dev->si_disk; 899 900 if (dp == NULL) { 901 bio->bio_buf->b_error = ENXIO; 902 bio->bio_buf->b_flags |= B_ERROR; 903 biodone(bio); 904 return(0); 905 } 906 KKASSERT(dev->si_disk == dp); 907 908 /* 909 * The dscheck() function will also transform the slice relative 910 * block number i.e. bio->bio_offset into a block number that can be 911 * passed directly to the underlying raw device. If dscheck() 912 * returns NULL it will have handled the bio for us (e.g. EOF 913 * or error due to being beyond the device size). 914 */ 915 if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) { 916 dsched_queue(dp, nbio); 917 } else { 918 biodone(bio); 919 } 920 return(0); 921 } 922 923 /* 924 * Return the partition size in ?blocks? 
925 */ 926 static 927 int 928 diskpsize(struct dev_psize_args *ap) 929 { 930 cdev_t dev = ap->a_head.a_dev; 931 struct disk *dp; 932 933 dp = dev->si_disk; 934 if (dp == NULL) 935 return(ENODEV); 936 ap->a_result = dssize(dev, &dp->d_slice); 937 return(0); 938 } 939 940 /* 941 * When new device entries are instantiated, make sure they inherit our 942 * si_disk structure and block and iosize limits from the raw device. 943 * 944 * This routine is always called synchronously in the context of the 945 * client. 946 * 947 * XXX The various io and block size constraints are not always initialized 948 * properly by devices. 949 */ 950 static 951 int 952 diskclone(struct dev_clone_args *ap) 953 { 954 cdev_t dev = ap->a_head.a_dev; 955 struct disk *dp; 956 dp = dev->si_disk; 957 958 KKASSERT(dp != NULL); 959 dev->si_disk = dp; 960 dev->si_iosize_max = dp->d_rawdev->si_iosize_max; 961 dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys; 962 dev->si_bsize_best = dp->d_rawdev->si_bsize_best; 963 return(0); 964 } 965 966 int 967 diskdump(struct dev_dump_args *ap) 968 { 969 cdev_t dev = ap->a_head.a_dev; 970 struct disk *dp = dev->si_disk; 971 u_int64_t size, offset; 972 int error; 973 974 error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize); 975 /* XXX: this should probably go in disk_dumpcheck somehow */ 976 if (ap->a_length != 0) { 977 size *= DEV_BSIZE; 978 offset = ap->a_blkno * DEV_BSIZE; 979 if ((ap->a_offset < offset) || 980 (ap->a_offset + ap->a_length - offset > size)) { 981 kprintf("Attempt to write outside dump device boundaries.\n"); 982 error = ENOSPC; 983 } 984 } 985 986 if (error == 0) { 987 ap->a_head.a_dev = dp->d_rawdev; 988 error = dev_doperate(&ap->a_head); 989 } 990 991 return(error); 992 } 993 994 995 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD, 996 0, sizeof(struct diskslices), "sizeof(struct diskslices)"); 997 998 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD, 999 0, sizeof(struct disk), "sizeof(struct disk)"); 1000 1001 
/* 1002 * Reorder interval for burst write allowance and minor write 1003 * allowance. 1004 * 1005 * We always want to trickle some writes in to make use of the 1006 * disk's zone cache. Bursting occurs on a longer interval and only 1007 * runningbufspace is well over the hirunningspace limit. 1008 */ 1009 int bioq_reorder_burst_interval = 60; /* should be multiple of minor */ 1010 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval, 1011 CTLFLAG_RW, &bioq_reorder_burst_interval, 0, ""); 1012 int bioq_reorder_minor_interval = 5; 1013 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval, 1014 CTLFLAG_RW, &bioq_reorder_minor_interval, 0, ""); 1015 1016 int bioq_reorder_burst_bytes = 3000000; 1017 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes, 1018 CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, ""); 1019 int bioq_reorder_minor_bytes = 262144; 1020 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes, 1021 CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, ""); 1022 1023 1024 /* 1025 * Order I/Os. Generally speaking this code is designed to make better 1026 * use of drive zone caches. A drive zone cache can typically track linear 1027 * reads or writes for around 16 zones simultaniously. 1028 * 1029 * Read prioritization issues: It is possible for hundreds of megabytes worth 1030 * of writes to be queued asynchronously. This creates a huge bottleneck 1031 * for reads which reduce read bandwidth to a trickle. 1032 * 1033 * To solve this problem we generally reorder reads before writes. 1034 * 1035 * However, a large number of random reads can also starve writes and 1036 * make poor use of the drive zone cache so we allow writes to trickle 1037 * in every N reads. 1038 */ 1039 void 1040 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio) 1041 { 1042 /* 1043 * The BIO wants to be ordered. Adding to the tail also 1044 * causes transition to be set to NULL, forcing the ordering 1045 * of all prior I/O's. 
1046 */ 1047 if (bio->bio_buf->b_flags & B_ORDERED) { 1048 bioq_insert_tail(bioq, bio); 1049 return; 1050 } 1051 1052 switch(bio->bio_buf->b_cmd) { 1053 case BUF_CMD_READ: 1054 if (bioq->transition) { 1055 /* 1056 * Insert before the first write. Bleedover writes 1057 * based on reorder intervals to prevent starvation. 1058 */ 1059 TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act); 1060 ++bioq->reorder; 1061 if (bioq->reorder % bioq_reorder_minor_interval == 0) { 1062 bioqwritereorder(bioq); 1063 if (bioq->reorder >= 1064 bioq_reorder_burst_interval) { 1065 bioq->reorder = 0; 1066 } 1067 } 1068 } else { 1069 /* 1070 * No writes queued (or ordering was forced), 1071 * insert at tail. 1072 */ 1073 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act); 1074 } 1075 break; 1076 case BUF_CMD_WRITE: 1077 /* 1078 * Writes are always appended. If no writes were previously 1079 * queued or an ordered tail insertion occured the transition 1080 * field will be NULL. 1081 */ 1082 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act); 1083 if (bioq->transition == NULL) 1084 bioq->transition = bio; 1085 break; 1086 default: 1087 /* 1088 * All other request types are forced to be ordered. 1089 */ 1090 bioq_insert_tail(bioq, bio); 1091 break; 1092 } 1093 } 1094 1095 /* 1096 * Move the read-write transition point to prevent reads from 1097 * completely starving our writes. This brings a number of writes into 1098 * the fold every N reads. 1099 * 1100 * We bring a few linear writes into the fold on a minor interval 1101 * and we bring a non-linear burst of writes into the fold on a major 1102 * interval. Bursting only occurs if runningbufspace is really high 1103 * (typically from syncs, fsyncs, or HAMMER flushes). 
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;		/* remaining write-byte budget this pass */
	size_t n;		/* byte count of the current write */
	int check_off;		/* 1 = only accept offset-contiguous writes */

	/*
	 * Select the byte budget.  Use the small linear budget unless we
	 * are at/past the burst interval AND running buffer space is
	 * severe, in which case a larger non-linear burst is allowed.
	 */
	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	/*
	 * Advance the transition point past queued writes until the byte
	 * budget is consumed or (in minor mode) the writes stop being
	 * offset-contiguous.  Reads inserted later will land after these
	 * writes.
	 *
	 * NOTE(review): the first dereference of bioq->transition below is
	 * unconditional — callers must guarantee it is non-NULL on entry
	 * (bioqdisksort only calls this from the transition != NULL path).
	 */
	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}

/*
 * Bounds checking against the media size, used for the raw partition.
 * secsize, mediasize and b_blkno must all be the same units.
 * Possibly this has to be DEV_BSIZE (512).
 *
 * Returns 1 if the (possibly truncated) request may proceed, 0 if the
 * caller should terminate the I/O (EOF or error recorded in the buf).
 */
int
bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
{
	struct buf *bp = bio->bio_buf;
	int64_t sz;

	/* Request size in sectors, rounded up. */
	sz = howmany(bp->b_bcount, secsize);

	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
		sz = mediasize - bio->bio_offset/DEV_BSIZE;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			return 0;
		}
		if (sz < 0) {
			/*
			 * If past end of disk, return EINVAL.
			 *
			 * NOTE(review): only b_error is set here; no error
			 * flag is raised in this function.  Confirm callers
			 * act on b_error when 0 is returned.
			 */
			bp->b_error = EINVAL;
			return 0;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz * secsize;
	}

	return 1;
}

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  donecnt should be 0 if the position of the error
 * is unknown.  The message should be completed (with at least a newline)
 * with kprintf or log(-1, ...).  There is no trailing space.
 *
 * NOTE(review): this comment historically said output goes through log()
 * when pri != LOG_PRINTF, but the visible code below always uses
 * kprintf and never reads 'pri' — confirm against callers before
 * relying on the priority argument.
 */
void
diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
{
	struct buf *bp = bio->bio_buf;
	const char *term;

	/* Describe the failed operation by command type. */
	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		term = "read";
		break;
	case BUF_CMD_WRITE:
		term = "write";
		break;
	default:
		term = "access";
		break;
	}
	kprintf("%s: %s %sing ", dev->si_name, what, term);
	kprintf("offset %012llx for %d",
		(long long)bio->bio_offset,
		bp->b_bcount);

	if (donecnt)
		kprintf(" (%d bytes completed)", donecnt);
}

/*
 * Locate a disk device by devfs name; returns the cdev or whatever
 * devfs_find_device_by_name() yields on failure.
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name(devname);
}

/*
 * Synchronously flush the disk messaging subsystem (arg is unused).
 */
void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}

/*
 * Subsystem initialization: set up the disk message objcache, the
 * disklist token, the reply-drain port, and the message-core thread.
 * Blocks until the core thread wakes us (tsleep on td_core).
 */
static void
disk_init(void)
{
	struct thread* td_core;

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token, 1);

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "disk_msg_core");

	/* Wait for disk_msg_core to signal that it is up. */
	tsleep(td_core, 0, "diskcore", 0);
}

/*
 * Subsystem teardown: release the disk message objcache.
 */
static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}

/* 1255 * Clean out illegal characters in serial numbers. 1256 */ 1257 static void 1258 disk_cleanserial(char *serno) 1259 { 1260 char c; 1261 1262 while ((c = *serno) != 0) { 1263 if (c >= 'a' && c <= 'z') 1264 ; 1265 else if (c >= 'A' && c <= 'Z') 1266 ; 1267 else if (c >= '0' && c <= '9') 1268 ; 1269 else if (c == '-' || c == '@' || c == '+' || c == '.') 1270 ; 1271 else 1272 c = '_'; 1273 *serno++= c; 1274 } 1275 } 1276 1277 TUNABLE_INT("kern.disk_debug", &disk_debug_enable); 1278 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable, 1279 0, "Enable subr_disk debugging"); 1280 1281 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL); 1282 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL); 1283