/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/disklabel32.h>
#include <sys/disklabel64.h>
#include <sys/diskslice.h>
#include <sys/diskmbr.h>
#include <sys/disk.h>
#include <sys/kerneldump.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <machine/md_var.h>
#include <sys/ctype.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>
#include <sys/buf2.h>
#include <sys/devfs.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/dsched.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/udev.h>

static MALLOC_DEFINE(M_DISK, "disk", "disk data");
static int disk_debug_enable = 0;

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_clone_t diskclone;
static d_dump_t diskdump;

static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;

static struct dev_ops disk_ops = {
	{ "disk", 0, D_DISK },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
	.d_clone = diskclone
};

static struct objcache *disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;

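/*
 * Illustrative sketch only (kept disabled, not compiled): the general shape
 * of the raw dev_ops template a disk driver hands to disk_create() below.
 * The "mydsk" names are hypothetical; the initializer layout simply mirrors
 * the disk_ops table above.
 */
#if 0
static d_open_t		mydsk_open;
static d_close_t	mydsk_close;
static d_strategy_t	mydsk_strategy;
static d_dump_t		mydsk_dump;

static struct dev_ops mydsk_raw_ops = {
	{ "mydsk", 0, D_DISK },
	.d_open = mydsk_open,
	.d_close = mydsk_close,
	.d_read = physread,
	.d_write = physwrite,
	.d_strategy = mydsk_strategy,
	.d_dump = mydsk_dump
};
#endif
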
static int
disk_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= disk_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct partinfo part;
	const char *msg;
	cdev_t ndev;
	int sno;
	u_int i;

	disk_debug(2,
		    "disk_probe_slice (begin): %s (%s)\n",
		    dev->si_name, dp->d_cdev->si_name);

	sno = slice ? slice - 1 : 0;

	ops = &disklabel32_ops;
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}
	if (msg == NULL) {
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			if (part.fstype) {
				if (reprobe &&
				    (ndev = devfs_find_device_by_name("%s%c",
						dev->si_name, 'a' + i))
				) {
					/*
					 * Device already exists and
					 * is still valid.
					 */
					ndev->si_flags |= SI_REPROBE_TEST;
				} else {
					ndev = make_dev_covering(&disk_ops,
						dp->d_rawdev->si_ops,
						dkmakeminor(dkunit(dp->d_cdev),
							    slice, i),
						UID_ROOT, GID_OPERATOR, 0640,
						"%s%c", dev->si_name, 'a' + i);
					ndev->si_disk = dp;
					udev_dict_set_cstr(ndev, "subsystem", "disk");
					/* Inherit parent's disk type */
					if (dp->d_disktype) {
						udev_dict_set_cstr(ndev, "disk-type",
						    __DECONST(char *, dp->d_disktype));
					}
					if (dp->d_info.d_serialno) {
						make_dev_alias(ndev,
						    "serno/%s.s%d%c",
						    dp->d_info.d_serialno,
						    sno, 'a' + i);
					}
					ndev->si_flags |= SI_REPROBE_TEST;
				}
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD) {
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
		}
	}

	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}

/*
 * This routine is only called for newly minted drives or to reprobe
 * a drive with no open slices.  disk_probe_slice() is called directly
 * when reprobing partition changes within slices.
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslices *osp;
	struct diskslice *sp;

	KKASSERT (info->d_media_blksize != 0);

	osp = dp->d_slice;
	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error) {
		dsgone(&osp);
		return;
	}

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;
		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;
		} else {
			/*
			 * Else create new device
			 */
			ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
					dkmakewholeslice(dkunit(dev), i),
					UID_ROOT, GID_OPERATOR, 0640,
					"%ss%d", dev->si_name, sno);
			udev_dict_set_cstr(ndev, "subsystem", "disk");
			/* Inherit parent's disk type */
			if (dp->d_disktype) {
				udev_dict_set_cstr(ndev, "disk-type",
				    __DECONST(char *, dp->d_disktype));
			}
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}
			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	dsgone(&osp);
	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
}


static void
disk_msg_core(void *arg)
{
	struct disk *dp;
	struct diskslice *sp;
	disk_msg_t msg;
	int run;

	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_PROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_DESTROY: %s\n",
				    dp->d_cdev->si_name);
			devfs_destroy_subnames(dp->d_cdev->si_name);
			devfs_destroy_dev(dp->d_cdev);
			lwkt_gettoken(&disklist_token);
			LIST_REMOVE(dp, d_list);
			lwkt_reltoken(&disklist_token);
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_UNPROBE: %s\n",
				    dp->d_cdev->si_name);
			devfs_destroy_subnames(dp->d_cdev->si_name);
			break;
		case DISK_SLICE_REPROBE:
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_clr_subnames_flag(sp->ds_dev->si_name,
						SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_SLICE_REPROBE: %s\n",
				    sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_subnames_without_flag(
					sp->ds_dev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			dp = (struct disk *)msg->load;
			devfs_clr_subnames_flag(dp->d_cdev->si_name,
						SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_DISK_REPROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_subnames_without_flag(
					dp->d_cdev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		lwkt_replymsg(&msg->hdr, 0);
	}
	lwkt_exit();
}


/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}


void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, &disk_msg->hdr);
}

void
disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
{
	struct lwkt_port rep_port;
	disk_msg_t disk_msg;
	lwkt_port_t port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
	port = &disk_msg_port;

	/* XXX could probably use curthread's built-in msgport */
	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;

	lwkt_sendmsg(port, &disk_msg->hdr);
	lwkt_waitmsg(&disk_msg->hdr, 0);
	objcache_put(disk_msg_cache, disk_msg);
}

/*
 * Create a raw device for the dev_ops template (which is returned).  Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops.  The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return disk_create_named(NULL, unit, dp, raw_ops);
}

cdev_t
disk_create_named(const char *name, int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	cdev_t rawdev;

	if (name == NULL)
		name = raw_ops->head.name;

	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);

	rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
			       UID_ROOT, GID_OPERATOR, 0640,
			       "%s%d", name, unit);

	bzero(dp, sizeof(*dp));

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = &disk_ops;
	dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
			    dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", name, unit);
	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
	dp->d_cdev->si_disk = dp;

	dsched_disk_create_callback(dp, name, unit);

	lwkt_gettoken(&disklist_token);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	lwkt_reltoken(&disklist_token);

	disk_debug(1, "disk_create (end): %s%d\n", name, unit);

	return (dp->d_rawdev);
}

int
disk_setdisktype(struct disk *disk, const char *type)
{
	KKASSERT(disk != NULL);

	disk->d_disktype = type;
	return udev_dict_set_cstr(disk->d_cdev, "disk-type",
				  __DECONST(char *, type));
}

static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;

	disk_debug(1,
		    "_setdiskinfo: %s\n",
		    disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.
	 */
	if (info->d_serialno && info->d_serialno[0]) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
				       info->d_serialno);
		}
	} else {
		info->d_serialno = NULL;
	}
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	dsched_disk_update_callback(disk, info);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	/* Add the serial number to the udev_dictionary */
	if (info->d_serialno)
		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
}

/*
 * Disk drivers must call this routine when media parameters are available
 * or have changed.
 */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1,
		    "disk_setdiskinfo: sent probe for %s\n",
		    disk->d_cdev->si_name);
}

void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1,
		    "disk_setdiskinfo_sync: sent probe for %s\n",
		    disk->d_cdev->si_name);
}

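/*
 * Illustrative sketch only (kept disabled, not compiled): the typical
 * driver-side sequence for publishing a unit through this layer is
 * disk_create() followed by disk_setdiskinfo() once the media parameters
 * are known.  "mydsk", mydsk_raw_ops, and the 512-byte geometry are
 * hypothetical.
 */
#if 0
static struct disk mydsk_disk;

static void
mydsk_attach(int unit, u_int64_t nblocks)
{
	static char serno[] = "EXAMPLE-0001";
	struct disk_info info;
	cdev_t rawdev;

	/* Raw device "mydsk<unit>"; the managed devices overlay it. */
	rawdev = disk_create(unit, &mydsk_disk, &mydsk_raw_ops);

	bzero(&info, sizeof(info));
	info.d_media_blksize = 512;		/* sector size */
	info.d_media_blocks = nblocks;		/* d_media_size is derived */
	info.d_serialno = serno;		/* copied by _setdiskinfo() */

	/* Queues a DISK_DISK_PROBE message to probe slices and labels. */
	disk_setdiskinfo(&mydsk_disk, &info);
}
#endif
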
/*
 * This routine is called when an adapter detaches.  The higher level
 * managed disk device is destroyed while the lower level raw device is
 * released.
 */
void
disk_destroy(struct disk *disk)
{
	dsched_disk_destroy_callback(disk);
	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
	return;
}

int
disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno,
	       u_int32_t *secsize)
{
	struct partinfo pinfo;
	int error;

	bzero(&pinfo, sizeof(pinfo));
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL);
	if (error)
		return (error);

	if (pinfo.media_blksize == 0)
		return (ENXIO);

	if (blkno) /* XXX: make sure this reserved stuff is right */
		*blkno = pinfo.reserved_blocks +
			 pinfo.media_offset / pinfo.media_blksize;
	if (secsize)
		*secsize = pinfo.media_blksize;
	if (size)
		*size = (pinfo.media_blocks - pinfo.reserved_blocks);

	return (0);
}

int
disk_dumpconf(cdev_t dev, u_int onoff)
{
	struct dumperinfo di;
	u_int64_t size, blkno;
	u_int32_t secsize;
	int error;

	if (!onoff)
		return set_dumper(NULL);

	error = disk_dumpcheck(dev, &size, &blkno, &secsize);

	if (error)
		return ENXIO;

	bzero(&di, sizeof(struct dumperinfo));
	di.dumper = diskdump;
	di.priv = dev;
	di.blocksize = secsize;
	di.mediaoffset = blkno * DEV_BSIZE;
	di.mediasize = size * DEV_BSIZE;

	return set_dumper(&di);
}

void
disk_unprobe(struct disk *disk)
{
	if (disk == NULL)
		return;

	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
}

void
disk_invalidate (struct disk *disk)
{
	dsgone(&disk->d_slice);
}

struct disk *
disk_enumerate(struct disk *disk)
{
	struct disk *dp;

	lwkt_gettoken(&disklist_token);
	if (!disk)
		dp = (LIST_FIRST(&disklist));
	else
		dp = (LIST_NEXT(disk, d_list));
	lwkt_reltoken(&disklist_token);

	return dp;
}

static
int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	struct disk *disk;
	int error, first;

	disk = NULL;
	first = 1;

	while ((disk = disk_enumerate(disk))) {
		if (!first) {
			error = SYSCTL_OUT(req, " ", 1);
			if (error)
				return error;
		} else {
			first = 0;
		}
		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
				   strlen(disk->d_rawdev->si_name));
		if (error)
			return error;
	}
	error = SYSCTL_OUT(req, "", 1);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
	    sysctl_disks, "A", "names of available disks");

/*
 * Open a disk device or partition.
 */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 *
	 * d_slice will be NULL if setdiskinfo() has not been called yet.
	 * setdiskinfo() is typically called whether the disk is present
	 * or not (e.g. CD), but the base disk device is created first
	 * and there may be a race.
	 */
	dp = dev->si_disk;
	if (dp == NULL || dp->d_slice == NULL)
		return (ENXIO);
	error = 0;

	/*
	 * Deal with open races
	 */
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error)
			return (error);
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
				  ap->a_devtype, ap->a_cred);
	}
#if 0
	/*
	 * Inherit properties from the underlying device now that it is
	 * open.
	 */
	dev_dclone(dev);
#endif

	if (error)
		goto out;
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
		       &dp->d_slice, &dp->d_info);
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
	}
out:
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}

	return(error);
}

/*
 * Close a disk device or partition
 */
static
int
diskclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	error = 0;
	dp = dev->si_disk;

	dsclose(dev, ap->a_devtype, dp->d_slice);
	if (!dsisopen(dp->d_slice)) {
		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
	}
	return (error);
}

/*
 * First execute the ioctl on the disk device, and if it isn't supported
 * try running it on the backing device.
 */
static
int
diskioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;
	u_int u;

	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "diskioctl: cmd is: %lx (name: %s)\n",
		    ap->a_cmd, dev->si_name);
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "diskioctl: &dp->d_slice is: %p, %p\n",
		    &dp->d_slice, dp->d_slice);

	if (ap->a_cmd == DIOCGKERNELDUMP) {
		u = *(u_int *)ap->a_data;
		return disk_dumpconf(dev, u);
	}

	if (&dp->d_slice == NULL || dp->d_slice == NULL) {
		error = ENOIOCTL;
	} else {
		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
				&dp->d_slice, &dp->d_info);
	}

	if (error == ENOIOCTL) {
		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
				   ap->a_fflag, ap->a_cred, NULL);
	}
	return (error);
}

/*
 * Execute strategy routine
 */
static
int
diskstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct disk *dp;

	dp = dev->si_disk;

	if (dp == NULL) {
		bio->bio_buf->b_error = ENXIO;
		bio->bio_buf->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}
	KKASSERT(dev->si_disk == dp);

	/*
	 * The dscheck() function will also transform the slice relative
	 * block number i.e. bio->bio_offset into a block number that can be
	 * passed directly to the underlying raw device.  If dscheck()
	 * returns NULL it will have handled the bio for us (e.g. EOF
	 * or error due to being beyond the device size).
	 */
	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
		dsched_queue(dp, nbio);
	} else {
		biodone(bio);
	}
	return(0);
}

/*
 * Return the partition size in ?blocks?
 */
static
int
diskpsize(struct dev_psize_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;

	dp = dev->si_disk;
	if (dp == NULL)
		return(ENODEV);
	ap->a_result = dssize(dev, &dp->d_slice);
	return(0);
}

/*
 * When new device entries are instantiated, make sure they inherit our
 * si_disk structure and block and iosize limits from the raw device.
 *
 * This routine is always called synchronously in the context of the
 * client.
 *
 * XXX The various io and block size constraints are not always initialized
 * properly by devices.
 */
static
int
diskclone(struct dev_clone_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;

	dp = dev->si_disk;

	KKASSERT(dp != NULL);
	dev->si_disk = dp;
	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
	return(0);
}

int
diskdump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp = dev->si_disk;
	u_int64_t size, offset;
	int error;

	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
	/* XXX: this should probably go in disk_dumpcheck somehow */
	if (ap->a_length != 0) {
		size *= DEV_BSIZE;
		offset = ap->a_blkno * DEV_BSIZE;
		if ((ap->a_offset < offset) ||
		    (ap->a_offset + ap->a_length - offset > size)) {
			kprintf("Attempt to write outside dump "
				"device boundaries.\n");
			error = ENOSPC;
		}
	}

	if (error == 0) {
		ap->a_head.a_dev = dp->d_rawdev;
		error = dev_doperate(&ap->a_head);
	}

	return(error);
}


SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
	   0, sizeof(struct disk), "sizeof(struct disk)");

/*
 * Reorder interval for burst write allowance and minor write
 * allowance.
 *
 * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * when runningbufspace is well over the hirunningspace limit.
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");


/*
 * Order I/Os.  Generally speaking this code is designed to make better
 * use of drive zone caches.  A drive zone cache can typically track linear
 * reads or writes for around 16 zones simultaneously.
 *
 * Read prioritization issues:  It is possible for hundreds of megabytes worth
 * of writes to be queued asynchronously.  This creates a huge bottleneck
 * for reads which reduces read bandwidth to a trickle.
 *
 * To solve this problem we generally reorder reads before writes.
 *
 * However, a large number of random reads can also starve writes and
 * make poor use of the drive zone cache so we allow writes to trickle
 * in every N reads.
 */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occurred the transition
		 * field will be NULL.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}

/*
 * Move the read-write transition point to prevent reads from
 * completely starving our writes.  This brings a number of writes into
 * the fold every N reads.
 *
 * We bring a few linear writes into the fold on a minor interval
 * and we bring a non-linear burst of writes into the fold on a major
 * interval.  Bursting only occurs if runningbufspace is really high
 * (typically from syncs, fsyncs, or HAMMER flushes).
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;
	size_t n;
	int check_off;

	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}

/*
 * Bounds checking against the media size, used for the raw partition.
 * secsize, mediasize and b_blkno must all be the same units.
 * Possibly this has to be DEV_BSIZE (512).
 */
int
bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
{
	struct buf *bp = bio->bio_buf;
	int64_t sz;

	sz = howmany(bp->b_bcount, secsize);

	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
		sz = mediasize - bio->bio_offset/DEV_BSIZE;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			return 0;
		}
		if (sz < 0) {
			/* If past end of disk, return EINVAL. */
			bp->b_error = EINVAL;
			return 0;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz * secsize;
	}

	return 1;
}

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be null from drivers that have not
 * been converted to use them.  The message is printed with kprintf
 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
 * The message should be completed (with at least a newline) with kprintf
 * or log(-1, ...), respectively.  There is no trailing space.
 */
void
diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
{
	struct buf *bp = bio->bio_buf;
	const char *term;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		term = "read";
		break;
	case BUF_CMD_WRITE:
		term = "write";
		break;
	default:
		term = "access";
		break;
	}
	kprintf("%s: %s %sing ", dev->si_name, what, term);
	kprintf("offset %012llx for %d",
		(long long)bio->bio_offset,
		bp->b_bcount);

	if (donecnt)
		kprintf(" (%d bytes completed)", donecnt);
}

/*
 * Locate a disk device
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name(devname);
}

void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}

static void
disk_init(void)
{
	struct thread *td_core;

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token, 1, "disks");

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "disk_msg_core");

	tsleep(td_core, 0, "diskcore", 0);
}

static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}

/*
 * Clean out illegal characters in serial numbers.
 */
static void
disk_cleanserial(char *serno)
{
	char c;

	while ((c = *serno) != 0) {
		if (c >= 'a' && c <= 'z')
			;
		else if (c >= 'A' && c <= 'Z')
			;
		else if (c >= '0' && c <= '9')
			;
		else if (c == '-' || c == '@' || c == '+' || c == '.')
			;
		else
			c = '_';
		*serno++ = c;
	}
}

TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
	   0, "Enable subr_disk debugging");

SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);