/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * and Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/disklabel32.h>
#include <sys/disklabel64.h>
#include <sys/diskslice.h>
#include <sys/diskmbr.h>
#include <sys/disk.h>
#include <sys/kerneldump.h>
#include <sys/malloc.h>
#include <machine/md_var.h>
#include <sys/ctype.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/msgport.h>
#include <sys/devfs.h>
#include <sys/thread.h>
#include <sys/dsched.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/udev.h>
#include <sys/uuid.h>

#include <sys/buf2.h>
#include <sys/mplock2.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

static MALLOC_DEFINE(M_DISK, "disk", "disk data");
static int disk_debug_enable = 0;

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice,
			    int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);
static int disk_debug(int, char *, ...) __printflike(2, 3);
static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
				 struct dev_ops *raw_ops, int clone);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_dump_t diskdump;

static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;

static struct dev_ops disk_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

static struct objcache	*disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;

static int
disk_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= disk_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct partinfo part;
	const char *msg;
	char uuid_buf[128];
	cdev_t ndev;
	int sno;
	u_int i;

	disk_debug(2, "disk_probe_slice (begin): %s (%s)\n",
		   dev->si_name, dp->d_cdev->si_name);

	sno = slice ? slice - 1 : 0;

	ops = &disklabel32_ops;
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}
	if (msg == NULL) {
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			if (part.fstype) {
				if (reprobe &&
				    (ndev = devfs_find_device_by_name("%s%c",
						dev->si_name, 'a' + i))) {
					/*
					 * Device already exists and
					 * is still valid.
					 */
					ndev->si_flags |= SI_REPROBE_TEST;

					/*
					 * Destroy old UUID alias
					 */
					destroy_dev_alias(ndev,
							  "part-by-uuid/*");

					/* Create UUID alias */
					if (!kuuid_is_nil(&part.storage_uuid)) {
						snprintf_uuid(uuid_buf,
						    sizeof(uuid_buf),
						    &part.storage_uuid);
						make_dev_alias(ndev,
						    "part-by-uuid/%s",
						    uuid_buf);
					}
				} else {
					ndev = make_dev_covering(&disk_ops,
						dp->d_rawdev->si_ops,
						dkmakeminor(dkunit(dp->d_cdev),
							    slice, i),
						UID_ROOT, GID_OPERATOR, 0640,
						"%s%c", dev->si_name, 'a' + i);
					ndev->si_disk = dp;
					udev_dict_set_cstr(ndev, "subsystem",
							   "disk");
					/* Inherit parent's disk type */
					if (dp->d_disktype) {
						udev_dict_set_cstr(ndev,
						    "disk-type",
						    __DECONST(char *,
							dp->d_disktype));
					}

					/* Create serno alias */
					if (dp->d_info.d_serialno) {
						make_dev_alias(ndev,
						    "serno/%s.s%d%c",
						    dp->d_info.d_serialno,
						    sno, 'a' + i);
					}

					/* Create UUID alias */
					if (!kuuid_is_nil(&part.storage_uuid)) {
						snprintf_uuid(uuid_buf,
						    sizeof(uuid_buf),
						    &part.storage_uuid);
						make_dev_alias(ndev,
						    "part-by-uuid/%s",
						    uuid_buf);
					}
					ndev->si_flags |= SI_REPROBE_TEST;
				}
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		if (sp->ds_type == DOSPTYP_386BSD ||	/* XXX */
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD) {
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
		}
	}

	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}

/*
 * This routine is only called for newly minted drives or to reprobe
 * a drive with no open slices.  disk_probe_slice() is called directly
 * when reprobing partition changes within slices.
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslices *osp;
	struct diskslice *sp;
	char uuid_buf[128];

	KKASSERT(info->d_media_blksize != 0);

	osp = dp->d_slice;
	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error) {
		dsgone(&osp);
		return;
	}

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;

#if 1
		/*
		 * Ignore the compatibility slice s0 if it's a device mapper
		 * volume.
		 */
		if ((i == COMPATIBILITY_SLICE) &&
		    (info->d_dsflags & DSO_DEVICEMAPPER))
			continue;
#endif

		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;

			/*
			 * Destroy old UUID alias
			 */
			destroy_dev_alias(ndev, "slice-by-uuid/*");

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
					      &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
					       uuid_buf);
			}
		} else {
			/*
			 * Else create new device
			 */
			ndev = make_dev_covering(&disk_ops,
					dp->d_rawdev->si_ops,
					dkmakewholeslice(dkunit(dev), i),
					UID_ROOT, GID_OPERATOR, 0640,
					(info->d_dsflags & DSO_DEVICEMAPPER)?
					"%s.s%d" : "%ss%d", dev->si_name, sno);
			udev_dict_set_cstr(ndev, "subsystem", "disk");
			/* Inherit parent's disk type */
			if (dp->d_disktype) {
				udev_dict_set_cstr(ndev, "disk-type",
				    __DECONST(char *, dp->d_disktype));
			}

			/* Create serno alias */
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
					      &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
					       uuid_buf);
			}

			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	dsgone(&osp);
	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
}

static void
disk_msg_core(void *arg)
{
	struct disk	*dp;
	struct diskslice *sp;
	disk_msg_t msg;
	int run;

	lwkt_gettoken(&disklist_token);
	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);	/* synchronous startup */
	lwkt_reltoken(&disklist_token);

	get_mplock();	/* not mpsafe yet? */
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1, "DISK_DISK_PROBE: %s\n",
				   dp->d_cdev->si_name);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			dp = (struct disk *)msg->load;
			disk_debug(1, "DISK_DISK_DESTROY: %s\n",
				   dp->d_cdev->si_name);
			devfs_destroy_subnames(dp->d_cdev->si_name);
			destroy_dev(dp->d_cdev);
			destroy_only_dev(dp->d_rawdev);
			lwkt_gettoken(&disklist_token);
			LIST_REMOVE(dp, d_list);
			lwkt_reltoken(&disklist_token);
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1, "DISK_DISK_UNPROBE: %s\n",
				   dp->d_cdev->si_name);
			devfs_destroy_subnames(dp->d_cdev->si_name);
			break;
		case DISK_SLICE_REPROBE:
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_clr_subnames_flag(sp->ds_dev->si_name,
						SI_REPROBE_TEST);
			disk_debug(1, "DISK_SLICE_REPROBE: %s\n",
				   sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_subnames_without_flag(
				sp->ds_dev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			dp = (struct disk *)msg->load;
			devfs_clr_subnames_flag(dp->d_cdev->si_name,
						SI_REPROBE_TEST);
			disk_debug(1, "DISK_DISK_REPROBE: %s\n",
				   dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_subnames_without_flag(
				dp->d_cdev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		lwkt_replymsg(&msg->hdr, 0);
	}
	lwkt_exit();
}

/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}

void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, &disk_msg->hdr);
}

void
disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
{
	struct lwkt_port rep_port;
	disk_msg_t disk_msg;
	lwkt_port_t port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
	port = &disk_msg_port;

	/* XXX could probably use curthread's built-in msgport */
	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;

	lwkt_sendmsg(port, &disk_msg->hdr);
	lwkt_waitmsg(&disk_msg->hdr, 0);
	objcache_put(disk_msg_cache, disk_msg);
}

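/*
 * NOTE: disk_setdiskinfo() below uses the asynchronous disk_msg_send()
 * path, while disk_setdiskinfo_sync(), disk_unprobe(), disk_destroy()
 * and disk_config() use disk_msg_send_sync() and do not return until
 * the disk_msg_core() thread has processed the request.
 */
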
/*
 * Create a raw device for the dev_ops template (which is returned).  Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops.  The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
}

cdev_t
disk_create_clone(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
}

cdev_t
disk_create_named(const char *name, int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 0);
}

cdev_t
disk_create_named_clone(const char *name, int unit, struct disk *dp,
			struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 1);
}

static cdev_t
_disk_create_named(const char *name, int unit, struct disk *dp,
		   struct dev_ops *raw_ops, int clone)
{
	cdev_t rawdev;

	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);

	if (name) {
		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
	} else {
		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
		    UID_ROOT, GID_OPERATOR, 0640,
		    "%s%d", raw_ops->head.name, unit);
	}

	bzero(dp, sizeof(*dp));

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = &disk_ops;

	if (name) {
		if (clone) {
			dp->d_cdev = make_only_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops, dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s", name);
		} else {
			dp->d_cdev = make_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops, dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s", name);
		}
	} else {
		if (clone) {
			dp->d_cdev = make_only_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops, dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);
		} else {
			dp->d_cdev = make_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops, dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);
		}
	}

	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
	dp->d_cdev->si_disk = dp;

	if (name)
		dsched_disk_create_callback(dp, name, unit);
	else
		dsched_disk_create_callback(dp, raw_ops->head.name, unit);

	lwkt_gettoken(&disklist_token);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	lwkt_reltoken(&disklist_token);

	disk_debug(1, "disk_create (end): %s%d\n",
		   (name != NULL)?(name):(raw_ops->head.name), unit);

	return (dp->d_rawdev);
}

int
disk_setdisktype(struct disk *disk, const char *type)
{
	KKASSERT(disk != NULL);

	disk->d_disktype = type;
	return udev_dict_set_cstr(disk->d_cdev, "disk-type",
				  __DECONST(char *, type));
}

int
disk_getopencount(struct disk *disk)
{
	return disk->d_opencount;
}

static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;

	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.
	 */
	if (info->d_serialno && info->d_serialno[0]) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
				       info->d_serialno);
		}
	} else {
		info->d_serialno = NULL;
	}
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	dsched_disk_update_callback(disk, info);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	/* Add the serial number to the udev_dictionary */
	if (info->d_serialno)
		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
}

/*
 * Disk drivers must call this routine when media parameters are available
 * or have changed.
 */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
		   disk->d_cdev->si_name);
}

void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
		   disk->d_cdev->si_name);
}

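/*
 * Typical driver-side use of the interfaces above, shown only as an
 * illustrative sketch; the "foo" driver, its softc fields and its dev_ops
 * template are hypothetical and not part of this file:
 *
 *	static int
 *	foo_attach(struct foo_softc *sc, int unit)
 *	{
 *		struct disk_info info;
 *
 *		sc->sc_rawdev = disk_create(unit, &sc->sc_disk, &foo_ops);
 *
 *		bzero(&info, sizeof(info));
 *		info.d_media_blksize = DEV_BSIZE;
 *		info.d_media_blocks = sc->sc_nsectors;
 *		disk_setdiskinfo(&sc->sc_disk, &info);
 *		return (0);
 *	}
 *
 * disk_create() hands back the raw device, while the slice/unit managed
 * device is reachable via sc->sc_disk.d_cdev; the detach path calls
 * disk_destroy(&sc->sc_disk).  Setting only d_media_blocks and leaving
 * d_media_size zero satisfies the KKASSERT in _setdiskinfo() above,
 * which derives the missing value from d_media_blksize.
 */
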
/*
 * This routine is called when an adapter detaches.  The higher level
 * managed disk device is destroyed while the lower level raw device is
 * released.
 */
void
disk_destroy(struct disk *disk)
{
	dsched_disk_destroy_callback(disk);
	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
	return;
}

int
disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno,
	       u_int32_t *secsize)
{
	struct partinfo pinfo;
	int error;

	bzero(&pinfo, sizeof(pinfo));
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL);
	if (error)
		return (error);

	if (pinfo.media_blksize == 0)
		return (ENXIO);

	if (blkno) /* XXX: make sure this reserved stuff is right */
		*blkno = pinfo.reserved_blocks +
			 pinfo.media_offset / pinfo.media_blksize;
	if (secsize)
		*secsize = pinfo.media_blksize;
	if (size)
		*size = (pinfo.media_blocks - pinfo.reserved_blocks);

	return (0);
}

int
disk_dumpconf(cdev_t dev, u_int onoff)
{
	struct dumperinfo di;
	u_int64_t size, blkno;
	u_int32_t secsize;
	int error;

	if (!onoff)
		return set_dumper(NULL);

	error = disk_dumpcheck(dev, &size, &blkno, &secsize);

	if (error)
		return ENXIO;

	bzero(&di, sizeof(struct dumperinfo));
	di.dumper = diskdump;
	di.priv = dev;
	di.blocksize = secsize;
	di.mediaoffset = blkno * DEV_BSIZE;
	di.mediasize = size * DEV_BSIZE;

	return set_dumper(&di);
}

void
disk_unprobe(struct disk *disk)
{
	if (disk == NULL)
		return;

	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
}

void
disk_invalidate(struct disk *disk)
{
	dsgone(&disk->d_slice);
}

struct disk *
disk_enumerate(struct disk *disk)
{
	struct disk *dp;

	lwkt_gettoken(&disklist_token);
	if (!disk)
		dp = (LIST_FIRST(&disklist));
	else
		dp = (LIST_NEXT(disk, d_list));
	lwkt_reltoken(&disklist_token);

	return dp;
}

static
int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	struct disk *disk;
	int error, first;

	disk = NULL;
	first = 1;

	while ((disk = disk_enumerate(disk))) {
		if (!first) {
			error = SYSCTL_OUT(req, " ", 1);
			if (error)
				return error;
		} else {
			first = 0;
		}
		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
				   strlen(disk->d_rawdev->si_name));
		if (error)
			return error;
	}
	error = SYSCTL_OUT(req, "", 1);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
	    sysctl_disks, "A", "names of available disks");

/*
 * Open a disk device or partition.
 */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 *
	 * d_slice will be NULL if setdiskinfo() has not been called yet.
	 * setdiskinfo() is typically called whether the disk is present
	 * or not (e.g. CD), but the base disk device is created first
	 * and there may be a race.
	 */
	dp = dev->si_disk;
	if (dp == NULL || dp->d_slice == NULL)
		return (ENXIO);
	error = 0;

	/*
	 * Deal with open races
	 */
	get_mplock();
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error) {
			rel_mplock();
			return (error);
		}
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
				  ap->a_devtype, ap->a_cred);
	}

	if (error)
		goto out;
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
		       &dp->d_slice, &dp->d_info);
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
	}
out:
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}
	rel_mplock();

	KKASSERT(dp->d_opencount >= 0);
	/* If the open was successful, bump open count */
	if (error == 0)
		atomic_add_int(&dp->d_opencount, 1);

	return(error);
}

/*
 * Close a disk device or partition
 */
static
int
diskclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	error = 0;
	dp = dev->si_disk;

	KKASSERT(dp->d_opencount >= 1);
	/* If this is not the last close, just ignore it */
	if ((atomic_fetchadd_int(&dp->d_opencount, -1)) > 1)
		return 0;

	get_mplock();
	dsclose(dev, ap->a_devtype, dp->d_slice);
	if (!dsisopen(dp->d_slice)) {
		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
	}
	rel_mplock();
	return (error);
}

/*
 * First execute the ioctl on the disk device, and if it isn't supported
 * try running it on the backing device.
 */
static
int
diskioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;
	u_int u;

	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "diskioctl: cmd is: %lx (name: %s)\n",
		    ap->a_cmd, dev->si_name);
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "diskioctl: &dp->d_slice is: %p, %p\n",
		    &dp->d_slice, dp->d_slice);

	if (ap->a_cmd == DIOCGKERNELDUMP) {
		u = *(u_int *)ap->a_data;
		return disk_dumpconf(dev, u);
	}

	if (dp->d_slice == NULL ||
	    ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
	     dkslice(dev) == WHOLE_DISK_SLICE)) {
		error = ENOIOCTL;
	} else {
		get_mplock();
		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
				&dp->d_slice, &dp->d_info);
		rel_mplock();
	}

	if (error == ENOIOCTL) {
		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
				   ap->a_fflag, ap->a_cred, NULL);
	}
	return (error);
}

/*
 * Execute strategy routine
 */
static
int
diskstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct disk *dp;

	dp = dev->si_disk;

	if (dp == NULL) {
		bio->bio_buf->b_error = ENXIO;
		bio->bio_buf->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}
	KKASSERT(dev->si_disk == dp);

	/*
	 * The dscheck() function will also transform the slice relative
	 * block number i.e. bio->bio_offset into a block number that can be
	 * passed directly to the underlying raw device.  If dscheck()
	 * returns NULL it will have handled the bio for us (e.g. EOF
	 * or error due to being beyond the device size).
	 */
	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
		dsched_queue(dp, nbio);
	} else {
		biodone(bio);
	}
	return(0);
}

/*
 * Return the partition size in ?blocks?
 */
static
int
diskpsize(struct dev_psize_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;

	dp = dev->si_disk;
	if (dp == NULL)
		return(ENODEV);

	ap->a_result = dssize(dev, &dp->d_slice);

	if ((ap->a_result == -1) &&
	    (dp->d_info.d_dsflags & DSO_DEVICEMAPPER)) {
		ap->a_head.a_dev = dp->d_rawdev;
		return dev_doperate(&ap->a_head);
	}
	return(0);
}

int
diskdump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp = dev->si_disk;
	u_int64_t size, offset;
	int error;

	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
	/* XXX: this should probably go in disk_dumpcheck somehow */
	if (ap->a_length != 0) {
		size *= DEV_BSIZE;
		offset = ap->a_blkno * DEV_BSIZE;
		if ((ap->a_offset < offset) ||
		    (ap->a_offset + ap->a_length - offset > size)) {
			kprintf("Attempt to write outside dump device boundaries.\n");
			error = ENOSPC;
		}
	}

	if (error == 0) {
		ap->a_head.a_dev = dp->d_rawdev;
		error = dev_doperate(&ap->a_head);
	}

	return(error);
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
	   0, sizeof(struct disk), "sizeof(struct disk)");

/*
 * Reorder interval for burst write allowance and minor write
 * allowance.
 *
 * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * if runningbufspace is well over the hirunningspace limit.
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");

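/*
 * With the defaults above this works out roughly as follows: for every 5
 * reads that bioqdisksort() inserts ahead of queued writes, it calls
 * bioqwritereorder() to move the read/write transition point past up to
 * 256KB of linearly contiguous writes so they can no longer be bypassed.
 * When 60 such reads have accumulated the counter resets, and if
 * runningbufspace is severe at that point up to ~3MB of writes (linear
 * or not) are brought forward in a single burst instead.
 */
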
/*
 * Order I/Os.  Generally speaking this code is designed to make better
 * use of drive zone caches.  A drive zone cache can typically track linear
 * reads or writes for around 16 zones simultaneously.
 *
 * Read prioritization issues:  It is possible for hundreds of megabytes worth
 * of writes to be queued asynchronously.  This creates a huge bottleneck
 * for reads, reducing read bandwidth to a trickle.
 *
 * To solve this problem we generally reorder reads before writes.
 *
 * However, a large number of random reads can also starve writes and
 * make poor use of the drive zone cache so we allow writes to trickle
 * in every N reads.
 */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occurred the transition
		 * field will be NULL.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}

/*
 * Move the read-write transition point to prevent reads from
 * completely starving our writes.  This brings a number of writes into
 * the fold every N reads.
 *
 * We bring a few linear writes into the fold on a minor interval
 * and we bring a non-linear burst of writes into the fold on a major
 * interval.  Bursting only occurs if runningbufspace is really high
 * (typically from syncs, fsyncs, or HAMMER flushes).
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;
	size_t n;
	int check_off;

	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}

/*
 * Bounds checking against the media size, used for the raw partition.
 * secsize, mediasize and b_blkno must all be in the same units.
 * Possibly this has to be DEV_BSIZE (512).
 */
int
bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
{
	struct buf *bp = bio->bio_buf;
	int64_t sz;

	sz = howmany(bp->b_bcount, secsize);

	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
		sz = mediasize - bio->bio_offset/DEV_BSIZE;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			return 0;
		}
		if (sz < 0) {
			/* If past end of disk, return EINVAL. */
			bp->b_error = EINVAL;
			return 0;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz * secsize;
	}

	return 1;
}

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form
 *
 *	hp0g: hard error reading offset 000000003000 for 8192
 *
 * donecnt should be 0 if the number of bytes completed is unknown;
 * otherwise it is appended to the message.  The message is printed
 * with kprintf (the pri argument is currently unused) and should be
 * completed, with at least a newline, by the caller.  There is no
 * trailing space.
 */
void
diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
{
	struct buf *bp = bio->bio_buf;
	const char *term;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		term = "read";
		break;
	case BUF_CMD_WRITE:
		term = "write";
		break;
	default:
		term = "access";
		break;
	}
	kprintf("%s: %s %sing ", dev->si_name, what, term);
	kprintf("offset %012llx for %d",
		(long long)bio->bio_offset,
		bp->b_bcount);

	if (donecnt)
		kprintf(" (%d bytes completed)", donecnt);
}

/*
 * Locate a disk device
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name(devname);
}

void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}

static void
disk_init(void)
{
	struct thread *td_core;

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token, 1, "disks");

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_gettoken(&disklist_token);
	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "disk_msg_core");
	tsleep(td_core, 0, "diskcore", 0);
	lwkt_reltoken(&disklist_token);
}

static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}

/*
 * Clean out illegal characters in serial numbers.
 */
static void
disk_cleanserial(char *serno)
{
	char c;

	while ((c = *serno) != 0) {
		if (c >= 'a' && c <= 'z')
			;
		else if (c >= 'A' && c <= 'Z')
			;
		else if (c >= '0' && c <= '9')
			;
		else if (c == '-' || c == '@' || c == '+' || c == '.')
			;
		else
			c = '_';
		*serno++ = c;
	}
}

TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
	   0, "Enable subr_disk debugging");

SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
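
/*
 * The disk_debug() output in this file can be enabled at runtime via the
 * kern.disk_debug sysctl above, or pre-boot via the loader tunable of the
 * same name, e.g.:
 *
 *	sysctl kern.disk_debug=1	(disk/slice probe and create events)
 *	sysctl kern.disk_debug=2	(additionally traces disk_probe_slice())
 */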