/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
 */
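
/*
 * Overview: this file implements the kernel disk layer.  It creates the
 * slice- and partition-managed devfs nodes on top of a driver's raw disk
 * device, probes MBR slices and 32/64-bit disklabels, forwards
 * open/close/ioctl/strategy/dump/psize requests to the underlying raw
 * device, serializes probe/reprobe/destroy work through an lwkt message
 * thread, and provides the classic one-way seek sort for bio queues.
 */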

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/disklabel32.h>
#include <sys/disklabel64.h>
#include <sys/diskslice.h>
#include <sys/diskmbr.h>
#include <sys/disk.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <machine/md_var.h>
#include <sys/ctype.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>
#include <sys/buf2.h>
#include <vfs/devfs/devfs.h>

#include <sys/thread2.h>

#include <sys/queue.h>
#include <sys/lock.h>

static MALLOC_DEFINE(M_DISK, "disk", "disk data");

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice);
static void disk_probe(struct disk *dp);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_clone_t diskclone;
static d_dump_t diskdump;

static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);

static struct dev_ops disk_ops = {
	{ "disk", 0, D_DISK },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
	.d_clone = diskclone
};

static struct objcache *disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;


static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct partinfo part;
	const char *msg;
	cdev_t ndev;
	unsigned long i;

	//lp.opaque = NULL;

	ops = &disklabel32_ops;
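	/*
	 * Try the 32-bit disklabel first; if no label is found, fall
	 * back to the 64-bit disklabel format below.
	 */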
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe_slice: trying with disklabel64\n");
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe_slice: label: %s\n", (msg)?msg:"is NULL");
	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe_slice: found %d partitions in the label\n", ops->op_getnumparts(sp->ds_label));
	if (msg == NULL) {
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe_slice: lp.opaque: %x\n", sp->ds_label.opaque);
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe_slice: partinfo says fstype=%d for part %d\n", part.fstype, i);
			if (part.fstype) {
				ndev = make_only_devfs_dev(&disk_ops,
				    dkmakeminor(dkunit(dp->d_cdev), slice, i),
				    UID_ROOT, GID_OPERATOR, 0640,
				    "%s%c", dev->si_name, 'a' + (char)i);
#if 0
				make_dev_alias(ndev, "disk-by-id/diskTEST-sliceTEST-part%d", i);
#endif
				ndev->si_disk = dp;
				devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe_slice:end: lp.opaque: %x\n", ndev->si_disk->d_slice->dss_slices[slice].ds_label.opaque);
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		if (sp->ds_type == DOSPTYP_386BSD /* XXX */)
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
	}

	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}
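
/*
 * Note on naming: for each valid partition found above, a devfs node of
 * the form "<slice-dev><letter>" is created, where the letter is
 * 'a' + partition index (e.g. "da0s1a" for partition 0 of slice device
 * "da0s1").  disk_probe() below creates the per-slice "<disk>sN" nodes
 * that these partition nodes hang off of.
 */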

static void
disk_probe(struct disk *dp)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i;

	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe called for %s\n", dp->d_cdev->si_name);
	KKASSERT(info->d_media_blksize != 0);
	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe: info set!\n");

	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);

	error = mbrinit(dev, info, &(dp->d_slice));
	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe: &dp->d_slice is: %x, %x\n", &dp->d_slice, dp->d_slice);
	if (error != 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe: mbrinit() failed with error: %d\n", error);
		return;
	} else {
		devfs_debug(DEVFS_DEBUG_DEBUG, "mbrinit succeeded, found %d slices\n", dp->d_slice->dss_nslices);
		if (dp->d_slice->dss_nslices == BASE_SLICE) {
			dp->d_slice->dss_slices[COMPATIBILITY_SLICE].ds_size = info->d_media_blocks;
			dp->d_slice->dss_slices[COMPATIBILITY_SLICE].ds_reserved = 0;
			ndev = make_only_devfs_dev(&disk_ops,
			    dkmakewholeslice(dkunit(dev), COMPATIBILITY_SLICE),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%ss%d", dev->si_name, COMPATIBILITY_SLICE);

			ndev->si_disk = dp;
			dp->d_slice->dss_slices[COMPATIBILITY_SLICE].ds_dev = ndev;
			devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe: type of slice is :%x\n", dp->d_slice->dss_slices[COMPATIBILITY_SLICE].ds_type);
			//if (dp->d_slice->dss_slices[COMPATIBILITY_SLICE].ds_type == DOSPTYP_386BSD) {
				dp->d_slice->dss_first_bsd_slice = COMPATIBILITY_SLICE;
				disk_probe_slice(dp, ndev, COMPATIBILITY_SLICE);
			//}
		}
		for (i = BASE_SLICE; i < dp->d_slice->dss_nslices; i++) {
			ndev = make_only_devfs_dev(&disk_ops,
			    dkmakewholeslice(dkunit(dev), i),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%ss%d", dev->si_name, i - 1);
			make_dev_alias(ndev, "disk-by-id/diskTEST-slice%d", i - 1);

			ndev->si_disk = dp;
			dp->d_slice->dss_slices[i].ds_dev = ndev;
			devfs_debug(DEVFS_DEBUG_DEBUG, "disk_probe-> type of slice is :%x\n", dp->d_slice->dss_slices[i].ds_type);
			if (dp->d_slice->dss_slices[i].ds_type == DOSPTYP_386BSD) {
				if (!dp->d_slice->dss_first_bsd_slice)
					dp->d_slice->dss_first_bsd_slice = i;
				disk_probe_slice(dp, ndev, i);
			}
		}
	}
}

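/*
 * All disk topology changes (initial probe, reprobe, destroy) are funneled
 * through the message port serviced by the disk_msg_core() thread below,
 * so slice and label probing runs asynchronously and serialized with
 * respect to other disk messages.
 */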
static void
disk_msg_core(void *arg)
{
	uint8_t run = 1;
	struct disk *dp;
	struct diskslice *sp;
	disk_msg_t msg;


	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG, "disk_msg_core, new msg: %x\n", (unsigned int)msg->hdr.u.ms_result);

		switch (msg->hdr.u.ms_result) {

		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_probe(dp);
			break;

		case DISK_DISK_DESTROY:
			dp = (struct disk *)msg->load;
			devfs_destroy_subnames(dp->d_cdev->si_name);
			devfs_destroy_dev(dp->d_cdev);
			//devfs_destroy_dev(dp->d_rawdev); //XXX: needed? when?
			break;

		case DISK_SLICE_REPROBE:
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_destroy_subnames(sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev, dkslice(sp->ds_dev));
			break;

		case DISK_DISK_REPROBE:
			dp = (struct disk *)msg->load;
			devfs_destroy_subnames(dp->d_cdev->si_name);
			disk_probe(dp);
			break;

		case DISK_SYNC:
			break;

		default:
			devfs_debug(DEVFS_DEBUG_WARNING, "disk_msg_core: unknown message received at core\n");
		}

		lwkt_replymsg((lwkt_msg_t)msg, 0);
	}
	lwkt_exit();
}


/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}


void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
}

/*
 * Create a raw device for the dev_ops template (which is returned).  Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops.  The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	cdev_t rawdev;

	rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
	    UID_ROOT, GID_OPERATOR, 0640,
	    "%s%d", raw_ops->head.name, unit);

	bzero(dp, sizeof(*dp));

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = &disk_ops;
	dp->d_cdev = make_only_devfs_dev(&disk_ops,
	    dkmakewholedisk(unit),
	    UID_ROOT, GID_OPERATOR, 0640,
	    "%s%d", raw_ops->head.name, unit);

	dp->d_cdev->si_disk = dp;

	disk_ops.head.data = dp;

	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_create called for %s\n", dp->d_cdev->si_name);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	return (dp->d_rawdev);
}

/*
 * Disk drivers must call this routine when media parameters are available
 * or have changed.
 */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_setdiskinfo called for disk -1-: %x\n", disk);
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;

	KKASSERT(info->d_media_size == 0 || info->d_media_blksize == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
		    info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
	    info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
		    info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_setdiskinfo called for disk -2-: %x\n", disk);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
}
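
/*
 * Illustrative driver-side usage (a sketch, not part of this file; the
 * mydrv_* names and sector count are hypothetical): a disk driver
 * registers its raw dev_ops with disk_create() and then publishes the
 * media parameters with disk_setdiskinfo(), which queues the
 * asynchronous DISK_DISK_PROBE message handled above.
 *
 *	struct disk_info info;
 *
 *	sc->sc_rawdev = disk_create(unit, &sc->sc_disk, &mydrv_ops);
 *	bzero(&info, sizeof(info));
 *	info.d_media_blksize = 512;
 *	info.d_media_blocks = sc->sc_nsectors;
 *	disk_setdiskinfo(&sc->sc_disk, &info);
 */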

/*
 * This routine is called when an adapter detaches.  The higher level
 * managed disk device is destroyed while the lower level raw device is
 * released.
 */
void
disk_destroy(struct disk *disk)
{
	disk_msg_send(DISK_DISK_DESTROY, disk, NULL);
	return;
}

int
disk_dumpcheck(cdev_t dev, u_int64_t *count, u_int64_t *blkno, u_int *secsize)
{
	struct partinfo pinfo;
	int error;

	bzero(&pinfo, sizeof(pinfo));
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0, proc0.p_ucred);
	if (error)
		return (error);
	if (pinfo.media_blksize == 0)
		return (ENXIO);
	*count = (u_int64_t)Maxmem * PAGE_SIZE / pinfo.media_blksize;
	if (dumplo64 < pinfo.reserved_blocks ||
	    dumplo64 + *count > pinfo.media_blocks) {
		return (ENOSPC);
	}
	*blkno = dumplo64 + pinfo.media_offset / pinfo.media_blksize;
	*secsize = pinfo.media_blksize;
	return (0);
}

void
disk_invalidate(struct disk *disk)
{
	devfs_debug(DEVFS_DEBUG_INFO, "disk_invalidate for %s\n", disk->d_cdev->si_name);
	if (disk->d_slice)
		dsgone(&disk->d_slice);
}

struct disk *
disk_enumerate(struct disk *disk)
{
	if (!disk)
		return (LIST_FIRST(&disklist));
	else
		return (LIST_NEXT(disk, d_list));
}

static
int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	struct disk *disk;
	int error, first;

	disk = NULL;
	first = 1;

	while ((disk = disk_enumerate(disk))) {
		if (!first) {
			error = SYSCTL_OUT(req, " ", 1);
			if (error)
				return error;
		} else {
			first = 0;
		}
		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
		    strlen(disk->d_rawdev->si_name));
		if (error)
			return error;
	}
	error = SYSCTL_OUT(req, "", 1);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_disks, "A", "names of available disks");
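
/*
 * The handler above backs the read-only string sysctl "kern.disks".
 * Illustrative userland usage (a sketch, not part of this file):
 *
 *	char buf[128];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctlbyname("kern.disks", buf, &len, NULL, 0) == 0)
 *		printf("%s\n", buf);	// space-separated raw disk names
 */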

/*
 * Open a disk device or partition.
 */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	devfs_debug(DEVFS_DEBUG_DEBUG, "diskopen: name is %s\n", dev->si_name);

	/*
	 * dp can't be NULL here XXX.
	 */
	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);
	error = 0;

	/*
	 * Deal with open races
	 */
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error)
			return (error);
	}
	dp->d_flags |= DISKFLAG_LOCK;

	devfs_debug(DEVFS_DEBUG_DEBUG, "diskopen: -2- name is %s\n", dev->si_name);

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
		    ap->a_devtype, ap->a_cred);
	}
#if 0
	/*
	 * Inherit properties from the underlying device now that it is
	 * open.
	 */
	dev_dclone(dev);
#endif

	if (error)
		goto out;
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
	    &dp->d_slice, &dp->d_info);
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
	}
out:
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}

	return (error);
}

/*
 * Close a disk device or partition
 */
static
int
diskclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	error = 0;
	dp = dev->si_disk;

	devfs_debug(DEVFS_DEBUG_DEBUG, "diskclose: name %s\n", dev->si_name);

	dsclose(dev, ap->a_devtype, dp->d_slice);
	if (!dsisopen(dp->d_slice)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "diskclose is closing underlying device\n");
		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
	}
	return (error);
}

/*
 * First execute the ioctl on the disk device, and if it isn't supported
 * try running it on the backing device.
 */
static
int
diskioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);

	devfs_debug(DEVFS_DEBUG_DEBUG, "diskioctl: cmd is: %x (name: %s)\n", ap->a_cmd, dev->si_name);
	devfs_debug(DEVFS_DEBUG_DEBUG, "diskioctl: &dp->d_slice is: %x, %x\n", &dp->d_slice, dp->d_slice);

	devfs_debug(DEVFS_DEBUG_DEBUG, "diskioctl:1: says lp.opaque is: %x\n", dp->d_slice->dss_slices[0].ds_label.opaque);

	error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
	    &dp->d_slice, &dp->d_info);

	devfs_debug(DEVFS_DEBUG_DEBUG, "diskioctl:2: says lp.opaque is: %x\n", dp->d_slice->dss_slices[0].ds_label.opaque);

	if (error == ENOIOCTL) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "diskioctl: going for dev_dioctl instead!\n");
		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
		    ap->a_fflag, ap->a_cred);
	}
	return (error);
}

/*
 * Execute strategy routine
 */
static
int
diskstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct disk *dp;

	dp = dev->si_disk;

	if (dp == NULL) {
		bio->bio_buf->b_error = ENXIO;
		bio->bio_buf->b_flags |= B_ERROR;
		biodone(bio);
		return (0);
	}
	KKASSERT(dev->si_disk == dp);

	/*
	 * The dscheck() function will also transform the slice relative
	 * block number i.e. bio->bio_offset into a block number that can be
	 * passed directly to the underlying raw device.  If dscheck()
	 * returns NULL it will have handled the bio for us (e.g. EOF
	 * or error due to being beyond the device size).
	 */
	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
		dev_dstrategy(dp->d_rawdev, nbio);
	} else {
		devfs_debug(DEVFS_DEBUG_DEBUG, "diskstrategy: dscheck NULL!!! biodone time!\n");
		biodone(bio);
	}
	return (0);
}
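
/*
 * Illustrative example of the dscheck() remapping described above: a bio
 * issued at offset 0 of a partition device is rebased by the partition's
 * and slice's starting offset on the media before nbio is handed to the
 * raw device, so the raw driver only ever sees media-relative offsets.
 */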

/*
 * Return the partition size in ?blocks?
 */
static
int
diskpsize(struct dev_psize_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;

	dp = dev->si_disk;
	if (dp == NULL)
		return (ENODEV);
	ap->a_result = dssize(dev, &dp->d_slice);
	return (0);
}

/*
 * When new device entries are instantiated, make sure they inherit our
 * si_disk structure and block and iosize limits from the raw device.
 *
 * This routine is always called synchronously in the context of the
 * client.
 *
 * XXX The various io and block size constraints are not always initialized
 * properly by devices.
 */
static
int
diskclone(struct dev_clone_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	//XXX: need changes for devfs
	dp = dev->si_ops->head.data;
	KKASSERT(dp != NULL);
	dev->si_disk = dp;
	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
	return (0);
}

int
diskdump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp = dev->si_ops->head.data;
	int error;

	error = disk_dumpcheck(dev, &ap->a_count, &ap->a_blkno, &ap->a_secsize);
	if (error == 0) {
		ap->a_head.a_dev = dp->d_rawdev;
		error = dev_doperate(&ap->a_head);
	}

	return (error);
}


SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
    0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
    0, sizeof(struct disk), "sizeof(struct disk)");


/*
 * Seek sort for disks.
 *
 * The bio_queue keeps two queues, sorted in ascending block order.  The first
 * queue holds those requests which are positioned after the current block
 * (in the first request); the second, which starts at queue->switch_point,
 * holds requests which came in after their block number was passed.  Thus
 * we implement a one way scan, retracting after reaching the end of the drive
 * to the first request on the second queue, at which time it becomes the
 * first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 */
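/*
 * Worked example of the one-way scan (illustrative offsets): with the head
 * last at block 100, queued requests 120 and 180 sit on the first queue,
 * while 40 and 70, which arrived after their blocks were passed, sit on the
 * second queue beyond the switch point.  Service order is 120, 180, then
 * 40, 70; once the scan retracts, the second queue becomes the first.
 */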
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
	struct bio *bq;
	struct bio *bn;
	struct bio *be;

	be = TAILQ_LAST(&bioq->queue, bio_queue);
	/*
	 * If the queue is empty or we are an
	 * ordered transaction, then it's easy.
	 */
	if ((bq = bioq_first(bioq)) == NULL ||
	    (bio->bio_buf->b_flags & B_ORDERED) != 0) {
		bioq_insert_tail(bioq, bio);
		return;
	} else if (bioq->insert_point != NULL) {

		/*
		 * A certain portion of the list is
		 * "locked" to preserve ordering, so
		 * we can only insert after the insert
		 * point.
		 */
		bq = bioq->insert_point;
	} else {

		/*
		 * If we lie before the last removed (currently active)
		 * request, and are not inserting ourselves into the
		 * "locked" portion of the list, then we must add ourselves
		 * to the second request list.
		 */
		if (bio->bio_offset < bioq->last_offset) {
			bq = bioq->switch_point;
			/*
			 * If we are starting a new secondary list,
			 * then it's easy.
			 */
			if (bq == NULL) {
				bioq->switch_point = bio;
				bioq_insert_tail(bioq, bio);
				return;
			}
			/*
			 * If we lie ahead of the current switch point,
			 * insert us before the switch point and move
			 * the switch point.
			 */
			if (bio->bio_offset < bq->bio_offset) {
				bioq->switch_point = bio;
				TAILQ_INSERT_BEFORE(bq, bio, bio_act);
				return;
			}
		} else {
			if (bioq->switch_point != NULL)
				be = TAILQ_PREV(bioq->switch_point,
				    bio_queue, bio_act);
			/*
			 * If we lie between last_offset and bq,
			 * insert before bq.
			 */
			if (bio->bio_offset < bq->bio_offset) {
				TAILQ_INSERT_BEFORE(bq, bio, bio_act);
				return;
			}
		}
	}

	/*
	 * Request is at/after our current position in the list.
	 * Optimize for sequential I/O by seeing if we go at the tail.
	 */
	if (bio->bio_offset > be->bio_offset) {
		TAILQ_INSERT_AFTER(&bioq->queue, be, bio, bio_act);
		return;
	}

	/* Otherwise, insertion sort */
	while ((bn = TAILQ_NEXT(bq, bio_act)) != NULL) {

		/*
		 * We want to go after the current request if it is the end
		 * of the first request list, or if the next request is a
		 * larger cylinder than our request.
		 */
		if (bn == bioq->switch_point
		    || bio->bio_offset < bn->bio_offset)
			break;
		bq = bn;
	}
	TAILQ_INSERT_AFTER(&bioq->queue, bq, bio, bio_act);
}

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be null from drivers that have not
 * been converted to use them.  The message is printed with kprintf
 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
 * The message should be completed (with at least a newline) with kprintf
 * or log(-1, ...), respectively.  There is no trailing space.
 */
void
diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
{
	struct buf *bp = bio->bio_buf;
	const char *term;

	switch (bp->b_cmd) {
	case BUF_CMD_READ:
		term = "read";
		break;
	case BUF_CMD_WRITE:
		term = "write";
		break;
	default:
		term = "access";
		break;
	}
	//sname = dsname(dev, unit, slice, part, partname);
	kprintf("%s: %s %sing ", dev->si_name, what, term);
	kprintf("offset %012llx for %d",
	    (long long)bio->bio_offset,
	    bp->b_bcount);

	if (donecnt)
		kprintf(" (%d bytes completed)", donecnt);
}

/*
 * Locate a disk device
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name(devname);
}


void
disk_config(void *arg)
{
	struct lwkt_port rep_port;
	disk_msg_t disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
	disk_msg_t msg_incoming;
	lwkt_port_t port = &disk_msg_port;

	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
	kprintf("disk_config: sync'ing up\n");
	disk_msg->hdr.u.ms_result = DISK_SYNC;

	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
	msg_incoming = lwkt_waitport(&rep_port, 0);
}


static void
disk_init(void)
{
	struct thread *td_core;

	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_init() called\n");

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc,
	    objcache_malloc_free,
	    &disk_msg_malloc_args);

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
	    0, 0, "disk_msg_core");

	tsleep(td_core, 0, "diskcore", 0);
}


static void
disk_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "disk_uninit() called\n");

	objcache_destroy(disk_msg_cache);
}


SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);