/*	$NetBSD: md.c,v 1.66 2010/11/25 08:53:30 hannken Exp $	*/

/*
 * Copyright (c) 1995 Gordon W. Ross, Leo Weppelman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements a general-purpose memory-disk.
 * See md.h for notes on the config types.
 *
 * Note that this driver provides the same functionality
 * as the MFS filesystem hack, but this is better because
 * you can use this for any filesystem type you'd like!
 *
 * Credit for most of the kmem ramdisk code goes to:
 *   Leo Weppelman (atari) and Phil Nelson (pc532)
 * Credit for the ideas behind the "user space memory" code goes
 * to the authors of the MFS implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: md.c,v 1.66 2010/11/25 08:53:30 hannken Exp $");

#ifdef _KERNEL_OPT
#include "opt_md.h"
#else
#define MEMORY_DISK_SERVER 1
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/disklabel.h>

#include <uvm/uvm_extern.h>

#include <dev/md.h>

/*
 * The user-space functionality is included by default.
 * Use `options MEMORY_DISK_SERVER=0' to turn it off.
 */
#ifndef MEMORY_DISK_SERVER
#error MEMORY_DISK_SERVER should be defined by opt_md.h
#endif	/* MEMORY_DISK_SERVER */

/*
 * We should use the raw partition for ioctl.
 */
#define MD_UNIT(unit)	DISKUNIT(unit)

/* autoconfig stuff... */

struct md_softc {
	device_t sc_dev;	/* Self. */
	struct disk sc_dkdev;	/* hook for generic disk handling */
	struct md_conf sc_md;
	kmutex_t sc_lock;	/* Protect self. */
	kcondvar_t sc_cv;	/* Wait here for work. */
	struct bufq_state *sc_buflist;
};
/* shorthand for fields in sc_md: */
#define sc_addr sc_md.md_addr
#define sc_size sc_md.md_size
#define sc_type sc_md.md_type

void mdattach(int);

static void md_attach(device_t, device_t, void *);
static int md_detach(device_t, int);

static dev_type_open(mdopen);
static dev_type_close(mdclose);
static dev_type_read(mdread);
static dev_type_write(mdwrite);
static dev_type_ioctl(mdioctl);
static dev_type_strategy(mdstrategy);
static dev_type_size(mdsize);

const struct bdevsw md_bdevsw = {
	mdopen, mdclose, mdstrategy, mdioctl, nodump, mdsize, D_DISK | D_MPSAFE
};

const struct cdevsw md_cdevsw = {
	mdopen, mdclose, mdread, mdwrite, mdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

static struct dkdriver mddkdriver = { mdstrategy, NULL };

extern struct cfdriver md_cd;
CFATTACH_DECL3_NEW(md, sizeof(struct md_softc),
	0, md_attach, md_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static kmutex_t md_device_lock;		/* Protect unit creation / deletion. */
extern size_t md_root_size;

static void md_set_disklabel(struct md_softc *);

/*
 * This is called if we are configured as a pseudo-device
 */
void
mdattach(int n)
{

	mutex_init(&md_device_lock, MUTEX_DEFAULT, IPL_NONE);
	if (config_cfattach_attach(md_cd.cd_name, &md_ca)) {
		aprint_error("%s: cfattach_attach failed\n", md_cd.cd_name);
		return;
	}
}

static void
md_attach(device_t parent, device_t self, void *aux)
{
	struct md_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_type = MD_UNCONFIGURED;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_cv, "mdidle");
	bufq_alloc(&sc->sc_buflist, "fcfs", 0);

	/* XXX - Could accept aux info here to set the config. */
#ifdef MEMORY_DISK_HOOKS
	/*
	 * This external function might setup a pre-loaded disk.
	 * All it would need to do is setup the md_conf struct.
	 * See sys/dev/md_root.c for an example.
	 */
	md_attach_hook(device_unit(self), &sc->sc_md);
#endif

	/*
	 * Initialize and attach the disk structure.
	 */
	disk_init(&sc->sc_dkdev, device_xname(self), &mddkdriver);
	disk_attach(&sc->sc_dkdev);

	if (sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

static int
md_detach(device_t self, int flags)
{
	struct md_softc *sc = device_private(self);
	int rc;

	rc = 0;
	mutex_enter(&sc->sc_dkdev.dk_openlock);
	if (sc->sc_dkdev.dk_openmask == 0 && sc->sc_type == MD_UNCONFIGURED)
		;	/* nothing to do */
	else if ((flags & DETACH_FORCE) == 0)
		rc = EBUSY;
	mutex_exit(&sc->sc_dkdev.dk_openlock);

	if (rc != 0)
		return rc;

	pmf_device_deregister(self);
	disk_detach(&sc->sc_dkdev);
	disk_destroy(&sc->sc_dkdev);
	bufq_free(sc->sc_buflist);
	mutex_destroy(&sc->sc_lock);
	cv_destroy(&sc->sc_cv);
	return 0;
}

/*
 * operational routines:
 * open, close, read, write, strategy,
 * ioctl, dump, size
 */

#if MEMORY_DISK_SERVER
static int md_server_loop(struct md_softc *sc);
static int md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
		struct lwp *l);
#endif	/* MEMORY_DISK_SERVER */
static int md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
		struct lwp *l);

static int
mdsize(dev_t dev)
{
	struct md_softc *sc;
	int res;

	sc = device_lookup_private(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return 0;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type == MD_UNCONFIGURED)
		res = 0;
	else
		res = sc->sc_size >> DEV_BSHIFT;
	mutex_exit(&sc->sc_lock);

	return res;
}

static int
mdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	int unit;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;

	mutex_enter(&md_device_lock);
	unit = MD_UNIT(dev);
	sc = device_lookup_private(&md_cd, unit);
	if (sc == NULL) {
		if (part != RAW_PART) {
			mutex_exit(&md_device_lock);
			return ENXIO;
		}
		cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
		cf->cf_name = md_cd.cd_name;
		cf->cf_atname = md_cd.cd_name;
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_STAR;
		sc = device_private(config_attach_pseudo(cf));
		if (sc == NULL) {
			mutex_exit(&md_device_lock);
			return ENOMEM;
		}
	}

	dk = &sc->sc_dkdev;

	/*
	 * The raw partition is used for ioctl to configure.
	 */
	if (part == RAW_PART)
		goto ok;

#ifdef MEMORY_DISK_HOOKS
	/* Call the open hook to allow loading the device. */
	md_open_hook(unit, &sc->sc_md);
#endif

	/*
	 * This is a normal, "slave" device, so
	 * enforce initialized.
	 */
	if (sc->sc_type == MD_UNCONFIGURED) {
		mutex_exit(&md_device_lock);
		return ENXIO;
	}

ok:
	/* XXX duplicates code in dk_open().  Call dk_open(), instead? */
	mutex_enter(&dk->dk_openlock);
	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	mutex_exit(&dk->dk_openlock);
	mutex_exit(&md_device_lock);
	return 0;
}

static int
mdclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;

	sc = device_lookup_private(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	dk = &sc->sc_dkdev;

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	mutex_exit(&dk->dk_openlock);

	mutex_enter(&md_device_lock);
	cf = device_cfdata(sc->sc_dev);
	error = config_detach(sc->sc_dev, DETACH_QUIET);
	if (! error)
		free(cf, M_DEVBUF);
	mutex_exit(&md_device_lock);
	return error;
}

static int
mdread(dev_t dev, struct uio *uio, int flags)
{
	struct md_softc *sc;

	sc = device_lookup_private(&md_cd, MD_UNIT(dev));

	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED)
		return ENXIO;

	return (physio(mdstrategy, NULL, dev, B_READ, minphys, uio));
}

static int
mdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct md_softc *sc;

	sc = device_lookup_private(&md_cd, MD_UNIT(dev));

	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED)
		return ENXIO;

	return (physio(mdstrategy, NULL, dev, B_WRITE, minphys, uio));
}

/*
 * Handle I/O requests, either directly, or
 * by passing them to the server process.
 */
static void
mdstrategy(struct buf *bp)
{
	struct md_softc *sc;
	void *	addr;
	size_t	off, xfer;
	bool is_read;

	sc = device_lookup_private(&md_cd, MD_UNIT(bp->b_dev));

	/* Make sure the unit exists before touching its lock. */
	if (sc == NULL) {
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	mutex_enter(&sc->sc_lock);

	if (sc->sc_type == MD_UNCONFIGURED) {
		bp->b_error = ENXIO;
		goto done;
	}

	switch (sc->sc_type) {
#if MEMORY_DISK_SERVER
	case MD_UMEM_SERVER:
		/* Just add this job to the server's queue. */
		bufq_put(sc->sc_buflist, bp);
		cv_signal(&sc->sc_cv);
		mutex_exit(&sc->sc_lock);
		/* see md_server_loop() */
		/* no biodone in this case */
		return;
#endif	/* MEMORY_DISK_SERVER */

	case MD_KMEM_FIXED:
	case MD_KMEM_ALLOCATED:
		/* These are in kernel space.  Access directly. */
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				break;	/* EOF */
			goto set_eio;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			memcpy(bp->b_data, addr, xfer);
		else
			memcpy(addr, bp->b_data, xfer);
		disk_unbusy(&sc->sc_dkdev, xfer, is_read);
		bp->b_resid -= xfer;
		break;

	default:
		bp->b_resid = bp->b_bcount;
	set_eio:
		bp->b_error = EIO;
		break;
	}

done:
	mutex_exit(&sc->sc_lock);

	biodone(bp);
}

static int
mdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct md_softc *sc;
	struct md_conf *umd;
	struct disklabel *lp;
	struct partinfo *pp;
	int error;

	if ((sc = device_lookup_private(&md_cd, MD_UNIT(dev))) == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type != MD_UNCONFIGURED) {
		switch (cmd) {
		case DIOCGDINFO:
			lp = (struct disklabel *)data;
			*lp = *sc->sc_dkdev.dk_label;
			mutex_exit(&sc->sc_lock);
			return 0;

		case DIOCGPART:
			pp = (struct partinfo *)data;
			pp->disklab = sc->sc_dkdev.dk_label;
			pp->part =
			    &sc->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
			mutex_exit(&sc->sc_lock);
			return 0;
		}
	}

	/* If this is not the raw partition, punt! */
	if (DISKPART(dev) != RAW_PART) {
		mutex_exit(&sc->sc_lock);
		return ENOTTY;
	}

	umd = (struct md_conf *)data;
	error = EINVAL;
	switch (cmd) {
	case MD_GETCONF:
		*umd = sc->sc_md;
		error = 0;
		break;

	case MD_SETCONF:
		/* Can only set it once. */
		if (sc->sc_type != MD_UNCONFIGURED)
			break;
		switch (umd->md_type) {
		case MD_KMEM_ALLOCATED:
			error = md_ioctl_kalloc(sc, umd, l);
			break;
#if MEMORY_DISK_SERVER
		case MD_UMEM_SERVER:
			error = md_ioctl_server(sc, umd, l);
			break;
#endif	/* MEMORY_DISK_SERVER */
		default:
			break;
		}
		break;
	}
	mutex_exit(&sc->sc_lock);
	return error;
}

static void
md_set_disklabel(struct md_softc *sc)
{
	struct disklabel *lp = sc->sc_dkdev.dk_label;
	struct partition *pp;

	memset(lp, 0, sizeof(*lp));

	lp->d_secsize = DEV_BSIZE;
	lp->d_secperunit = sc->sc_size / DEV_BSIZE;
	if (lp->d_secperunit >= (32*64)) {
		lp->d_nsectors = 32;
		lp->d_ntracks = 64;
		lp->d_ncylinders = lp->d_secperunit / (32*64);
	} else {
		lp->d_nsectors = 1;
		lp->d_ntracks = 1;
		lp->d_ncylinders = lp->d_secperunit;
	}
	lp->d_secpercyl = lp->d_ntracks*lp->d_nsectors;

	strncpy(lp->d_typename, md_cd.cd_name, sizeof(lp->d_typename));
	lp->d_type = DTYPE_UNKNOWN;
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	pp = &lp->d_partitions[0];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_BSDFFS;

	pp = &lp->d_partitions[RAW_PART];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_UNUSED;

	lp->d_npartitions = RAW_PART+1;
	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_KMEM_ALLOCATED)
 * Just allocate some kernel memory and return.
 */
static int
md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t addr;
	vsize_t size;

	mutex_exit(&sc->sc_lock);

	/* Sanity check the size. */
	size = umd->md_size;
	addr = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);

	mutex_enter(&sc->sc_lock);

	if (!addr)
		return ENOMEM;

	/* If another thread beat us to configure this unit:  fail. */
	if (sc->sc_type != MD_UNCONFIGURED) {
		uvm_km_free(kernel_map, addr, size, UVM_KMF_WIRED);
		return EINVAL;
	}

	/* This unit is now configured. */
	sc->sc_addr = (void *)addr;	/* kernel space */
	sc->sc_size = (size_t)size;
	sc->sc_type = MD_KMEM_ALLOCATED;
	md_set_disklabel(sc);
	return 0;
}

#if MEMORY_DISK_SERVER

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_UMEM_SERVER)
 * Set config, then become the I/O server for this unit.
 */
static int
md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t end;
	int error;

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Sanity check addr, size. */
	end = (vaddr_t) ((char *)umd->md_addr + umd->md_size);

	if ((end >= VM_MAXUSER_ADDRESS) ||
	    (end < ((vaddr_t) umd->md_addr)) )
		return EINVAL;

	/* This unit is now configured. */
	sc->sc_addr = umd->md_addr;	/* user space */
	sc->sc_size = umd->md_size;
	sc->sc_type = MD_UMEM_SERVER;
	md_set_disklabel(sc);

	/* Become the server daemon */
	error = md_server_loop(sc);

	/* This server is now going away! */
	sc->sc_type = MD_UNCONFIGURED;
	sc->sc_addr = 0;
	sc->sc_size = 0;

	return (error);
}

static int
md_server_loop(struct md_softc *sc)
{
	struct buf *bp;
	void *addr;	/* user space address */
	size_t off;	/* offset into "device" */
	size_t xfer;	/* amount to transfer */
	int error;
	bool is_read;

	KASSERT(mutex_owned(&sc->sc_lock));

	for (;;) {
		/* Wait for some work to arrive. */
		while ((bp = bufq_get(sc->sc_buflist)) == NULL) {
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
			if (error)
				return error;
		}

		/* Do the transfer to/from user space. */
		mutex_exit(&sc->sc_lock);
		error = 0;
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				goto done;	/* EOF (not an error) */
			error = EIO;
			goto done;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			error = copyin(addr, bp->b_data, xfer);
		else
			error = copyout(bp->b_data, addr, xfer);
		disk_unbusy(&sc->sc_dkdev, (error ? 0 : xfer), is_read);
		if (!error)
			bp->b_resid -= xfer;

	done:
		if (error) {
			bp->b_error = error;
		}
		biodone(bp);
		mutex_enter(&sc->sc_lock);
	}
}
#endif	/* MEMORY_DISK_SERVER */
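
/*
 * Example (illustrative sketch, not part of the original driver): how a
 * user-space program might back an md unit with its own memory through the
 * MD_SETCONF ioctl and MD_UMEM_SERVER, i.e. the path handled above by
 * md_ioctl_server() and md_server_loop().  The ioctl must be issued on the
 * raw partition, and it does not return while the unit is in service: the
 * calling process becomes the I/O server, and the kernel copies data in and
 * out of the calloc()ed region with copyin()/copyout().  The device path
 * "/dev/rmd0d" is hypothetical (the raw-partition letter differs between
 * ports), <dev/md.h> is assumed to be visible to userland, and the kernel
 * must have MEMORY_DISK_SERVER enabled.  A simpler alternative is
 * md_type = MD_KMEM_ALLOCATED, where the kernel allocates wired, zeroed
 * memory itself and the ioctl returns immediately.
 */
#if 0	/* example only; never compiled as part of the kernel */
#include <sys/ioctl.h>

#include <dev/md.h>

#include <err.h>
#include <fcntl.h>
#include <stdlib.h>

int
main(void)
{
	struct md_conf md;
	int fd;

	/* Raw partition of the unit to configure (path is a guess). */
	fd = open("/dev/rmd0d", O_RDWR);
	if (fd == -1)
		err(1, "open");

	/* Offer 4 MB of our own address space as the disk's backing store. */
	md.md_size = 4 * 1024 * 1024;
	md.md_addr = calloc(1, md.md_size);
	if (md.md_addr == NULL)
		err(1, "calloc");
	md.md_type = MD_UMEM_SERVER;

	/*
	 * Blocks here serving I/O; md_server_loop() only returns (and the
	 * unit reverts to unconfigured) once the wait is interrupted by a
	 * signal.
	 */
	if (ioctl(fd, MD_SETCONF, &md) == -1)
		err(1, "MD_SETCONF");

	return 0;
}
#endif	/* example */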