1 /* 2 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Alex Hornung <ahornung@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 */ 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/mount.h> 38 #include <sys/vnode.h> 39 #include <sys/types.h> 40 #include <sys/lock.h> 41 #include <sys/msgport.h> 42 #include <sys/msgport2.h> 43 #include <sys/spinlock2.h> 44 #include <sys/sysctl.h> 45 #include <sys/ucred.h> 46 #include <sys/param.h> 47 #include <sys/sysref2.h> 48 #include <sys/systm.h> 49 #include <vfs/devfs/devfs.h> 50 #include <vfs/devfs/devfs_rules.h> 51 52 MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations"); 53 DEVFS_DECLARE_CLONE_BITMAP(ops_id); 54 /* 55 * SYSREF Integration - reference counting, allocation, 56 * sysid and syslink integration. 57 */ 58 static void devfs_cdev_terminate(cdev_t dev); 59 static struct sysref_class cdev_sysref_class = { 60 .name = "cdev", 61 .mtype = M_DEVFS, 62 .proto = SYSREF_PROTO_DEV, 63 .offset = offsetof(struct cdev, si_sysref), 64 .objsize = sizeof(struct cdev), 65 .mag_capacity = 32, 66 .flags = 0, 67 .ops = { 68 .terminate = (sysref_terminate_func_t)devfs_cdev_terminate 69 } 70 }; 71 72 static struct objcache *devfs_node_cache; 73 static struct objcache *devfs_msg_cache; 74 static struct objcache *devfs_dev_cache; 75 76 static struct objcache_malloc_args devfs_node_malloc_args = { 77 sizeof(struct devfs_node), M_DEVFS }; 78 struct objcache_malloc_args devfs_msg_malloc_args = { 79 sizeof(struct devfs_msg), M_DEVFS }; 80 struct objcache_malloc_args devfs_dev_malloc_args = { 81 sizeof(struct cdev), M_DEVFS }; 82 83 static struct devfs_dev_head devfs_dev_list = 84 TAILQ_HEAD_INITIALIZER(devfs_dev_list); 85 static struct devfs_mnt_head devfs_mnt_list = 86 TAILQ_HEAD_INITIALIZER(devfs_mnt_list); 87 static struct devfs_chandler_head devfs_chandler_list = 88 TAILQ_HEAD_INITIALIZER(devfs_chandler_list); 89 static struct devfs_alias_head devfs_alias_list = 90 TAILQ_HEAD_INITIALIZER(devfs_alias_list); 91 92 struct lock devfs_lock; 93 static struct lwkt_port devfs_dispose_port; 94 static struct 
lwkt_port devfs_msg_port; 95 static struct thread *td_core; 96 97 static struct spinlock ino_lock; 98 static ino_t d_ino; 99 static int devfs_debug_enable; 100 static int devfs_run; 101 102 static ino_t devfs_fetch_ino(void); 103 static int devfs_gc_dirs(struct devfs_node *); 104 static int devfs_gc_links(struct devfs_node *, struct devfs_node *, size_t); 105 static int devfs_create_all_dev_worker(struct devfs_node *); 106 static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int); 107 static int devfs_destroy_dev_worker(cdev_t); 108 static int devfs_destroy_subnames_worker(char *); 109 static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int); 110 static int devfs_propagate_dev(cdev_t, int); 111 static int devfs_unlink_dev(cdev_t dev); 112 static void devfs_msg_exec(devfs_msg_t msg); 113 114 static int devfs_chandler_add_worker(char *, d_clone_t *); 115 static int devfs_chandler_del_worker(char *); 116 117 static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t); 118 static void devfs_msg_core(void *); 119 120 static int devfs_find_device_by_name_worker(devfs_msg_t); 121 static int devfs_find_device_by_udev_worker(devfs_msg_t); 122 123 static struct vnode *devfs_inode_to_vnode_worker(struct devfs_node *, ino_t); 124 125 static int devfs_apply_reset_rules_caller(char *, int); 126 static int devfs_apply_reset_rules_worker(struct devfs_node *, int); 127 128 static int devfs_scan_callback_worker(devfs_scan_t *); 129 130 static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *, 131 char *, size_t, int); 132 133 static int devfs_make_alias_worker(struct devfs_alias *); 134 static int devfs_alias_remove(cdev_t); 135 static int devfs_alias_reap(void); 136 static int devfs_alias_propagate(struct devfs_alias *); 137 static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *); 138 static int devfs_alias_check_create(struct devfs_node *); 139 140 static int devfs_clr_subnames_flag_worker(char *, uint32_t); 141 static int 
devfs_destroy_subnames_without_flag_worker(char *, uint32_t); 142 143 /* 144 * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function 145 * using kvprintf 146 */ 147 int 148 devfs_debug(int level, char *fmt, ...) 149 { 150 __va_list ap; 151 152 __va_start(ap, fmt); 153 if (level <= devfs_debug_enable) 154 kvprintf(fmt, ap); 155 __va_end(ap); 156 157 return 0; 158 } 159 160 /* 161 * devfs_allocp() Allocates a new devfs node with the specified 162 * parameters. The node is also automatically linked into the topology 163 * if a parent is specified. It also calls the rule and alias stuff to 164 * be applied on the new node 165 */ 166 struct devfs_node * 167 devfs_allocp(devfs_nodetype devfsnodetype, char *name, 168 struct devfs_node *parent, struct mount *mp, cdev_t dev) 169 { 170 struct devfs_node *node = NULL; 171 size_t namlen = strlen(name); 172 173 node = objcache_get(devfs_node_cache, M_WAITOK); 174 bzero(node, sizeof(*node)); 175 176 atomic_add_int(&(DEVFS_MNTDATA(mp)->leak_count), 1); 177 178 node->d_dev = NULL; 179 node->nchildren = 1; 180 node->mp = mp; 181 node->d_dir.d_ino = devfs_fetch_ino(); 182 183 /* 184 * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries 185 * respectively. 186 */ 187 node->cookie_jar = 2; 188 189 /* 190 * Access Control members 191 */ 192 node->mode = DEVFS_DEFAULT_MODE; 193 node->uid = DEVFS_DEFAULT_UID; 194 node->gid = DEVFS_DEFAULT_GID; 195 196 switch (devfsnodetype) { 197 case Proot: 198 /* 199 * Ensure that we don't recycle the root vnode by marking it as 200 * linked into the topology. 
201 */ 202 node->flags |= DEVFS_NODE_LINKED; 203 case Pdir: 204 TAILQ_INIT(DEVFS_DENODE_HEAD(node)); 205 node->d_dir.d_type = DT_DIR; 206 node->nchildren = 2; 207 break; 208 209 case Plink: 210 node->d_dir.d_type = DT_LNK; 211 break; 212 213 case Preg: 214 node->d_dir.d_type = DT_REG; 215 break; 216 217 case Pdev: 218 if (dev != NULL) { 219 node->d_dir.d_type = DT_CHR; 220 node->d_dev = dev; 221 222 node->mode = dev->si_perms; 223 node->uid = dev->si_uid; 224 node->gid = dev->si_gid; 225 226 devfs_alias_check_create(node); 227 } 228 break; 229 230 default: 231 panic("devfs_allocp: unknown node type"); 232 } 233 234 node->v_node = NULL; 235 node->node_type = devfsnodetype; 236 237 /* Initialize the dirent structure of each devfs vnode */ 238 KKASSERT(namlen < 256); 239 node->d_dir.d_namlen = namlen; 240 node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK); 241 memcpy(node->d_dir.d_name, name, namlen); 242 node->d_dir.d_name[namlen] = '\0'; 243 244 /* Initialize the parent node element */ 245 node->parent = parent; 246 247 /* Apply rules */ 248 devfs_rule_check_apply(node); 249 250 /* Initialize *time members */ 251 nanotime(&node->atime); 252 node->mtime = node->ctime = node->atime; 253 254 /* 255 * Associate with parent as last step, clean out namecache 256 * reference. 257 */ 258 if ((parent != NULL) && 259 ((parent->node_type == Proot) || (parent->node_type == Pdir))) { 260 parent->nchildren++; 261 node->cookie = parent->cookie_jar++; 262 node->flags |= DEVFS_NODE_LINKED; 263 TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link); 264 265 /* This forces negative namecache lookups to clear */ 266 ++mp->mnt_namecache_gen; 267 } 268 269 return node; 270 } 271 272 /* 273 * devfs_allocv() allocates a new vnode based on a devfs node. 
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
	struct vnode *vp;
	int error = 0;

	KKASSERT(node);

try_again:
	/*
	 * If the node already has a vnode, try to reuse it.  vget() may
	 * return ENOENT if the vnode is being reclaimed concurrently, in
	 * which case we loop and re-check node->v_node.
	 */
	while ((vp = node->v_node) != NULL) {
		error = vget(vp, LK_EXCLUSIVE);
		if (error != ENOENT) {
			*vpp = vp;
			goto out;
		}
	}

	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
		goto out;

	vp = *vpp;

	/*
	 * Another thread may have raced us and associated a vnode with the
	 * node while we slept in getnewvnode().  Discard ours and retry.
	 */
	if (node->v_node != NULL) {
		vp->v_type = VBAD;
		vx_put(vp);
		goto try_again;
	}

	vp->v_data = node;
	node->v_node = vp;

	/* Set vnode type (and ops for devices) according to the node type */
	switch (node->node_type) {
	case Proot:
		vp->v_flag |= VROOT;
		/* fallthrough - the root is also a directory */
	case Pdir:
		vp->v_type = VDIR;
		break;

	case Plink:
		vp->v_type = VLNK;
		break;

	case Preg:
		vp->v_type = VREG;
		break;

	case Pdev:
		vp->v_type = VCHR;
		KKASSERT(node->d_dev);

		vp->v_uminor = node->d_dev->si_uminor;
		vp->v_umajor = 0;

		/* Device vnodes use the spec ops, not the regular devfs ops */
		v_associate_rdev(vp, node->d_dev);
		vp->v_ops = &node->mp->mnt_vn_spec_ops;
		break;

	default:
		panic("devfs_allocv: unknown node type");
	}

out:
	return error;
}

/*
 * devfs_allocvp allocates both a devfs node (with the given settings) and
 * a vnode based on the newly created devfs node.
 *
 * NOTE(review): always returns 0; devfs_allocv()'s error (if any) is not
 * propagated and *vpp may be left untouched on that path - confirm callers
 * check *vpp rather than the return value.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
	      char *name, struct devfs_node *parent, cdev_t dev)
{
	struct devfs_node *node;

	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

	if (node != NULL)
		devfs_allocv(vpp, node);
	else
		*vpp = NULL;

	return 0;
}

/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 */
int
devfs_freep(struct devfs_node *node)
{
	struct vnode *vp;

	/* Only the root may still be flagged linked when freed */
	KKASSERT(node);
	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
		 (node->node_type == Proot));
	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);

	/* Balances the leak_count bump done in devfs_allocp() */
	atomic_subtract_int(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
	if (node->symlink_name) {
		kfree(node->symlink_name, M_DEVFS);
		node->symlink_name = NULL;
	}

	/*
	 * Remove the node from the orphan list if it is still on it.
	 */
	if (node->flags & DEVFS_ORPHANED)
		devfs_tracer_del_orphan(node);

	/*
	 * Disassociate the vnode from the node.  This also prevents the
	 * vnode's reclaim code from double-freeing the node.
	 */
	if ((vp = node->v_node) != NULL) {
		v_release_rdev(vp);
		vp->v_data = NULL;
		node->v_node = NULL;
	}
	if (node->d_dir.d_name)
		kfree(node->d_dir.d_name, M_DEVFS);
	node->flags |= DEVFS_DESTROYED;

	objcache_put(devfs_node_cache, node);

	return 0;
}

/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by freep.
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct devfs_node *parent;
	KKASSERT(node);

	/*
	 * Add the node to the orphan list, so it is referenced somewhere
	 * and we don't leak it.
	 */
	devfs_tracer_add_orphan(node);

	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent) {
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		KKASSERT((parent->nchildren >= 0));
		node->flags &= ~DEVFS_NODE_LINKED;
	}
	node->parent = NULL;
	return 0;
}

/*
 * devfs_reaperp() is a recursive function that iterates through all the
 * topology, unlinking and freeing all devfs nodes.
 *
 * Directories with only the implicit '.' and '..' entries (nchildren == 2)
 * have no real children and are not descended into.
 */
int
devfs_reaperp(struct devfs_node *node)
{
	struct devfs_node *node1, *node2;

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			/* MUTABLE walk: children are freed during iteration */
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_reaperp(node1);
			}
		}
	}
	devfs_unlinkp(node);
	devfs_freep(node);

	return 0;
}

/*
 * devfs_gc() is devfs garbage collector. It takes care of unlinking and
 * freeing a node, but also removes empty directories and links that link
 * via devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	/* First reap auto-links to the node, then the node, then empty dirs */
	devfs_gc_links(root_node, node, node->nlinks);
	devfs_unlinkp(node);
	devfs_gc_dirs(root_node);

	devfs_freep(node);

	return 0;
}

/*
 * devfs_gc_dirs() is a helper function for devfs_gc, unlinking and freeing
 * empty directories.
 */
static int
devfs_gc_dirs(struct devfs_node *node)
{
	struct devfs_node *node1, *node2;

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			/* MUTABLE walk: subdirs may be freed while iterating */
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_gc_dirs(node1);
			}
		}

		/* nchildren == 2 means only '.' and '..' remain: empty dir */
		if (node->nchildren == 2) {
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return 0;
}

/*
 * devfs_gc_links() is a helper function for devfs_gc, unlinking and freeing
 * auto-linked nodes linking to the node being deleted.
 *
 * Returns the number of links still unaccounted for, so the recursion can
 * stop early once all known links have been reaped.
 *
 * NOTE(review): nlinks is size_t (unsigned) but the function is declared
 * to return int, and the KKASSERT(nlinks >= 0) below is vacuously true for
 * an unsigned type - consider unifying the types; confirm against the
 * prototype before changing.
 */
static int
devfs_gc_links(struct devfs_node *node, struct devfs_node *target,
	       size_t nlinks)
{
	struct devfs_node *node1, *node2;

	if (nlinks > 0) {
		if ((node->node_type == Proot) || (node->node_type == Pdir)) {
			if (node->nchildren > 2) {
				TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
						      link, node2) {
					nlinks = devfs_gc_links(node1, target, nlinks);
				}
			}
		} else if (node->link_target == target) {
			/* Found an auto-link pointing at the dying node */
			nlinks--;
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	KKASSERT(nlinks >= 0);

	return nlinks;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device. The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	reference_dev(dev);
	devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

	return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device destruction.
 * It just sends a message with the relevant details to the devfs core.
559 */ 560 int 561 devfs_destroy_dev(cdev_t dev) 562 { 563 devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0); 564 return 0; 565 } 566 567 /* 568 * devfs_mount_add() is the synchronous entry point for adding a new devfs 569 * mount. It sends a synchronous message with the relevant details to the 570 * devfs core. 571 */ 572 int 573 devfs_mount_add(struct devfs_mnt_data *mnt) 574 { 575 devfs_msg_t msg; 576 577 msg = devfs_msg_get(); 578 msg->mdv_mnt = mnt; 579 msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg); 580 devfs_msg_put(msg); 581 582 return 0; 583 } 584 585 /* 586 * devfs_mount_del() is the synchronous entry point for removing a devfs mount. 587 * It sends a synchronous message with the relevant details to the devfs core. 588 */ 589 int 590 devfs_mount_del(struct devfs_mnt_data *mnt) 591 { 592 devfs_msg_t msg; 593 594 msg = devfs_msg_get(); 595 msg->mdv_mnt = mnt; 596 msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg); 597 devfs_msg_put(msg); 598 599 return 0; 600 } 601 602 /* 603 * devfs_destroy_subnames() is the synchronous entry point for device 604 * destruction by subname. It just sends a message with the relevant details to 605 * the devfs core. 
606 */ 607 int 608 devfs_destroy_subnames(char *name) 609 { 610 devfs_msg_t msg; 611 612 msg = devfs_msg_get(); 613 msg->mdv_load = name; 614 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg); 615 devfs_msg_put(msg); 616 return 0; 617 } 618 619 int 620 devfs_clr_subnames_flag(char *name, uint32_t flag) 621 { 622 devfs_msg_t msg; 623 624 msg = devfs_msg_get(); 625 msg->mdv_flags.name = name; 626 msg->mdv_flags.flag = flag; 627 msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg); 628 devfs_msg_put(msg); 629 630 return 0; 631 } 632 633 int 634 devfs_destroy_subnames_without_flag(char *name, uint32_t flag) 635 { 636 devfs_msg_t msg; 637 638 msg = devfs_msg_get(); 639 msg->mdv_flags.name = name; 640 msg->mdv_flags.flag = flag; 641 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg); 642 devfs_msg_put(msg); 643 644 return 0; 645 } 646 647 /* 648 * devfs_create_all_dev is the asynchronous entry point to trigger device 649 * node creation. It just sends a message with the relevant details to 650 * the devfs core. 651 */ 652 int 653 devfs_create_all_dev(struct devfs_node *root) 654 { 655 devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root); 656 return 0; 657 } 658 659 /* 660 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all 661 * devices with a specific set of dev_ops and minor. It just sends a 662 * message with the relevant details to the devfs core. 663 */ 664 int 665 devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor) 666 { 667 devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor); 668 return 0; 669 } 670 671 /* 672 * devfs_clone_handler_add is the synchronous entry point to add a new 673 * clone handler. It just sends a message with the relevant details to 674 * the devfs core. 
 */
int
devfs_clone_handler_add(char *name, d_clone_t *nhandler)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = nhandler;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_clone_handler_del is the synchronous entry point to remove a
 * clone handler. It just sends a message with the relevant details to
 * the devfs core.
 *
 * A NULL nhandler distinguishes deletion in the shared chandler message.
 */
int
devfs_clone_handler_del(char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = NULL;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_find_device_by_name is the synchronous entry point to find a
 * device given its name. It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 *
 * The formatted name lives on this stack frame; pointing mdv_name at it
 * is safe only because the message send is fully synchronous.
 *
 * NOTE(review): kvcprintf() is given no output bound here; this assumes
 * the formatted name always fits in PATH_MAX bytes - confirm, otherwise
 * a long format result overruns 'target'.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
	cdev_t found = NULL;
	devfs_msg_t msg;
	char target[PATH_MAX+1];
	__va_list ap;
	int i;

	if (fmt == NULL)
		return NULL;

	__va_start(ap, fmt);
	i = kvcprintf(fmt, NULL, target, 10, ap);
	target[i] = '\0';
	__va_end(ap);

	msg = devfs_msg_get();
	msg->mdv_name = target;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	return found;
}

/*
 * devfs_find_device_by_udev is the synchronous entry point to find a
 * device given its udev number. It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
	cdev_t found = NULL;
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_udev = udev;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_find_device_by_udev found? %s -end:3-\n",
		    ((found) ? found->si_name:"NO"));
	return found;
}

/*
 * devfs_inode_to_vnode resolves an inode number on the given mount to
 * its vnode via a synchronous message to the devfs core.  The returned
 * vnode is exclusively locked.
 *
 * NOTE(review): vn_lock() is called on vp without a NULL check; this
 * assumes the worker always resolves a valid ino to a vnode - confirm
 * against devfs_inode_to_vnode_worker's failure behavior.
 */
struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
	struct vnode *vp = NULL;
	devfs_msg_t msg;

	if (mp == NULL)
		return NULL;

	msg = devfs_msg_get();
	msg->mdv_ino.mp = mp;
	msg->mdv_ino.ino = target;
	msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
	vp = msg->mdv_ino.vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	devfs_msg_put(msg);

	return vp;
}

/*
 * devfs_make_alias is the asynchronous entry point to register an alias
 * for a device. It just sends a message with the relevant details to the
 * devfs core.
 *
 * The alias struct and its name copy are allocated here; ownership passes
 * to the devfs core thread with the message.
 */
int
devfs_make_alias(char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kmalloc(len + 1, M_DEVFS, M_WAITOK);
	memcpy(alias->name, name, len + 1);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
	return 0;
}

/*
 * devfs_apply_rules is the asynchronous entry point to trigger application
 * of all rules. It just sends a message with the relevant details to the
 * devfs core.
808 */ 809 int 810 devfs_apply_rules(char *mntto) 811 { 812 char *new_name; 813 size_t namelen; 814 815 namelen = strlen(mntto) + 1; 816 new_name = kmalloc(namelen, M_DEVFS, M_WAITOK); 817 memcpy(new_name, mntto, namelen); 818 devfs_msg_send_name(DEVFS_APPLY_RULES, new_name); 819 820 return 0; 821 } 822 823 /* 824 * devfs_reset_rules is the asynchronous entry point to trigger reset of all 825 * rules. It just sends a message with the relevant details to the devfs core. 826 */ 827 int 828 devfs_reset_rules(char *mntto) 829 { 830 char *new_name; 831 size_t namelen; 832 833 namelen = strlen(mntto) + 1; 834 new_name = kmalloc(namelen, M_DEVFS, M_WAITOK); 835 memcpy(new_name, mntto, namelen); 836 devfs_msg_send_name(DEVFS_RESET_RULES, new_name); 837 838 return 0; 839 } 840 841 842 /* 843 * devfs_scan_callback is the asynchronous entry point to call a callback 844 * on all cdevs. 845 * It just sends a message with the relevant details to the devfs core. 846 */ 847 int 848 devfs_scan_callback(devfs_scan_t *callback) 849 { 850 devfs_msg_t msg; 851 852 KKASSERT(sizeof(callback) == sizeof(void *)); 853 854 msg = devfs_msg_get(); 855 msg->mdv_load = callback; 856 msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg); 857 devfs_msg_put(msg); 858 859 return 0; 860 } 861 862 863 /* 864 * Acts as a message drain. Any message that is replied to here gets destroyed 865 * and the memory freed. 866 */ 867 static void 868 devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg) 869 { 870 devfs_msg_put((devfs_msg_t)msg); 871 } 872 873 /* 874 * devfs_msg_get allocates a new devfs msg and returns it. 875 */ 876 devfs_msg_t 877 devfs_msg_get() 878 { 879 return objcache_get(devfs_msg_cache, M_WAITOK); 880 } 881 882 /* 883 * devfs_msg_put deallocates a given devfs msg. 884 */ 885 int 886 devfs_msg_put(devfs_msg_t msg) 887 { 888 objcache_put(devfs_msg_cache, msg); 889 return 0; 890 } 891 892 /* 893 * devfs_msg_send is the generic asynchronous message sending facility 894 * for devfs. 
 * By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously.
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
	lwkt_port_t port = &devfs_msg_port;

	/* Replies are drained (and freed) by the dispose port */
	lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	if (port->mpu_td == curthread) {
		/* Already on the core thread: execute inline to avoid deadlock */
		devfs_msg_exec(devfs_msg);
		lwkt_replymsg(&devfs_msg->hdr, 0);
	} else {
		lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	}
}

/*
 * devfs_msg_send_sync is the generic synchronous message sending
 * facility for devfs. It initializes a local reply port and waits
 * for the core's answer. This answer is then returned.
 *
 * NOTE: the caller receives (and must devfs_msg_put) the replied
 * message, which is the same message object that was sent.
 */
devfs_msg_t
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
	struct lwkt_port rep_port;
	devfs_msg_t	msg_incoming;
	lwkt_port_t port = &devfs_msg_port;

	/* Stack-local reply port; valid because we block until the reply */
	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	msg_incoming = lwkt_waitport(&rep_port, 0);

	return msg_incoming;
}

/*
 * sends a message with a generic (void *) argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_load = load;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_name = name;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a mount argument.
965 */ 966 void 967 devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt) 968 { 969 devfs_msg_t devfs_msg = devfs_msg_get(); 970 971 devfs_msg->mdv_mnt = mnt; 972 devfs_msg_send(cmd, devfs_msg); 973 } 974 975 /* 976 * sends a message with an ops argument. 977 */ 978 void 979 devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor) 980 { 981 devfs_msg_t devfs_msg = devfs_msg_get(); 982 983 devfs_msg->mdv_ops.ops = ops; 984 devfs_msg->mdv_ops.minor = minor; 985 devfs_msg_send(cmd, devfs_msg); 986 } 987 988 /* 989 * sends a message with a clone handler argument. 990 */ 991 void 992 devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler) 993 { 994 devfs_msg_t devfs_msg = devfs_msg_get(); 995 996 devfs_msg->mdv_chandler.name = name; 997 devfs_msg->mdv_chandler.nhandler = handler; 998 devfs_msg_send(cmd, devfs_msg); 999 } 1000 1001 /* 1002 * sends a message with a device argument. 1003 */ 1004 void 1005 devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms) 1006 { 1007 devfs_msg_t devfs_msg = devfs_msg_get(); 1008 1009 devfs_msg->mdv_dev.dev = dev; 1010 devfs_msg->mdv_dev.uid = uid; 1011 devfs_msg->mdv_dev.gid = gid; 1012 devfs_msg->mdv_dev.perms = perms; 1013 1014 devfs_msg_send(cmd, devfs_msg); 1015 } 1016 1017 /* 1018 * sends a message with a link argument. 1019 */ 1020 void 1021 devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp) 1022 { 1023 devfs_msg_t devfs_msg = devfs_msg_get(); 1024 1025 devfs_msg->mdv_link.name = name; 1026 devfs_msg->mdv_link.target = target; 1027 devfs_msg->mdv_link.mp = mp; 1028 devfs_msg_send(cmd, devfs_msg); 1029 } 1030 1031 /* 1032 * devfs_msg_core is the main devfs thread. It handles all incoming messages 1033 * and calls the relevant worker functions. By using messages it's assured 1034 * that events occur in the correct order. 
 */
static void
devfs_msg_core(void *arg)
{
	devfs_msg_t msg;

	devfs_run = 1;
	lwkt_initport_thread(&devfs_msg_port, curthread);
	/* Signal the spawner that the port is ready */
	wakeup(td_core);

	/* Main loop: drain the message port until DEVFS_TERMINATE_CORE */
	while (devfs_run) {
		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_msg_core, new msg: %x\n",
			    (unsigned int)msg->hdr.u.ms_result);
		devfs_msg_exec(msg);
		lwkt_replymsg(&msg->hdr, 0);
	}
	/* Signal termination to whoever is waiting on the core thread */
	wakeup(td_core);
	lwkt_exit();
}

/*
 * Dispatch a single devfs message to its worker function.  Runs either
 * on the core thread or inline via devfs_msg_send() when the caller
 * already is the core thread.
 */
static void
devfs_msg_exec(devfs_msg_t msg)
{
	struct devfs_mnt_data *mnt;
	struct devfs_node *node;
	cdev_t	dev;

	/*
	 * Acquire the devfs lock to ensure safety of all called functions
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	switch (msg->hdr.u.ms_result) {
	case DEVFS_DEVICE_CREATE:
		dev = msg->mdv_dev.dev;
		devfs_create_dev_worker(dev,
					msg->mdv_dev.uid,
					msg->mdv_dev.gid,
					msg->mdv_dev.perms);
		break;
	case DEVFS_DEVICE_DESTROY:
		dev = msg->mdv_dev.dev;
		devfs_destroy_dev_worker(dev);
		break;
	case DEVFS_DESTROY_SUBNAMES:
		devfs_destroy_subnames_worker(msg->mdv_load);
		break;
	case DEVFS_DESTROY_DEV_BY_OPS:
		devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
						msg->mdv_ops.minor);
		break;
	case DEVFS_CREATE_ALL_DEV:
		node = (struct devfs_node *)msg->mdv_load;
		devfs_create_all_dev_worker(node);
		break;
	case DEVFS_MOUNT_ADD:
		/* Register the mount and populate it with all known devices */
		mnt = msg->mdv_mnt;
		TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
		devfs_create_all_dev_worker(mnt->root_node);
		break;
	case DEVFS_MOUNT_DEL:
		/* Unregister the mount and reap its entire node topology */
		mnt = msg->mdv_mnt;
		TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
		devfs_reaperp(mnt->root_node);
		if (mnt->leak_count) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Leaked %d devfs_node elements!\n",
				    mnt->leak_count);
		}
		break;
	case DEVFS_CHANDLER_ADD:
		devfs_chandler_add_worker(msg->mdv_chandler.name,
					  msg->mdv_chandler.nhandler);
		break;
	case DEVFS_CHANDLER_DEL:
		devfs_chandler_del_worker(msg->mdv_chandler.name);
		break;
	case DEVFS_FIND_DEVICE_BY_NAME:
		/* Worker stores the result into the message itself */
		devfs_find_device_by_name_worker(msg);
		break;
	case DEVFS_FIND_DEVICE_BY_UDEV:
		devfs_find_device_by_udev_worker(msg);
		break;
	case DEVFS_MAKE_ALIAS:
		devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_APPLY_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 1);
		break;
	case DEVFS_RESET_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 0);
		break;
	case DEVFS_SCAN_CALLBACK:
		devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
		break;
	case DEVFS_CLR_SUBNAMES_FLAG:
		devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
					       msg->mdv_flags.flag);
		break;
	case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
		devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
							   msg->mdv_flags.flag);
		break;
	case DEVFS_INODE_TO_VNODE:
		msg->mdv_ino.vp = devfs_inode_to_vnode_worker(
			DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
			msg->mdv_ino.ino);
		break;
	case DEVFS_TERMINATE_CORE:
		/* Ends the devfs_msg_core() loop after this reply */
		devfs_run = 0;
		break;
	case DEVFS_SYNC:
		/* No-op: completion of this message IS the synchronization */
		break;
	default:
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs_msg_core: unknown message "
			    "received at core\n");
		break;
	}
	lockmgr(&devfs_lock, LK_RELEASE);
}

/*
 * Worker function to insert a new dev into the dev list and initialize its
 * permissions. It also calls devfs_propagate_dev which in turn propagates
 * the change to all mount points.
 *
 * The passed dev is already referenced. This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	KKASSERT(dev);

	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_perms = perms;

	/* Link into devfs_dev_list, then create nodes on every mount */
	devfs_link_dev(dev);
	devfs_propagate_dev(dev, 1);

	return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev which in turn propagates the change
 * to all mount points.
 *
 * Must be called with devfs_lock held exclusively (asserted below).
 *
 * NOTE(review): three release_dev() calls - the conditional one drops the
 * devfs_dev_list link reference taken in devfs_create_dev(); the two
 * unconditional ones presumably drop the creator's references.  Confirm
 * the reference accounting against make_dev()/reference_dev() before
 * touching this.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
	int error;

	KKASSERT(dev);
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	error = devfs_unlink_dev(dev);
	devfs_propagate_dev(dev, 0);
	if (error == 0)
		release_dev(dev);	/* link ref */
	release_dev(dev);
	release_dev(dev);

	return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker for the actual destruction.
 */
static int
devfs_destroy_subnames_worker(char *name)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	/*
	 * Destroy every dev whose name starts with 'name' and continues
	 * past it (strict sub-names; an exact match is left alone).
	 */
	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			devfs_destroy_dev_worker(dev);
		}
	}
	return 0;
}

/*
 * Worker function that clears the given flag bits on every dev whose
 * name is a strict sub-name of 'name'.
 */
static int
devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			dev->si_flags &= ~flag;
		}
	}

	return 0;
}

/*
 * Worker function that destroys every strict sub-name dev of 'name'
 * that does NOT have any of the given flag bits set.
 */
static int
devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			if (!(dev->si_flags & flag)) {
				devfs_destroy_dev_worker(dev);
			}
		}
	}

	return 0;
}

/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
	cdev_t dev;

	KKASSERT(root);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		devfs_create_device_node(root, dev, NULL, NULL);
	}

	return 0;
}

/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor. If minor is less than 0, it is not matched
 * against. It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
	cdev_t dev, dev1;

	KKASSERT(ops);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_ops != ops)
			continue;
		/* minor < 0 acts as a wildcard matching every minor */
		if ((minor < 0) || (dev->si_uminor == minor)) {
			devfs_destroy_dev_worker(dev);
		}
	}

	return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 * Returns 0 on success, 1 if the name is empty or a handler with
 * the same basename already exists.
 */
static int
devfs_chandler_add_worker(char *name, d_clone_t *nhandler)
{
	struct devfs_clone_handler *chandler = NULL;
	/*
	 * NOTE(review): namlen is a u_char, so a basename longer than
	 * 255 chars would silently truncate -- presumably never the case
	 * for device basenames, but worth confirming.
	 */
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;

		if (!memcmp(chandler->name, name, len)) {
			/* Clonable basename already exists */
			return 1;
		}
	}

	chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
	chandler->name = kmalloc(len+1, M_DEVFS, M_WAITOK);
	memcpy(chandler->name, name, len+1);	/* len+1 copies the NUL too */
	chandler->namlen = len;
	chandler->nhandler = nhandler;

	TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
	return 0;
}

/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(char *name)
{
	struct devfs_clone_handler *chandler, *chandler2;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
		if (chandler->namlen != len)
			continue;
		if (memcmp(chandler->name, name, len))
			continue;

		/* Found it: unlink and free both the name and the handler */
		TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
		kfree(chandler->name, M_DEVFS);
		kfree(chandler, M_DEVFS);
		break;
	}

	return 0;
}

/*
 * Worker function that finds a given device name and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
	struct devfs_alias *alias;
	cdev_t dev;
	cdev_t found = NULL;

	/* First try an exact match against the real device names... */
	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
			found = dev;
			break;
		}
	}
	/* ...then fall back to the alias list. */
	if (found == NULL) {
		TAILQ_FOREACH(alias, &devfs_alias_list, link) {
			if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
				found = alias->dev_target;
				break;
			}
		}
	}
	devfs_msg->mdv_cdev = found;	/* NULL if not found */

	return 0;
}

/*
 * Worker function that finds a given device udev and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
1394 */ 1395 static int 1396 devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg) 1397 { 1398 cdev_t dev, dev1; 1399 cdev_t found = NULL; 1400 1401 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { 1402 if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) { 1403 found = dev; 1404 break; 1405 } 1406 } 1407 devfs_msg->mdv_cdev = found; 1408 1409 return 0; 1410 } 1411 1412 /* 1413 * Worker function that inserts a given alias into the 1414 * alias list, and propagates the alias to all mount 1415 * points. 1416 */ 1417 static int 1418 devfs_make_alias_worker(struct devfs_alias *alias) 1419 { 1420 struct devfs_alias *alias2; 1421 size_t len = strlen(alias->name); 1422 int found = 0; 1423 1424 TAILQ_FOREACH(alias2, &devfs_alias_list, link) { 1425 if (len != alias2->namlen) 1426 continue; 1427 1428 if (!memcmp(alias->name, alias2->name, len)) { 1429 found = 1; 1430 break; 1431 } 1432 } 1433 1434 if (!found) { 1435 /* 1436 * The alias doesn't exist yet, so we add it to the alias list 1437 */ 1438 TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link); 1439 devfs_alias_propagate(alias); 1440 } else { 1441 devfs_debug(DEVFS_DEBUG_WARNING, 1442 "Warning: duplicate devfs_make_alias for %s\n", 1443 alias->name); 1444 kfree(alias->name, M_DEVFS); 1445 kfree(alias, M_DEVFS); 1446 } 1447 1448 return 0; 1449 } 1450 1451 /* 1452 * Function that removes and frees all aliases. 1453 */ 1454 static int 1455 devfs_alias_reap(void) 1456 { 1457 struct devfs_alias *alias, *alias2; 1458 1459 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { 1460 TAILQ_REMOVE(&devfs_alias_list, alias, link); 1461 kfree(alias, M_DEVFS); 1462 } 1463 return 0; 1464 } 1465 1466 /* 1467 * Function that removes an alias matching a specific cdev and frees 1468 * it accordingly. 
1469 */ 1470 static int 1471 devfs_alias_remove(cdev_t dev) 1472 { 1473 struct devfs_alias *alias, *alias2; 1474 1475 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { 1476 if (alias->dev_target == dev) { 1477 TAILQ_REMOVE(&devfs_alias_list, alias, link); 1478 kfree(alias, M_DEVFS); 1479 } 1480 } 1481 return 0; 1482 } 1483 1484 /* 1485 * This function propagates a new alias to all mount points. 1486 */ 1487 static int 1488 devfs_alias_propagate(struct devfs_alias *alias) 1489 { 1490 struct devfs_mnt_data *mnt; 1491 1492 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { 1493 devfs_alias_apply(mnt->root_node, alias); 1494 } 1495 return 0; 1496 } 1497 1498 /* 1499 * This function is a recursive function iterating through 1500 * all device nodes in the topology and, if applicable, 1501 * creating the relevant alias for a device node. 1502 */ 1503 static int 1504 devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias) 1505 { 1506 struct devfs_node *node1, *node2; 1507 1508 KKASSERT(alias != NULL); 1509 1510 if ((node->node_type == Proot) || (node->node_type == Pdir)) { 1511 if (node->nchildren > 2) { 1512 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { 1513 devfs_alias_apply(node1, alias); 1514 } 1515 } 1516 } else { 1517 if (node->d_dev == alias->dev_target) 1518 devfs_alias_create(alias->name, node); 1519 } 1520 return 0; 1521 } 1522 1523 /* 1524 * This function checks if any alias possibly is applicable 1525 * to the given node. If so, the alias is created. 1526 */ 1527 static int 1528 devfs_alias_check_create(struct devfs_node *node) 1529 { 1530 struct devfs_alias *alias; 1531 1532 TAILQ_FOREACH(alias, &devfs_alias_list, link) { 1533 if (node->d_dev == alias->dev_target) 1534 devfs_alias_create(alias->name, node); 1535 } 1536 return 0; 1537 } 1538 1539 /* 1540 * This function creates an alias with a given name 1541 * linking to a given devfs node. It also increments 1542 * the link count on the target node. 
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target)
{
	struct mount *mp = target->mp;
	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
	struct devfs_node *linknode;
	char *create_path = NULL;
	char *name, name_buf[PATH_MAX];

	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	/* Split the alias into a directory part and the final name */
	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);


	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Node already exists: %s "
			    "(devfs_make_alias_worker)!\n",
			    name);
		return 1;
	}


	linknode = devfs_allocp(Plink, name, parent, mp, NULL);
	if (linknode == NULL)
		return 1;

	linknode->link_target = target;
	target->nlinks++;	/* the new link pins the target node */

	return 0;
}

/*
 * This function is called by the core and handles mount point
 * strings. It either calls the relevant worker (devfs_apply_
 * reset_rules_worker) on all mountpoints or only a specific
 * one. The passed mountto string is consumed (freed) here.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
	struct devfs_mnt_data *mnt;
	size_t len = strlen(mountto);

	/* "*" means: apply/reset rules on every devfs mount point */
	if (mountto[0] == '*') {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			devfs_apply_reset_rules_worker(mnt->root_node, apply);
		}
	} else {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			if ((len != mnt->mntonnamelen))
				continue;

			if (!memcmp(mnt->mp->mnt_stat.f_mntonname, mountto, len)) {
				devfs_apply_reset_rules_worker(mnt->root_node, apply);
				break;
			}
		}
	}

	kfree(mountto, M_DEVFS);
	return 0;
}

/*
 * This worker function applies or resets, depending on the arguments, a rule
 * to the whole given topology.
 *RECURSIVE*
 */
static int
devfs_apply_reset_rules_worker(struct devfs_node *node, int apply)
{
	struct devfs_node *node1, *node2;

	/* Recurse into directories first... */
	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
				devfs_apply_reset_rules_worker(node1, apply);
			}
		}
	}

	/* ...then handle the node itself. */
	if (apply)
		devfs_rule_check_apply(node);
	else
		devfs_rule_reset_node(node);

	return 0;
}


/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback)
{
	cdev_t dev, dev1;

	/* MUTABLE variant so the callback may remove devs while we scan */
	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		callback(dev);
	}

	return 0;
}


/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
			size_t name_len, int create)
{
	struct devfs_node *node, *found = NULL;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (name_len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
			found = node;
			break;
		}
	}

	if ((found == NULL) && (create)) {
		found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
	}

	return found;
}

/*
 * This function tries to resolve a complete path. If creation is requested,
 * if a given part of the path cannot be resolved (because it doesn't exist),
 * it is created.
1686 */ 1687 struct devfs_node * 1688 devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create) 1689 { 1690 struct devfs_node *node = parent; 1691 char buf[PATH_MAX]; 1692 size_t idx = 0; 1693 1694 1695 if (path == NULL) 1696 return parent; 1697 1698 1699 for (; *path != '\0' ; path++) { 1700 if (*path != '/') { 1701 buf[idx++] = *path; 1702 } else { 1703 buf[idx] = '\0'; 1704 node = devfs_resolve_or_create_dir(node, buf, idx, create); 1705 if (node == NULL) 1706 return NULL; 1707 idx = 0; 1708 } 1709 } 1710 buf[idx] = '\0'; 1711 return devfs_resolve_or_create_dir(node, buf, idx, create); 1712 } 1713 1714 /* 1715 * Takes a full path and strips it into a directory path and a name. 1716 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It 1717 * requires a working buffer with enough size to keep the whole 1718 * fullpath. 1719 */ 1720 int 1721 devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep) 1722 { 1723 char *name = NULL; 1724 char *path = NULL; 1725 size_t len = strlen(fullpath) + 1; 1726 int i; 1727 1728 KKASSERT((fullpath != NULL) && (buf != NULL)); 1729 KKASSERT((pathp != NULL) && (namep != NULL)); 1730 1731 memcpy(buf, fullpath, len); 1732 1733 for (i = len-1; i>= 0; i--) { 1734 if (buf[i] == '/') { 1735 buf[i] = '\0'; 1736 name = &(buf[i+1]); 1737 path = buf; 1738 break; 1739 } 1740 } 1741 1742 *pathp = path; 1743 1744 if (name) { 1745 *namep = name; 1746 } else { 1747 *namep = buf; 1748 } 1749 1750 return 0; 1751 } 1752 1753 /* 1754 * This function creates a new devfs node for a given device. It can 1755 * handle a complete path as device name, and accordingly creates 1756 * the path and the final device node. 1757 * 1758 * The reference count on the passed dev remains unchanged. 1759 */ 1760 struct devfs_node * 1761 devfs_create_device_node(struct devfs_node *root, cdev_t dev, 1762 char *dev_name, char *path_fmt, ...) 
1763 { 1764 struct devfs_node *parent, *node = NULL; 1765 char *path = NULL; 1766 char *name, name_buf[PATH_MAX]; 1767 __va_list ap; 1768 int i, found; 1769 1770 char *create_path = NULL; 1771 char *names = "pqrsPQRS"; 1772 1773 if (path_fmt != NULL) { 1774 path = kmalloc(PATH_MAX+1, M_DEVFS, M_WAITOK); 1775 1776 __va_start(ap, path_fmt); 1777 i = kvcprintf(path_fmt, NULL, path, 10, ap); 1778 path[i] = '\0'; 1779 __va_end(ap); 1780 } 1781 1782 parent = devfs_resolve_or_create_path(root, path, 1); 1783 KKASSERT(parent); 1784 1785 devfs_resolve_name_path( 1786 ((dev_name == NULL) && (dev))?(dev->si_name):(dev_name), 1787 name_buf, &create_path, &name); 1788 1789 if (create_path) 1790 parent = devfs_resolve_or_create_path(parent, create_path, 1); 1791 1792 1793 if (devfs_find_device_node_by_name(parent, name)) { 1794 devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: " 1795 "DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name); 1796 goto out; 1797 } 1798 1799 node = devfs_allocp(Pdev, name, parent, parent->mp, dev); 1800 1801 #if 0 1802 /* 1803 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their 1804 * directory 1805 */ 1806 if ((dev) && (strlen(dev->si_name) >= 4) && 1807 (!memcmp(dev->si_name, "ptm/", 4))) { 1808 node->parent->flags |= DEVFS_HIDDEN; 1809 node->flags |= DEVFS_HIDDEN; 1810 } 1811 #endif 1812 1813 /* 1814 * Ugly pty magic, to tag pty devices as such and hide them if needed. 
1815 */ 1816 if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3))) 1817 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE); 1818 1819 if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) { 1820 found = 0; 1821 for (i = 0; i < strlen(names); i++) { 1822 if (name[3] == names[i]) { 1823 found = 1; 1824 break; 1825 } 1826 } 1827 if (found) 1828 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE); 1829 } 1830 1831 out: 1832 if (path_fmt != NULL) 1833 kfree(path, M_DEVFS); 1834 1835 return node; 1836 } 1837 1838 /* 1839 * This function finds a given device node in the topology with a given 1840 * cdev. 1841 */ 1842 struct devfs_node * 1843 devfs_find_device_node(struct devfs_node *node, cdev_t target) 1844 { 1845 struct devfs_node *node1, *node2, *found = NULL; 1846 1847 if ((node->node_type == Proot) || (node->node_type == Pdir)) { 1848 if (node->nchildren > 2) { 1849 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { 1850 if ((found = devfs_find_device_node(node1, target))) 1851 return found; 1852 } 1853 } 1854 } else if (node->node_type == Pdev) { 1855 if (node->d_dev == target) 1856 return node; 1857 } 1858 1859 return NULL; 1860 } 1861 1862 /* 1863 * This function finds a device node in the topology by its 1864 * name and returns it. 
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
	struct devfs_node *node, *found = NULL;
	size_t len = strlen(target);

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(node->d_dir.d_name, target, len)) {
			found = node;
			break;
		}
	}

	return found;
}

/*
 * Recursively searches the topology below 'node' for a devfs node whose
 * inode number matches 'target' and returns an unlocked vnode for it
 * (allocating one via devfs_allocv if the node has none yet), or NULL
 * if no node matched.
 */
static struct vnode*
devfs_inode_to_vnode_worker(struct devfs_node *node, ino_t target)
{
	struct devfs_node *node1, *node2;
	struct vnode* vp;

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
				if ((vp = devfs_inode_to_vnode_worker(node1, target)))
					return vp;
			}
		}
	}

	if (node->d_dir.d_ino == target) {
		if (node->v_node) {
			/* Reuse the existing vnode, taking a reference */
			vp = node->v_node;
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
			vn_unlock(vp);
		} else {
			devfs_allocv(&vp, node);
			vn_unlock(vp);
		}
		return vp;
	}

	return NULL;
}

/*
 * This function takes a cdev and removes its devfs node in the
 * given topology. The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
	struct devfs_node *node, *parent;
	char *name, name_buf[PATH_MAX];
	char *create_path = NULL;

	KKASSERT(target);

	memcpy(name_buf, target->si_name, strlen(target->si_name)+1);

	devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);

	/* Only resolve, never create (create argument == 0) */
	if (create_path)
		parent = devfs_resolve_or_create_path(root, create_path, 0);
	else
		parent = root;

	if (parent == NULL)
		return 1;

	node = devfs_find_device_node_by_name(parent, name);

	if (node)
		devfs_gc(node);

	return 0;
}

/*
 * Just set perms and ownership for given node.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
		u_short mode, u_long flags)
{
	/* NOTE(review): the 'flags' argument is currently ignored */
	node->mode = mode;
	node->uid = uid;
	node->gid = gid;

	return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points. Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (attach) {
			/* Device is being attached */
			devfs_create_device_node(mnt->root_node, dev,
						 NULL, NULL );
		} else {
			/* Device is being detached */
			devfs_alias_remove(dev);
			devfs_destroy_device_node(mnt->root_node, dev);
		}
	}
	return 0;
}

/*
 * devfs_node_to_path takes a node and a buffer of a size of
 * at least PATH_MAX, resolves the full path from the root
 * node and writes it in a humanly-readable format into the
 * buffer.
 * If DEVFS_STASH_DEPTH is less than the directory level up
 * to the root node, only the last DEVFS_STASH_DEPTH levels
 * of the path are resolved.
 */
int
devfs_node_to_path(struct devfs_node *node, char *buffer)
{
#define DEVFS_STASH_DEPTH	32
	struct devfs_node *node_stash[DEVFS_STASH_DEPTH];
	int i, offset;
	memset(buffer, 0, PATH_MAX);

	/* Walk up towards the root, stashing each node on the way */
	for (i = 0; (i < DEVFS_STASH_DEPTH) && (node->node_type != Proot); i++) {
		node_stash[i] = node;
		node = node->parent;
	}
	i--;

	/* Replay the stash in reverse order, joining names with '/' */
	for (offset = 0; i >= 0; i--) {
		memcpy(buffer+offset, node_stash[i]->d_dir.d_name,
				node_stash[i]->d_dir.d_namlen);
		offset += node_stash[i]->d_dir.d_namlen;
		if (i > 0) {
			*(buffer+offset) = '/';
			offset++;
		}
	}
#undef DEVFS_STASH_DEPTH
	return 0;
}

/*
 * devfs_clone either returns a basename from a complete name by
 * returning the length of the name without trailing digits, or,
 * if clone != 0, calls the device's clone handler to get a new
 * device, which in turn is returned in devp.
 *
 * Returns 0 on success, non-zero if no matching clone handler was
 * found (or the handler itself failed).
 */
int
devfs_clone(char *name, size_t *namlenp, cdev_t *devp, int clone,
	struct ucred *cred)
{
	KKASSERT(namlenp);

	size_t len = *namlenp;
	int error = 1;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	/* Strip trailing digits to obtain the basename */
	if (!clone) {
		for (; (len > 0) && (DEVFS_ISDIGIT(name[len-1])); len--);
	}

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if ((chandler->namlen == len) &&
		    (!memcmp(chandler->name, name, len)) &&
		    (chandler->nhandler)) {
			if (clone) {
				ap.a_dev = NULL;
				ap.a_name = name;
				ap.a_namelen = len;
				ap.a_cred = cred;
				error = (chandler->nhandler)(&ap);
				KKASSERT(devp);
				*devp = ap.a_dev;
			} else {
				*namlenp = len;
				error = 0;
			}

			break;
		}
	}

	return error;
}


/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	/* A node must not be orphaned twice */
	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t ret;

	spin_lock_wr(&ino_lock);
	ret = d_ino++;
	spin_unlock_wr(&ino_lock);

	return ret;
}

/*
 * Allocates a new cdev and initializes it's most basic
 * fields.
2151 */ 2152 cdev_t 2153 devfs_new_cdev(struct dev_ops *ops, int minor) 2154 { 2155 cdev_t dev = sysref_alloc(&cdev_sysref_class); 2156 sysref_activate(&dev->si_sysref); 2157 reference_dev(dev); 2158 memset(dev, 0, offsetof(struct cdev, si_sysref)); 2159 2160 dev->si_uid = 0; 2161 dev->si_gid = 0; 2162 dev->si_perms = 0; 2163 dev->si_drv1 = NULL; 2164 dev->si_drv2 = NULL; 2165 dev->si_lastread = 0; /* time_second */ 2166 dev->si_lastwrite = 0; /* time_second */ 2167 2168 dev->si_ops = ops; 2169 dev->si_flags = 0; 2170 dev->si_umajor = 0; 2171 dev->si_uminor = minor; 2172 dev->si_inode = makeudev(devfs_reference_ops(ops), minor); 2173 2174 return dev; 2175 } 2176 2177 static void 2178 devfs_cdev_terminate(cdev_t dev) 2179 { 2180 int locked = 0; 2181 2182 /* Check if it is locked already. if not, we acquire the devfs lock */ 2183 if (!(lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE) { 2184 lockmgr(&devfs_lock, LK_EXCLUSIVE); 2185 locked = 1; 2186 } 2187 2188 /* Propagate destruction, just in case */ 2189 devfs_propagate_dev(dev, 0); 2190 2191 /* If we acquired the lock, we also get rid of it */ 2192 if (locked) 2193 lockmgr(&devfs_lock, LK_RELEASE); 2194 2195 devfs_release_ops(dev->si_ops); 2196 2197 /* Finally destroy the device */ 2198 sysref_put(&dev->si_sysref); 2199 } 2200 2201 /* 2202 * Links a given cdev into the dev list. 2203 */ 2204 int 2205 devfs_link_dev(cdev_t dev) 2206 { 2207 KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0); 2208 dev->si_flags |= SI_DEVFS_LINKED; 2209 TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link); 2210 2211 return 0; 2212 } 2213 2214 /* 2215 * Removes a given cdev from the dev list. The caller is responsible for 2216 * releasing the reference on the device associated with the linkage. 2217 * 2218 * Returns EALREADY if the dev has already been unlinked. 
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

/*
 * Returns 1 if the node exists and is not hidden, 0 otherwise.
 */
int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

/*
 * Takes a reference on the given dev_ops. On first reference a unique
 * id is allocated from the ops_id bitmap. Returns that id, which is
 * used as the major number when constructing udevs.
 */
int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;

	if (ops->head.refs == 0) {
		ops->head.id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (ops->head.id == -1) {
			/*
			 * Ran out of unique ids.
			 * NOTE(review): execution continues with id == -1;
			 * presumably callers tolerate the resulting udev --
			 * confirm.
			 */
			devfs_debug(DEVFS_DEBUG_WARNING,
					"devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = ops->head.id;
	++ops->head.refs;

	return unit;
}

/*
 * Drops a reference on the given dev_ops; the unique id is returned to
 * the ops_id bitmap when the last reference goes away.
 */
void
devfs_release_ops(struct dev_ops *ops)
{
	--ops->head.refs;

	if (ops->head.refs == 0) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), ops->head.id);
	}
}

/*
 * Sends a synchronous DEVFS_SYNC message to the devfs core, which
 * effectively waits until all previously queued messages have been
 * processed.
 */
void
devfs_config(void)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
	devfs_msg_put(msg);
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread. Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
			NULL, NULL, NULL,
			objcache_malloc_alloc,
			objcache_malloc_free,
			&devfs_node_malloc_args );

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
			NULL, NULL, NULL,
			objcache_malloc_alloc,
			objcache_malloc_free,
			&devfs_msg_malloc_args );

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
			NULL, NULL, NULL,
			objcache_malloc_alloc,
			objcache_malloc_free,
			&devfs_dev_malloc_args );

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);


	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "devfs_msg_core");

	/* Wait until the core thread wakes us up (it is up and running) */
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}

/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches. Also removes aliases that are no longer needed.
 */
static void
devfs_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);

	/*
	 * Wait for the core thread to terminate.
	 * NOTE(review): the second tsleep uses a 10000-tick timeout,
	 * presumably as a safety net against a missed wakeup -- confirm
	 * intent.
	 */
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);

	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

	/* Destroy the objcaches */
	objcache_destroy(devfs_msg_cache);
	objcache_destroy(devfs_node_cache);
	objcache_destroy(devfs_dev_cache);

	devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) to
 * find the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
	udev_t udev;
	cdev_t found;
	int error;


	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
		return (error);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);

	if (udev == NOUDEV)
		return(EINVAL);

	if ((found = devfs_find_device_by_udev(udev)) == NULL)
		return(ENOENT);

	return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}


SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
	    NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
		0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
		devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
		devfs_uninit, NULL);