1 /* 2 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Alex Hornung <ahornung@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 */ 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/mount.h> 38 #include <sys/vnode.h> 39 #include <sys/types.h> 40 #include <sys/lock.h> 41 #include <sys/msgport.h> 42 #include <sys/msgport2.h> 43 #include <sys/spinlock2.h> 44 #include <sys/sysctl.h> 45 #include <sys/ucred.h> 46 #include <sys/param.h> 47 #include <sys/sysref2.h> 48 #include <sys/systm.h> 49 #include <sys/devfs.h> 50 #include <sys/devfs_rules.h> 51 52 MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations"); 53 DEVFS_DECLARE_CLONE_BITMAP(ops_id); 54 /* 55 * SYSREF Integration - reference counting, allocation, 56 * sysid and syslink integration. 57 */ 58 static void devfs_cdev_terminate(cdev_t dev); 59 static struct sysref_class cdev_sysref_class = { 60 .name = "cdev", 61 .mtype = M_DEVFS, 62 .proto = SYSREF_PROTO_DEV, 63 .offset = offsetof(struct cdev, si_sysref), 64 .objsize = sizeof(struct cdev), 65 .mag_capacity = 32, 66 .flags = 0, 67 .ops = { 68 .terminate = (sysref_terminate_func_t)devfs_cdev_terminate 69 } 70 }; 71 72 static struct objcache *devfs_node_cache; 73 static struct objcache *devfs_msg_cache; 74 static struct objcache *devfs_dev_cache; 75 76 static struct objcache_malloc_args devfs_node_malloc_args = { 77 sizeof(struct devfs_node), M_DEVFS }; 78 struct objcache_malloc_args devfs_msg_malloc_args = { 79 sizeof(struct devfs_msg), M_DEVFS }; 80 struct objcache_malloc_args devfs_dev_malloc_args = { 81 sizeof(struct cdev), M_DEVFS }; 82 83 static struct devfs_dev_head devfs_dev_list = 84 TAILQ_HEAD_INITIALIZER(devfs_dev_list); 85 static struct devfs_mnt_head devfs_mnt_list = 86 TAILQ_HEAD_INITIALIZER(devfs_mnt_list); 87 static struct devfs_chandler_head devfs_chandler_list = 88 TAILQ_HEAD_INITIALIZER(devfs_chandler_list); 89 static struct devfs_alias_head devfs_alias_list = 90 TAILQ_HEAD_INITIALIZER(devfs_alias_list); 91 92 struct lock devfs_lock; 93 static struct lwkt_port devfs_dispose_port; 94 static struct lwkt_port 
devfs_msg_port; 95 static struct thread *td_core; 96 97 static struct spinlock ino_lock; 98 static ino_t d_ino; 99 static int devfs_debug_enable; 100 static int devfs_run; 101 102 static ino_t devfs_fetch_ino(void); 103 static int devfs_create_all_dev_worker(struct devfs_node *); 104 static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int); 105 static int devfs_destroy_dev_worker(cdev_t); 106 static int devfs_destroy_subnames_worker(char *); 107 static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int); 108 static int devfs_propagate_dev(cdev_t, int); 109 static int devfs_unlink_dev(cdev_t dev); 110 static void devfs_msg_exec(devfs_msg_t msg); 111 112 static int devfs_chandler_add_worker(const char *, d_clone_t *); 113 static int devfs_chandler_del_worker(const char *); 114 115 static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t); 116 static void devfs_msg_core(void *); 117 118 static int devfs_find_device_by_name_worker(devfs_msg_t); 119 static int devfs_find_device_by_udev_worker(devfs_msg_t); 120 121 static int devfs_apply_reset_rules_caller(char *, int); 122 123 static int devfs_scan_callback_worker(devfs_scan_t *); 124 125 static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *, 126 char *, size_t, int); 127 128 static int devfs_make_alias_worker(struct devfs_alias *); 129 static int devfs_alias_remove(cdev_t); 130 static int devfs_alias_reap(void); 131 static int devfs_alias_propagate(struct devfs_alias *); 132 static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *); 133 static int devfs_alias_check_create(struct devfs_node *); 134 135 static int devfs_clr_subnames_flag_worker(char *, uint32_t); 136 static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t); 137 138 static void *devfs_reaperp_callback(struct devfs_node *, void *); 139 static void *devfs_gc_dirs_callback(struct devfs_node *, void *); 140 static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *); 141 
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);

/*
 * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function
 * using kvprintf.  Output is emitted only when 'level' does not exceed
 * the current devfs_debug_enable setting.
 */
int
devfs_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= devfs_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

/*
 * devfs_allocp() Allocates a new devfs node with the specified
 * parameters. The node is also automatically linked into the topology
 * if a parent is specified. It also calls the rule and alias stuff to
 * be applied on the new node.
 *
 * For Pdev nodes the permissions are inherited from the backing cdev;
 * all other nodes start with the devfs defaults.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
	     struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
	struct devfs_node *node = NULL;
	size_t namlen = strlen(name);

	node = objcache_get(devfs_node_cache, M_WAITOK);
	bzero(node, sizeof(*node));

	/* leak_count is decremented again in devfs_freep() */
	atomic_add_long(&(DEVFS_MNTDATA(mp)->leak_count), 1);

	node->d_dev = NULL;
	node->nchildren = 1;
	node->mp = mp;
	node->d_dir.d_ino = devfs_fetch_ino();

	/*
	 * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries
	 * respectively.
	 */
	node->cookie_jar = 2;

	/*
	 * Access Control members
	 */
	node->mode = DEVFS_DEFAULT_MODE;
	node->uid = DEVFS_DEFAULT_UID;
	node->gid = DEVFS_DEFAULT_GID;

	switch (devfsnodetype) {
	case Proot:
		/*
		 * Ensure that we don't recycle the root vnode by marking it as
		 * linked into the topology.
		 */
		node->flags |= DEVFS_NODE_LINKED;
		/* fallthrough - root is also a directory */
	case Pdir:
		TAILQ_INIT(DEVFS_DENODE_HEAD(node));
		node->d_dir.d_type = DT_DIR;
		node->nchildren = 2;
		break;

	case Plink:
		node->d_dir.d_type = DT_LNK;
		break;

	case Preg:
		node->d_dir.d_type = DT_REG;
		break;

	case Pdev:
		if (dev != NULL) {
			node->d_dir.d_type = DT_CHR;
			node->d_dev = dev;

			/* Inherit access control from the backing cdev */
			node->mode = dev->si_perms;
			node->uid = dev->si_uid;
			node->gid = dev->si_gid;

			devfs_alias_check_create(node);
		}
		break;

	default:
		panic("devfs_allocp: unknown node type");
	}

	node->v_node = NULL;
	node->node_type = devfsnodetype;

	/* Initialize the dirent structure of each devfs vnode */
	KKASSERT(namlen < 256);
	node->d_dir.d_namlen = namlen;
	node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
	memcpy(node->d_dir.d_name, name, namlen);
	node->d_dir.d_name[namlen] = '\0';

	/* Initialize the parent node element */
	node->parent = parent;

	/* Apply rules */
	devfs_rule_check_apply(node, NULL);

	/* Initialize *time members */
	nanotime(&node->atime);
	node->mtime = node->ctime = node->atime;

	/*
	 * Associate with parent as last step, clean out namecache
	 * reference.  Only directories (and the root) may carry children.
	 */
	if ((parent != NULL) &&
	    ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
		parent->nchildren++;
		node->cookie = parent->cookie_jar++;
		node->flags |= DEVFS_NODE_LINKED;
		TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

		/* This forces negative namecache lookups to clear */
		++mp->mnt_namecache_gen;
	}

	return node;
}

/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 * If the node already has a vnode association, that vnode is reused;
 * a race against a concurrent allocation is detected and retried.
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
	struct vnode *vp;
	int error = 0;

	KKASSERT(node);

try_again:
	/*
	 * Reuse an existing vnode if possible.  vget() returning ENOENT
	 * means the vnode was reclaimed out from under us; retry.
	 */
	while ((vp = node->v_node) != NULL) {
		error = vget(vp, LK_EXCLUSIVE);
		if (error != ENOENT) {
			*vpp = vp;
			goto out;
		}
	}

	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
		goto out;

	vp = *vpp;

	/*
	 * Another thread may have associated a vnode while we were in
	 * getnewvnode().  Discard ours and retry with theirs.
	 */
	if (node->v_node != NULL) {
		vp->v_type = VBAD;
		vx_put(vp);
		goto try_again;
	}

	vp->v_data = node;
	node->v_node = vp;

	switch (node->node_type) {
	case Proot:
		vp->v_flag |= VROOT;
		/* fallthrough - root is also a directory */
	case Pdir:
		vp->v_type = VDIR;
		break;

	case Plink:
		vp->v_type = VLNK;
		break;

	case Preg:
		vp->v_type = VREG;
		break;

	case Pdev:
		vp->v_type = VCHR;
		KKASSERT(node->d_dev);

		vp->v_uminor = node->d_dev->si_uminor;
		vp->v_umajor = 0;

		v_associate_rdev(vp, node->d_dev);
		vp->v_ops = &node->mp->mnt_vn_spec_ops;
		break;

	default:
		panic("devfs_allocv: unknown node type");
	}

out:
	return error;
}

/*
 * devfs_allocvp allocates both a devfs node (with the given settings)
 * and a vnode based on the newly created devfs node.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
	      char *name, struct devfs_node *parent, cdev_t dev)
{
	struct devfs_node *node;

	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

	if (node != NULL)
		devfs_allocv(vpp, node);
	else
		*vpp = NULL;

	return 0;
}

/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 */
int
devfs_freep(struct devfs_node *node)
{
	struct vnode *vp;

	KKASSERT(node);
	/* Only the root node may still be linked when freed */
	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
		 (node->node_type == Proot));
	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);

	atomic_subtract_long(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
	if (node->symlink_name) {
		kfree(node->symlink_name, M_DEVFS);
		node->symlink_name = NULL;
	}

	/*
	 * Remove the node from the orphan list if it is still on it.
	 */
	if (node->flags & DEVFS_ORPHANED)
		devfs_tracer_del_orphan(node);

	/*
	 * Disassociate the vnode from the node.  This also prevents the
	 * vnode's reclaim code from double-freeing the node.
	 *
	 * The vget is needed to safely modify the vp.  It also serves
	 * to cycle the refs and terminate the vnode if it happens to
	 * be inactive, otherwise namecache references may not get cleared.
	 */
	while ((vp = node->v_node) != NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
			break;
		v_release_rdev(vp);
		vp->v_data = NULL;
		node->v_node = NULL;
		cache_inval_vp(vp, CINV_DESTROY);
		vput(vp);
	}
	if (node->d_dir.d_name) {
		kfree(node->d_dir.d_name, M_DEVFS);
		node->d_dir.d_name = NULL;
	}
	node->flags |= DEVFS_DESTROYED;

	objcache_put(devfs_node_cache, node);

	return 0;
}

/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by freep.
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct devfs_node *parent;
	KKASSERT(node);

	/*
	 * Add the node to the orphan list, so it is referenced somewhere
	 * and we don't leak it.
	 */
	devfs_tracer_add_orphan(node);

	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent)	{
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		KKASSERT((parent->nchildren >= 0));
		node->flags &= ~DEVFS_NODE_LINKED;
	}
	node->parent = NULL;
	return 0;
}

/*
 * Depth-first iteration over the topology rooted at 'node'.  The callback
 * is invoked on children before the node itself; a non-NULL callback
 * return aborts the walk and is propagated to the caller.
 */
void *
devfs_iterate_topology(struct devfs_node *node,
		devfs_iterate_callback_t *callback, void *arg1)
{
	struct devfs_node *node1, *node2;
	void *ret = NULL;

	/* Only recurse into directories that have real children (> '.'/'..') */
	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					link, node2) {
				if ((ret = devfs_iterate_topology(node1, callback, arg1)))
					return ret;
			}
		}
	}

	ret = callback(node, arg1);
	return ret;
}

/*
 * devfs_reaperp() is a recursive function that iterates through all the
 * topology, unlinking and freeing all devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
	devfs_unlinkp(node);
	devfs_freep(node);

	return NULL;
}

/*
 * Iteration callback: reap directories that became empty
 * (only '.' and '..' remain).
 */
static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
	if (node->node_type == Pdir) {
		if (node->nchildren == 2) {
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return NULL;
}

/*
 * Iteration callback: reap auto-created links that point at 'target'.
 */
static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
	if ((node->node_type == Plink) && (node->link_target == target)) {
		devfs_unlinkp(node);
		devfs_freep(node);
	}

	return NULL;
}

/*
 * devfs_gc() is devfs garbage collector. It takes care of unlinking and
 * freeing a node, but also removes empty directories and links that link
 * via devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	/* First reap any links pointing at this node */
	if (node->nlinks > 0)
		devfs_iterate_topology(root_node,
				(devfs_iterate_callback_t *)devfs_gc_links_callback, node);

	devfs_unlinkp(node);
	/* Then collect directories that became empty as a result */
	devfs_iterate_topology(root_node,
			(devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);

	devfs_freep(node);

	return 0;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device.  The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	reference_dev(dev);
	devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

	return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
	devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
	return 0;
}

/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs mount.
 * It sends a synchronous message with the relevant details to the devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_destroy_subnames() is the synchronous entry point for device
 * destruction by subname. It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_subnames(char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_load = name;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * Synchronously clear the given flag bits on all devices whose name is a
 * strict sub-name of 'name' (handled by the core's worker).
 */
int
devfs_clr_subnames_flag(char *name, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.name = name;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * Synchronously destroy all sub-named devices of 'name' which do NOT have
 * the given flag set (handled by the core's worker).
 */
int
devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.name = name;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_create_all_dev is the asynchronous entry point to trigger device
 * node creation.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
	devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
	return 0;
}

/*
 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
 * devices with a specific set of dev_ops and minor.  It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
	devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
	return 0;
}

/*
 * devfs_clone_handler_add is the synchronous entry point to add a new
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = nhandler;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_clone_handler_del is the synchronous entry point to remove a
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	/* NULL handler marks this as a removal for the core worker */
	msg->mdv_chandler.nhandler = NULL;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_find_device_by_name is the synchronous entry point to find a
 * device given its name.  It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 *
 * NOTE(review): kvcprintf() writes into target[] without an explicit
 * bound; callers appear expected to keep formatted names <= PATH_MAX —
 * confirm this invariant.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
	cdev_t found = NULL;
	devfs_msg_t msg;
	char target[PATH_MAX+1];
	__va_list ap;
	int i;

	if (fmt == NULL)
		return NULL;

	__va_start(ap, fmt);
	i = kvcprintf(fmt, NULL, target, 10, ap);
	target[i] = '\0';
	__va_end(ap);

	msg = devfs_msg_get();
	msg->mdv_name = target;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	return found;
}

/*
 * devfs_find_device_by_udev is the synchronous entry point to find a
 * device given its udev number.  It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
	cdev_t found = NULL;
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_udev = udev;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_find_device_by_udev found? %s -end:3-\n",
		    ((found) ? found->si_name:"NO"));
	return found;
}

/*
 * Resolve an inode number to a locked vnode on the given mount by asking
 * the devfs core.  Returns NULL if mp is NULL; otherwise the vnode from
 * the core's topology walk, locked LK_EXCLUSIVE.
 */
struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
	struct vnode *vp = NULL;
	devfs_msg_t msg;

	if (mp == NULL)
		return NULL;

	msg = devfs_msg_get();
	msg->mdv_ino.mp = mp;
	msg->mdv_ino.ino = target;
	msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
	vp = msg->mdv_ino.vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	devfs_msg_put(msg);

	return vp;
}

/*
 * devfs_make_alias is the asynchronous entry point to register an alias
 * for a device.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_make_alias(const char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	/* The alias struct is owned by (and freed in) the devfs core */
	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kstrdup(name, M_DEVFS);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
	return 0;
}

/*
 * devfs_apply_rules is the asynchronous entry point to trigger application
 * of all rules.  It just sends a message with the relevant details to the
 * devfs core.  The mount point name is duplicated since the message is
 * processed asynchronously.
 */
int
devfs_apply_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);

	return 0;
}

/*
 * devfs_reset_rules is the asynchronous entry point to trigger reset of all
 * rules.  It just sends a message with the relevant details to the devfs
 * core.  The mount point name is duplicated since the message is processed
 * asynchronously.
 */
int
devfs_reset_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_RESET_RULES, new_name);

	return 0;
}


/*
 * devfs_scan_callback is the asynchronous entry point to call a callback
 * on all cdevs.
 * It just sends a message with the relevant details to the devfs core.
 */
int
devfs_scan_callback(devfs_scan_t *callback)
{
	devfs_msg_t msg;

	/* The function pointer is smuggled through the generic load field */
	KKASSERT(sizeof(callback) == sizeof(void *));

	msg = devfs_msg_get();
	msg->mdv_load = callback;
	msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
	devfs_msg_put(msg);

	return 0;
}


/*
 * Acts as a message drain. Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	devfs_msg_put((devfs_msg_t)msg);
}

/*
 * devfs_msg_get allocates a new devfs msg and returns it.
 */
devfs_msg_t
devfs_msg_get()
{
	return objcache_get(devfs_msg_cache, M_WAITOK);
}

/*
 * devfs_msg_put deallocates a given devfs msg.
 */
int
devfs_msg_put(devfs_msg_t msg)
{
	objcache_put(devfs_msg_cache, msg);
	return 0;
}

/*
 * devfs_msg_send is the generic asynchronous message sending facility
 * for devfs.  By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously (avoids deadlocking against ourselves).
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	if (port->mpu_td == curthread) {
		devfs_msg_exec(devfs_msg);
		lwkt_replymsg(&devfs_msg->hdr, 0);
	} else {
		lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	}
}

/*
 * devfs_msg_send_sync is the generic synchronous message sending
 * facility for devfs.  It initializes a local reply port and waits
 * for the core's answer.  This answer is then returned.
 */
devfs_msg_t
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
	struct lwkt_port rep_port;
	devfs_msg_t	msg_incoming;
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	msg_incoming = lwkt_waitport(&rep_port, 0);

	return msg_incoming;
}

/*
 * sends a message with a generic argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_load = load;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_name = name;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a mount argument.
 */
void
devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_mnt = mnt;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with an ops argument.
 */
void
devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_ops.ops = ops;
	devfs_msg->mdv_ops.minor = minor;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a clone handler argument.
 */
void
devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_chandler.name = name;
	devfs_msg->mdv_chandler.nhandler = handler;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a device argument.
 */
void
devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_dev.dev = dev;
	devfs_msg->mdv_dev.uid = uid;
	devfs_msg->mdv_dev.gid = gid;
	devfs_msg->mdv_dev.perms = perms;

	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a link argument.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_link.name = name;
	devfs_msg->mdv_link.target = target;
	devfs_msg->mdv_link.mp = mp;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * devfs_msg_core is the main devfs thread.
 * It handles all incoming messages
 * and calls the relevant worker functions.  By using messages it's assured
 * that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
	devfs_msg_t msg;

	devfs_run = 1;
	lwkt_initport_thread(&devfs_msg_port, curthread);
	/* Signal whoever started us that the port is initialized */
	wakeup(td_core);

	while (devfs_run) {
		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG,
				"devfs_msg_core, new msg: %x\n",
				(unsigned int)msg->hdr.u.ms_result);
		devfs_msg_exec(msg);
		lwkt_replymsg(&msg->hdr, 0);
	}
	/* Signal termination (devfs_run cleared by DEVFS_TERMINATE_CORE) */
	wakeup(td_core);
	lwkt_exit();
}

/*
 * Dispatch one devfs message to its worker.  Runs in the core thread (or
 * synchronously in the sender when the sender IS the core thread); all
 * workers are serialized under devfs_lock.
 */
static void
devfs_msg_exec(devfs_msg_t msg)
{
	struct devfs_mnt_data *mnt;
	struct devfs_node *node;
	cdev_t	dev;

	/*
	 * Acquire the devfs lock to ensure safety of all called functions
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	switch (msg->hdr.u.ms_result) {
	case DEVFS_DEVICE_CREATE:
		dev = msg->mdv_dev.dev;
		devfs_create_dev_worker(dev,
					msg->mdv_dev.uid,
					msg->mdv_dev.gid,
					msg->mdv_dev.perms);
		break;
	case DEVFS_DEVICE_DESTROY:
		dev = msg->mdv_dev.dev;
		devfs_destroy_dev_worker(dev);
		break;
	case DEVFS_DESTROY_SUBNAMES:
		devfs_destroy_subnames_worker(msg->mdv_load);
		break;
	case DEVFS_DESTROY_DEV_BY_OPS:
		devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
						msg->mdv_ops.minor);
		break;
	case DEVFS_CREATE_ALL_DEV:
		node = (struct devfs_node *)msg->mdv_load;
		devfs_create_all_dev_worker(node);
		break;
	case DEVFS_MOUNT_ADD:
		mnt = msg->mdv_mnt;
		TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
		devfs_create_all_dev_worker(mnt->root_node);
		break;
	case DEVFS_MOUNT_DEL:
		mnt = msg->mdv_mnt;
		TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
		/* Reap the whole topology of this mount */
		devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
				       NULL);
		/* leak_count should have returned to zero by now */
		if (mnt->leak_count) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Leaked %ld devfs_node elements!\n",
				    mnt->leak_count);
		}
		break;
	case DEVFS_CHANDLER_ADD:
		devfs_chandler_add_worker(msg->mdv_chandler.name,
				msg->mdv_chandler.nhandler);
		break;
	case DEVFS_CHANDLER_DEL:
		devfs_chandler_del_worker(msg->mdv_chandler.name);
		break;
	case DEVFS_FIND_DEVICE_BY_NAME:
		devfs_find_device_by_name_worker(msg);
		break;
	case DEVFS_FIND_DEVICE_BY_UDEV:
		devfs_find_device_by_udev_worker(msg);
		break;
	case DEVFS_MAKE_ALIAS:
		devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_APPLY_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 1);
		break;
	case DEVFS_RESET_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 0);
		break;
	case DEVFS_SCAN_CALLBACK:
		devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
		break;
	case DEVFS_CLR_SUBNAMES_FLAG:
		devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
				msg->mdv_flags.flag);
		break;
	case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
		devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
				msg->mdv_flags.flag);
		break;
	case DEVFS_INODE_TO_VNODE:
		msg->mdv_ino.vp = devfs_iterate_topology(
				DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
				(devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
				&msg->mdv_ino.ino);
		break;
	case DEVFS_TERMINATE_CORE:
		/* Core thread exits its loop after replying to this message */
		devfs_run = 0;
		break;
	case DEVFS_SYNC:
		/* No-op; the reply itself is the synchronization point */
		break;
	default:
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs_msg_core: unknown message "
			    "received at core\n");
		break;
	}
	lockmgr(&devfs_lock, LK_RELEASE);
}

/*
 * Worker function to insert a new dev into the dev list and initialize its
 * permissions. It also calls devfs_propagate_dev which in turn propagates
 * the change to all mount points.
 *
 * The passed dev is already referenced.  This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	KKASSERT(dev);

	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_perms = perms;

	devfs_link_dev(dev);
	devfs_propagate_dev(dev, 1);

	return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev which in turn propagates the change
 * to all mount points.
 *
 * NOTE(review): three release_dev() calls drop the link ref (only when
 * the unlink succeeded) plus two further references; presumably these
 * balance the references taken at creation time — verify against
 * reference_dev()/make_dev() semantics.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
	int error;

	KKASSERT(dev);
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	error = devfs_unlink_dev(dev);
	devfs_propagate_dev(dev, 0);
	if (error == 0)
		release_dev(dev);	/* link ref */
	release_dev(dev);
	release_dev(dev);

	return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker for the actual destruction.
 */
static int
devfs_destroy_subnames_worker(char *name)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	/*
	 * MUTABLE iteration because devfs_destroy_dev_worker removes
	 * the current element from the list.  A device matches when
	 * 'name' is a strict prefix of its si_name (i.e. the device is
	 * a sub-name such as "foo0" for basename "foo", not "foo"
	 * itself).
	 */
	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			devfs_destroy_dev_worker(dev);
		}
	}
	return 0;
}

/*
 * Clears the given flag bit(s) on every device whose si_name has
 * 'name' as a strict prefix.
 */
static int
devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			dev->si_flags &= ~flag;
		}
	}

	return 0;
}

/*
 * Destroys every device whose si_name has 'name' as a strict prefix,
 * unless the device carries the given flag.
 */
static int
devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			if (!(dev->si_flags & flag)) {
				devfs_destroy_dev_worker(dev);
			}
		}
	}

	return 0;
}

/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
	cdev_t dev;

	KKASSERT(root);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		devfs_create_device_node(root, dev, NULL, NULL);
	}

	return 0;
}

/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor. If minor is less than 0, it is not matched
 * against. It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
	cdev_t dev, dev1;

	KKASSERT(ops);

	/* MUTABLE: devfs_destroy_dev_worker unlinks the current element */
	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_ops != ops)
			continue;
		if ((minor < 0) || (dev->si_uminor == minor)) {
			devfs_destroy_dev_worker(dev);
		}
	}

	return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 *
 * Returns 1 if the name is empty or a handler for that basename is
 * already registered, 0 on success.
 */
static int
devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
{
	struct devfs_clone_handler *chandler = NULL;
	/*
	 * NOTE(review): strlen() is truncated to u_char here (matching
	 * chandler->namlen); basenames longer than 255 chars would wrap —
	 * presumably never the case for device basenames.
	 */
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;

		if (!memcmp(chandler->name, name, len)) {
			/* Clonable basename already exists */
			return 1;
		}
	}

	chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
	chandler->name = kstrdup(name, M_DEVFS);
	chandler->namlen = len;
	chandler->nhandler = nhandler;

	TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
	return 0;
}

/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(const char *name)
{
	struct devfs_clone_handler *chandler, *chandler2;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	/* MUTABLE: the matching element is removed inside the loop */
	TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
		if (chandler->namlen != len)
			continue;
		if (memcmp(chandler->name, name, len))
			continue;

		TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
		kfree(chandler->name, M_DEVFS);
		kfree(chandler, M_DEVFS);
		break;
	}

	return 0;
}

/*
 * Worker function that finds a given device name and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 *
 * Falls back to the alias list if no device matches directly;
 * stores the result (or NULL) in mdv_cdev.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
	struct devfs_alias *alias;
	cdev_t dev;
	cdev_t found = NULL;

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
			found = dev;
			break;
		}
	}
	if (found == NULL) {
		TAILQ_FOREACH(alias, &devfs_alias_list, link) {
			if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
				found = alias->dev_target;
				break;
			}
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that finds a given device udev and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
1382 */ 1383 static int 1384 devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg) 1385 { 1386 cdev_t dev, dev1; 1387 cdev_t found = NULL; 1388 1389 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { 1390 if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) { 1391 found = dev; 1392 break; 1393 } 1394 } 1395 devfs_msg->mdv_cdev = found; 1396 1397 return 0; 1398 } 1399 1400 /* 1401 * Worker function that inserts a given alias into the 1402 * alias list, and propagates the alias to all mount 1403 * points. 1404 */ 1405 static int 1406 devfs_make_alias_worker(struct devfs_alias *alias) 1407 { 1408 struct devfs_alias *alias2; 1409 size_t len = strlen(alias->name); 1410 int found = 0; 1411 1412 TAILQ_FOREACH(alias2, &devfs_alias_list, link) { 1413 if (len != alias2->namlen) 1414 continue; 1415 1416 if (!memcmp(alias->name, alias2->name, len)) { 1417 found = 1; 1418 break; 1419 } 1420 } 1421 1422 if (!found) { 1423 /* 1424 * The alias doesn't exist yet, so we add it to the alias list 1425 */ 1426 TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link); 1427 devfs_alias_propagate(alias); 1428 } else { 1429 devfs_debug(DEVFS_DEBUG_WARNING, 1430 "Warning: duplicate devfs_make_alias for %s\n", 1431 alias->name); 1432 kfree(alias->name, M_DEVFS); 1433 kfree(alias, M_DEVFS); 1434 } 1435 1436 return 0; 1437 } 1438 1439 /* 1440 * Function that removes and frees all aliases. 1441 */ 1442 static int 1443 devfs_alias_reap(void) 1444 { 1445 struct devfs_alias *alias, *alias2; 1446 1447 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { 1448 TAILQ_REMOVE(&devfs_alias_list, alias, link); 1449 kfree(alias, M_DEVFS); 1450 } 1451 return 0; 1452 } 1453 1454 /* 1455 * Function that removes an alias matching a specific cdev and frees 1456 * it accordingly. 
1457 */ 1458 static int 1459 devfs_alias_remove(cdev_t dev) 1460 { 1461 struct devfs_alias *alias, *alias2; 1462 1463 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { 1464 if (alias->dev_target == dev) { 1465 TAILQ_REMOVE(&devfs_alias_list, alias, link); 1466 kfree(alias, M_DEVFS); 1467 } 1468 } 1469 return 0; 1470 } 1471 1472 /* 1473 * This function propagates a new alias to all mount points. 1474 */ 1475 static int 1476 devfs_alias_propagate(struct devfs_alias *alias) 1477 { 1478 struct devfs_mnt_data *mnt; 1479 1480 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { 1481 devfs_alias_apply(mnt->root_node, alias); 1482 } 1483 return 0; 1484 } 1485 1486 /* 1487 * This function is a recursive function iterating through 1488 * all device nodes in the topology and, if applicable, 1489 * creating the relevant alias for a device node. 1490 */ 1491 static int 1492 devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias) 1493 { 1494 struct devfs_node *node1, *node2; 1495 1496 KKASSERT(alias != NULL); 1497 1498 if ((node->node_type == Proot) || (node->node_type == Pdir)) { 1499 if (node->nchildren > 2) { 1500 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { 1501 devfs_alias_apply(node1, alias); 1502 } 1503 } 1504 } else { 1505 if (node->d_dev == alias->dev_target) 1506 devfs_alias_create(alias->name, node, 0); 1507 } 1508 return 0; 1509 } 1510 1511 /* 1512 * This function checks if any alias possibly is applicable 1513 * to the given node. If so, the alias is created. 1514 */ 1515 static int 1516 devfs_alias_check_create(struct devfs_node *node) 1517 { 1518 struct devfs_alias *alias; 1519 1520 TAILQ_FOREACH(alias, &devfs_alias_list, link) { 1521 if (node->d_dev == alias->dev_target) 1522 devfs_alias_create(alias->name, node, 0); 1523 } 1524 return 0; 1525 } 1526 1527 /* 1528 * This function creates an alias with a given name 1529 * linking to a given devfs node. It also increments 1530 * the link count on the target node. 
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
	struct mount *mp = target->mp;
	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
	struct devfs_node *linknode;
	char *create_path = NULL;
	char *name, name_buf[PATH_MAX];

	/* Must be called with the devfs lock held exclusively */
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	/* Split name_orig into a directory part and the final name */
	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);


	/* Refuse to shadow an existing node of the same name */
	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Node already exists: %s "
			    "(devfs_make_alias_worker)!\n",
			    name);
		return 1;
	}


	linknode = devfs_allocp(Plink, name, parent, mp, NULL);
	if (linknode == NULL)
		return 1;

	linknode->link_target = target;
	target->nlinks++;

	/* Rule-created links are tagged so rule resets can remove them */
	if (rule_based)
		linknode->flags |= DEVFS_RULE_CREATED;

	return 0;
}

/*
 * This function is called by the core and handles mount point
 * strings. It either calls the relevant worker (devfs_apply_
 * reset_rules_worker) on all mountpoints or only a specific
 * one.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
	struct devfs_mnt_data *mnt;
	size_t len = strlen(mountto);

	/* A leading '*' means: apply/reset on every devfs mount point */
	if (mountto[0] == '*') {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			devfs_iterate_topology(mnt->root_node,
			    (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
			    NULL);
		}
	} else {
		/* Otherwise only the mount point with a matching path */
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
				devfs_iterate_topology(mnt->root_node,
				    (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
				    NULL);
				break;
			}
		}
	}

	/* The mount point string was allocated by the sender; eat it */
	kfree(mountto, M_DEVFS);
	return 0;
}

/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback)
{
	cdev_t dev, dev1;

	/* MUTABLE in case the callback removes the current dev */
	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		callback(dev);
	}

	return 0;
}

/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
			    size_t name_len, int create)
{
	struct devfs_node *node, *found = NULL;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (name_len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
			found = node;
			break;
		}
	}

	if ((found == NULL) && (create)) {
		found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
	}

	return found;
}

/*
 * This function tries to resolve a complete path. If creation is requested,
 * if a given part of the path cannot be resolved (because it doesn't exist),
 * it is created.
 */
struct devfs_node *
devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
{
	struct devfs_node *node = parent;
	char buf[PATH_MAX];
	size_t idx = 0;


	if (path == NULL)
		return parent;


	/*
	 * Walk the path component by component, resolving (and
	 * optionally creating) one directory level per '/'.
	 * NOTE(review): no bound check on idx against PATH_MAX —
	 * presumably callers never pass paths >= PATH_MAX; confirm.
	 */
	for (; *path != '\0' ; path++) {
		if (*path != '/') {
			buf[idx++] = *path;
		} else {
			buf[idx] = '\0';
			node = devfs_resolve_or_create_dir(node, buf, idx, create);
			if (node == NULL)
				return NULL;
			idx = 0;
		}
	}
	/* Final component (no trailing '/') */
	buf[idx] = '\0';
	return devfs_resolve_or_create_dir(node, buf, idx, create);
}

/*
 * Takes a full path and strips it into a directory path and a name.
 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It
 * requires a working buffer with enough size to keep the whole
 * fullpath.
 *
 * If there is no '/' in fullpath, *pathp is set to NULL and *namep
 * points at the whole (copied) string.
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
	char *name = NULL;
	char *path = NULL;
	size_t len = strlen(fullpath) + 1;
	int i;

	KKASSERT((fullpath != NULL) && (buf != NULL));
	KKASSERT((pathp != NULL) && (namep != NULL));

	memcpy(buf, fullpath, len);

	/* Find the last '/' and split the copy in place */
	for (i = len-1; i>= 0; i--) {
		if (buf[i] == '/') {
			buf[i] = '\0';
			name = &(buf[i+1]);
			path = buf;
			break;
		}
	}

	*pathp = path;

	if (name) {
		*namep = name;
	} else {
		*namep = buf;
	}

	return 0;
}

/*
 * This function creates a new devfs node for a given device. It can
 * handle a complete path as device name, and accordingly creates
 * the path and the final device node.
 *
 * The reference count on the passed dev remains unchanged.
 */
struct devfs_node *
devfs_create_device_node(struct devfs_node *root, cdev_t dev,
			 char *dev_name, char *path_fmt, ...)
{
	struct devfs_node *parent, *node = NULL;
	char *path = NULL;
	char *name, name_buf[PATH_MAX];
	__va_list ap;
	int i, found;

	char *create_path = NULL;
	/* Unix98-style tty group letters used to tag pty slaves below */
	char *names = "pqrsPQRS";

	if (path_fmt != NULL) {
		path = kmalloc(PATH_MAX+1, M_DEVFS, M_WAITOK);

		__va_start(ap, path_fmt);
		i = kvcprintf(path_fmt, NULL, path, 10, ap);
		path[i] = '\0';
		__va_end(ap);
	}

	parent = devfs_resolve_or_create_path(root, path, 1);
	KKASSERT(parent);

	/* Use the dev's own name unless an explicit dev_name was given */
	devfs_resolve_name_path(
			((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
			name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);


	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
			"DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name);
		goto out;
	}

	/*
	 * NOTE(review): node is dereferenced below without a NULL check;
	 * presumably devfs_allocp cannot fail here (M_WAITOK-style) —
	 * confirm against its implementation.
	 */
	node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
	nanotime(&parent->mtime);
#if 0
	/*
	 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
	 * directory
	 */
	if ((dev) && (strlen(dev->si_name) >= 4) &&
			(!memcmp(dev->si_name, "ptm/", 4))) {
		node->parent->flags |= DEVFS_HIDDEN;
		node->flags |= DEVFS_HIDDEN;
	}
#endif

	/*
	 * Ugly pty magic, to tag pty devices as such and hide them if needed.
	 */
	if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
		node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);

	/* tty[pqrsPQRS]* are BSD pty slaves; tag them as ptys too */
	if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
		found = 0;
		for (i = 0; i < strlen(names); i++) {
			if (name[3] == names[i]) {
				found = 1;
				break;
			}
		}
		if (found)
			node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
	}

out:
	if (path_fmt != NULL)
		kfree(path, M_DEVFS);

	return node;
}

/*
 * This function finds a given device node in the topology with a given
 * cdev.  Intended as a devfs_iterate_topology callback: returns the
 * node on match, NULL to continue iterating.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
	if ((node->node_type == Pdev) && (node->d_dev == target)) {
		return node;
	}

	return NULL;
}

/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
	struct devfs_node *node, *found = NULL;
	size_t len = strlen(target);

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(node->d_dir.d_name, target, len)) {
			found = node;
			break;
		}
	}

	return found;
}

/*
 * devfs_iterate_topology callback: returns an unlocked, referenced
 * vnode for the node whose inode number matches *inop, allocating a
 * fresh vnode if the node has none yet; NULL to continue iterating.
 */
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
{
	struct vnode *vp = NULL;
	ino_t target = *inop;

	if (node->d_dir.d_ino == target) {
		if (node->v_node) {
			vp = node->v_node;
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
			vn_unlock(vp);
		} else {
			devfs_allocv(&vp, node);
			vn_unlock(vp);
		}
	}

	return vp;
}

/*
 * This function takes a cdev and removes its devfs node in the
 * given topology. The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
	struct devfs_node *node, *parent;
	char *name, name_buf[PATH_MAX];
	char *create_path = NULL;

	KKASSERT(target);

	memcpy(name_buf, target->si_name, strlen(target->si_name)+1);

	/* Split si_name into directory path and final node name */
	devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);

	/* Resolve only; never create directories while destroying */
	if (create_path)
		parent = devfs_resolve_or_create_path(root, create_path, 0);
	else
		parent = root;

	if (parent == NULL)
		return 1;

	node = devfs_find_device_node_by_name(parent, name);

	if (node) {
		nanotime(&node->parent->mtime);
		devfs_gc(node);
	}

	return 0;
}

/*
 * Just set perms and ownership for given node.
 *
 * NOTE(review): the 'flags' parameter is currently ignored.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
		u_short mode, u_long flags)
{
	node->mode = mode;
	node->uid = uid;
	node->gid = gid;

	return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points. Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (attach) {
			/* Device is being attached */
			devfs_create_device_node(mnt->root_node, dev,
						 NULL, NULL );
		} else {
			/* Device is being detached */
			devfs_alias_remove(dev);
			devfs_destroy_device_node(mnt->root_node, dev);
		}
	}
	return 0;
}

/*
 * devfs_node_to_path takes a node and a buffer of a size of
 * at least PATH_MAX, resolves the full path from the root
 * node and writes it in a humanly-readable format into the
 * buffer.
 * If DEVFS_STASH_DEPTH is less than the directory level up
 * to the root node, only the last DEVFS_STASH_DEPTH levels
 * of the path are resolved.
 */
int
devfs_node_to_path(struct devfs_node *node, char *buffer)
{
#define DEVFS_STASH_DEPTH	32
	struct devfs_node *node_stash[DEVFS_STASH_DEPTH];
	int i, offset;
	memset(buffer, 0, PATH_MAX);

	/* Walk up towards the root, stashing each level */
	for (i = 0; (i < DEVFS_STASH_DEPTH) && (node->node_type != Proot); i++) {
		node_stash[i] = node;
		node = node->parent;
	}
	i--;

	/* Emit the stashed components top-down, '/'-separated */
	for (offset = 0; i >= 0; i--) {
		memcpy(buffer+offset, node_stash[i]->d_dir.d_name,
				node_stash[i]->d_dir.d_namlen);
		offset += node_stash[i]->d_dir.d_namlen;
		if (i > 0) {
			*(buffer+offset) = '/';
			offset++;
		}
	}
#undef DEVFS_STASH_DEPTH
	return 0;
}

/*
 * devfs_clone either returns a basename from a complete name by
 * returning the length of the name without trailing digits, or,
 * if clone != 0, calls the device's clone handler to get a new
 * device, which in turn is returned in devp.
 *
 * Called with devfs_lock held exclusively; the lock is temporarily
 * dropped around devfs_config().
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
	    struct ucred *cred)
{
	int error;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;
		if ((!memcmp(chandler->name, name, len)) && (chandler->nhandler)) {
			/*
			 * NOTE(review): devfs_lock is released and
			 * reacquired here while still iterating the
			 * chandler list — presumably the list cannot
			 * change during devfs_config(); confirm.
			 */
			lockmgr(&devfs_lock, LK_RELEASE);
			devfs_config();
			lockmgr(&devfs_lock, LK_EXCLUSIVE);

			ap.a_head.a_dev = dev;
			ap.a_dev = NULL;
			ap.a_name = name;
			ap.a_namelen = len;
			ap.a_mode = mode;
			ap.a_cred = cred;
			error = (chandler->nhandler)(&ap);
			if (error)
				continue;

			return ap.a_dev;
		}
	}

	return NULL;
}


/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	/* A node may be orphaned only once */
	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t	ret;

	spin_lock_wr(&ino_lock);
	ret = d_ino++;
	spin_unlock_wr(&ino_lock);

	return ret;
}

/*
 * Allocates a new cdev and initializes its most basic
 * fields.
2092 */ 2093 cdev_t 2094 devfs_new_cdev(struct dev_ops *ops, int minor) 2095 { 2096 cdev_t dev = sysref_alloc(&cdev_sysref_class); 2097 sysref_activate(&dev->si_sysref); 2098 reference_dev(dev); 2099 memset(dev, 0, offsetof(struct cdev, si_sysref)); 2100 2101 dev->si_uid = 0; 2102 dev->si_gid = 0; 2103 dev->si_perms = 0; 2104 dev->si_drv1 = NULL; 2105 dev->si_drv2 = NULL; 2106 dev->si_lastread = 0; /* time_second */ 2107 dev->si_lastwrite = 0; /* time_second */ 2108 2109 dev->si_ops = ops; 2110 dev->si_flags = 0; 2111 dev->si_umajor = 0; 2112 dev->si_uminor = minor; 2113 dev->si_inode = makeudev(devfs_reference_ops(ops), minor); 2114 2115 return dev; 2116 } 2117 2118 static void 2119 devfs_cdev_terminate(cdev_t dev) 2120 { 2121 int locked = 0; 2122 2123 /* Check if it is locked already. if not, we acquire the devfs lock */ 2124 if (!(lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE) { 2125 lockmgr(&devfs_lock, LK_EXCLUSIVE); 2126 locked = 1; 2127 } 2128 2129 /* Propagate destruction, just in case */ 2130 devfs_propagate_dev(dev, 0); 2131 2132 /* If we acquired the lock, we also get rid of it */ 2133 if (locked) 2134 lockmgr(&devfs_lock, LK_RELEASE); 2135 2136 devfs_release_ops(dev->si_ops); 2137 2138 /* Finally destroy the device */ 2139 sysref_put(&dev->si_sysref); 2140 } 2141 2142 /* 2143 * Links a given cdev into the dev list. 2144 */ 2145 int 2146 devfs_link_dev(cdev_t dev) 2147 { 2148 KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0); 2149 dev->si_flags |= SI_DEVFS_LINKED; 2150 TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link); 2151 2152 return 0; 2153 } 2154 2155 /* 2156 * Removes a given cdev from the dev list. The caller is responsible for 2157 * releasing the reference on the device associated with the linkage. 2158 * 2159 * Returns EALREADY if the dev has already been unlinked. 
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

/*
 * Returns non-zero if the node exists and is not hidden.
 */
int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

/*
 * Bumps the reference count on a dev_ops, assigning it a unique id
 * from the ops_id clone bitmap on first reference.  Returns the id
 * (used as the major number in makeudev).
 */
int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;

	if (ops->head.refs == 0) {
		ops->head.id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (ops->head.id == -1) {
			/* Ran out of unique ids */
			devfs_debug(DEVFS_DEBUG_WARNING,
					"devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = ops->head.id;
	++ops->head.refs;

	return unit;
}

/*
 * Drops a dev_ops reference; returns the unique id to the bitmap
 * when the last reference goes away.
 */
void
devfs_release_ops(struct dev_ops *ops)
{
	--ops->head.refs;

	if (ops->head.refs == 0) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), ops->head.id);
	}
}

/*
 * Sends a synchronous DEVFS_SYNC message and waits for the core
 * thread to process it — used as a barrier/flush of the message queue.
 */
void
devfs_config(void)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
	devfs_msg_put(msg);
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread. Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
			NULL, NULL, NULL,
			objcache_malloc_alloc,
			objcache_malloc_free,
			&devfs_node_malloc_args );

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
			NULL, NULL, NULL,
			objcache_malloc_alloc,
			objcache_malloc_free,
			&devfs_msg_malloc_args );

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
			NULL, NULL, NULL,
			objcache_malloc_alloc,
			objcache_malloc_free,
			&devfs_dev_malloc_args );

	/* Bitmap handing out unique ids for dev_ops (see devfs_reference_ops) */
	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);


	/* Spawn the message-processing core thread ... */
	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "devfs_msg_core");

	/* ... and wait until it signals that it is up and running */
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}

/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches. Also removes aliases that are no longer needed.
 */
static void
devfs_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

	/* Ask the core thread to terminate */
	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);

	/*
	 * NOTE(review): the first tsleep waits for the core thread's
	 * wakeup; the second, with a timeout, presumably gives the
	 * thread time to actually exit — confirm intent.
	 */
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);

	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

	/* Destroy the objcaches */
	objcache_destroy(devfs_msg_cache);
	objcache_destroy(devfs_node_cache);
	objcache_destroy(devfs_dev_cache);

	devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) to
 * find the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
	udev_t 	udev;
	cdev_t	found;
	int		error;


	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
		return (error);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);

	if (udev == NOUDEV)
		return(EINVAL);

	if ((found = devfs_find_device_by_udev(udev)) == NULL)
		return(ENOENT);

	/* Copy out the NUL-terminated device name */
	return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}


SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
			NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
		0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
		devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
		devfs_uninit, NULL);