/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/sysref2.h>
#include <sys/devfs.h>
#include <sys/devfs_rules.h>
#include <sys/hotplug.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DECLARE_CLONE_BITMAP(ops_id);
/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
	.name = "cdev",
	.mtype = M_DEVFS,
	.proto = SYSREF_PROTO_DEV,
	.offset = offsetof(struct cdev, si_sysref),
	.objsize = sizeof(struct cdev),
	.mag_capacity = 32,
	.flags = 0,
	.ops = {
		.terminate = (sysref_terminate_func_t)devfs_cdev_terminate
	}
};

static struct objcache *devfs_node_cache;
static struct objcache *devfs_msg_cache;
static struct objcache *devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
	sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
	sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
	sizeof(struct cdev), M_DEVFS };

static struct devfs_dev_head devfs_dev_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
		TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
		TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
		TAILQ_HEAD_INITIALIZER(devfs_alias_list);
static struct devfs_dev_ops_head devfs_dev_ops_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);

struct lock devfs_lock;
static struct lwkt_port devfs_dispose_port;
static struct lwkt_port devfs_msg_port;
static struct thread *td_core;

static struct spinlock ino_lock;
static ino_t d_ino;
static int devfs_debug_enable;
static int devfs_run;

static ino_t devfs_fetch_ino(void);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_subnames_worker(char *);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(const char *, d_clone_t *);
static int devfs_chandler_del_worker(const char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static int devfs_apply_reset_rules_caller(char *, int);

static int devfs_scan_callback_worker(devfs_scan_t *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
		char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_subnames_flag_worker(char *, uint32_t);
static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t);

static void *devfs_reaperp_callback(struct devfs_node *, void *);
static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);

/* hotplug */
void (*devfs_node_added)(struct hotplug_device *) = NULL;
void (*devfs_node_removed)(struct hotplug_device *) = NULL;

/*
 * devfs_debug() is a SYSCTL- and TUNABLE-controlled debug output function
 * using kvprintf.
 */
int
devfs_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= devfs_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

/*
 * devfs_allocp() allocates a new devfs node with the specified
 * parameters.  The node is also automatically linked into the topology
 * if a parent is specified, and the relevant rules and aliases are
 * applied to the new node.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
	     struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
	struct devfs_node *node = NULL;
	size_t namlen = strlen(name);

	node = objcache_get(devfs_node_cache, M_WAITOK);
	bzero(node, sizeof(*node));

	atomic_add_long(&(DEVFS_MNTDATA(mp)->leak_count), 1);

	node->d_dev = NULL;
	node->nchildren = 1;
	node->mp = mp;
	node->d_dir.d_ino = devfs_fetch_ino();

	/*
	 * Cookie jar for children.  Leave 0 and 1 for '.' and '..' entries
	 * respectively.
	 */
	node->cookie_jar = 2;

	/*
	 * Access Control members
	 */
	node->mode = DEVFS_DEFAULT_MODE;
	node->uid = DEVFS_DEFAULT_UID;
	node->gid = DEVFS_DEFAULT_GID;

	switch (devfsnodetype) {
	case Proot:
		/*
		 * Ensure that we don't recycle the root vnode by marking it as
		 * linked into the topology.
		 */
		node->flags |= DEVFS_NODE_LINKED;
		/* FALLTHROUGH */
	case Pdir:
		TAILQ_INIT(DEVFS_DENODE_HEAD(node));
		node->d_dir.d_type = DT_DIR;
		node->nchildren = 2;
		break;

	case Plink:
		node->d_dir.d_type = DT_LNK;
		break;

	case Preg:
		node->d_dir.d_type = DT_REG;
		break;

	case Pdev:
		if (dev != NULL) {
			node->d_dir.d_type = DT_CHR;
			node->d_dev = dev;

			node->mode = dev->si_perms;
			node->uid = dev->si_uid;
			node->gid = dev->si_gid;

			devfs_alias_check_create(node);
		}
		break;

	default:
		panic("devfs_allocp: unknown node type");
	}

	node->v_node = NULL;
	node->node_type = devfsnodetype;

	/* Initialize the dirent structure of each devfs vnode */
	KKASSERT(namlen < 256);
	node->d_dir.d_namlen = namlen;
	node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
	memcpy(node->d_dir.d_name, name, namlen);
	node->d_dir.d_name[namlen] = '\0';

	/* Initialize the parent node element */
	node->parent = parent;

	/* Apply rules */
	devfs_rule_check_apply(node, NULL);

	/* Initialize *time members */
	nanotime(&node->atime);
	node->mtime = node->ctime = node->atime;

	/*
	 * Associate with parent as last step, clean out namecache
	 * reference.
	 */
	if ((parent != NULL) &&
	    ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
		parent->nchildren++;
		node->cookie = parent->cookie_jar++;
		node->flags |= DEVFS_NODE_LINKED;
		TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

		/* This forces negative namecache lookups to clear */
		++mp->mnt_namecache_gen;
	}

	++DEVFS_MNTDATA(mp)->file_count;

	return node;
}
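/*
 * Illustrative sketch (not compiled): creating a directory node under a
 * devfs mount "mp" with root node "root" and immediately obtaining a
 * vnode for it would look roughly like this, using devfs_allocvp()
 * below, which simply wraps devfs_allocp() + devfs_allocv():
 *
 *	struct vnode *vp;
 *
 *	devfs_allocvp(mp, &vp, Pdir, "mydir", root, NULL);
 *
 * ("mydir" is a hypothetical name; topology changes are expected to
 * happen with the devfs lock held.)
 */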
/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
	struct vnode *vp;
	int error = 0;

	KKASSERT(node);

try_again:
	while ((vp = node->v_node) != NULL) {
		error = vget(vp, LK_EXCLUSIVE);
		if (error != ENOENT) {
			*vpp = vp;
			goto out;
		}
	}

	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
		goto out;

	vp = *vpp;

	if (node->v_node != NULL) {
		vp->v_type = VBAD;
		vx_put(vp);
		goto try_again;
	}

	vp->v_data = node;
	node->v_node = vp;

	switch (node->node_type) {
	case Proot:
		vp->v_flag |= VROOT;
		/* FALLTHROUGH */
	case Pdir:
		vp->v_type = VDIR;
		break;

	case Plink:
		vp->v_type = VLNK;
		break;

	case Preg:
		vp->v_type = VREG;
		break;

	case Pdev:
		vp->v_type = VCHR;
		KKASSERT(node->d_dev);

		vp->v_uminor = node->d_dev->si_uminor;
		vp->v_umajor = 0;

		v_associate_rdev(vp, node->d_dev);
		vp->v_ops = &node->mp->mnt_vn_spec_ops;
		break;

	default:
		panic("devfs_allocv: unknown node type");
	}

out:
	return error;
}

/*
 * devfs_allocvp() allocates both a devfs node (with the given settings)
 * and a vnode based on the newly created devfs node.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
	      char *name, struct devfs_node *parent, cdev_t dev)
{
	struct devfs_node *node;

	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

	if (node != NULL)
		devfs_allocv(vpp, node);
	else
		*vpp = NULL;

	return 0;
}

/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 */
int
devfs_freep(struct devfs_node *node)
{
	struct vnode *vp;

	KKASSERT(node);
	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
		 (node->node_type == Proot));
	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);

	atomic_subtract_long(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
	if (node->symlink_name) {
		kfree(node->symlink_name, M_DEVFS);
		node->symlink_name = NULL;
	}

	/*
	 * Remove the node from the orphan list if it is still on it.
	 */
	if (node->flags & DEVFS_ORPHANED)
		devfs_tracer_del_orphan(node);

	/*
	 * Disassociate the vnode from the node.  This also prevents the
	 * vnode's reclaim code from double-freeing the node.
	 *
	 * The vget is needed to safely modify the vp.  It also serves
	 * to cycle the refs and terminate the vnode if it happens to
	 * be inactive, otherwise namecache references may not get cleared.
	 */
	while ((vp = node->v_node) != NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
			break;
		v_release_rdev(vp);
		vp->v_data = NULL;
		node->v_node = NULL;
		cache_inval_vp(vp, CINV_DESTROY);
		vput(vp);
	}
	if (node->d_dir.d_name) {
		kfree(node->d_dir.d_name, M_DEVFS);
		node->d_dir.d_name = NULL;
	}
	node->flags |= DEVFS_DESTROYED;

	--DEVFS_MNTDATA(node->mp)->file_count;

	objcache_put(devfs_node_cache, node);

	return 0;
}
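/*
 * Note: a node that is still linked into the topology must be unlinked
 * before it can be freed; the canonical teardown sequence, as used by
 * the reaper and GC callbacks below, is
 *
 *	devfs_unlinkp(node);
 *	devfs_freep(node);
 */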
/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by freep.
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct devfs_node *parent;
	struct hotplug_device *hpdev;

	KKASSERT(node);

	/*
	 * Add the node to the orphan list, so it is referenced somewhere
	 * and we don't leak it.
	 */
	devfs_tracer_add_orphan(node);

	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent) {
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		KKASSERT((parent->nchildren >= 0));
		node->flags &= ~DEVFS_NODE_LINKED;
	}
	/* hotplug handler */
	if (devfs_node_removed) {
		hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP, M_WAITOK);
		hpdev->dev = node->d_dev;
		if (hpdev->dev)
			hpdev->name = node->d_dev->si_name;
		devfs_node_removed(hpdev);
		kfree(hpdev, M_TEMP);
	}
	node->parent = NULL;
	return 0;
}

void *
devfs_iterate_topology(struct devfs_node *node,
		devfs_iterate_callback_t *callback, void *arg1)
{
	struct devfs_node *node1, *node2;
	void *ret = NULL;

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				if ((ret = devfs_iterate_topology(node1,
						callback, arg1)))
					return ret;
			}
		}
	}

	ret = callback(node, arg1);
	return ret;
}

/*
 * devfs_reaperp_callback() is used via devfs_iterate_topology() to
 * recursively walk the whole topology, unlinking and freeing all
 * devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
	devfs_unlinkp(node);
	devfs_freep(node);

	return NULL;
}

static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
	if (node->node_type == Pdir) {
		if (node->nchildren == 2) {
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return NULL;
}

static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
	if ((node->node_type == Plink) && (node->link_target == target)) {
		devfs_unlinkp(node);
		devfs_freep(node);
	}

	return NULL;
}

/*
 * devfs_gc() is the devfs garbage collector.  It takes care of unlinking
 * and freeing a node, but also removes empty directories and links that
 * link via the devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	if (node->nlinks > 0)
		devfs_iterate_topology(root_node,
		    (devfs_iterate_callback_t *)devfs_gc_links_callback, node);

	devfs_unlinkp(node);
	devfs_iterate_topology(root_node,
	    (devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);

	devfs_freep(node);

	return 0;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device.  The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	reference_dev(dev);
	devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

	return 0;
}
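/*
 * Illustrative sketch (not compiled): drivers normally reach this entry
 * point indirectly through make_dev(), e.g.
 *
 *	cdev_t dev = make_dev(&mydriver_ops, unit, UID_ROOT, GID_WHEEL,
 *			      0600, "mydriver%d", unit);
 *
 * which allocates the cdev and hands it to the devfs core via
 * devfs_create_dev().  (mydriver_ops and "mydriver%d" are hypothetical
 * names.)
 */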
/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
	devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
	return 0;
}

/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_destroy_subnames() is the synchronous entry point for device
 * destruction by subname.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_destroy_subnames(char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_load = name;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
	devfs_msg_put(msg);
	return 0;
}

int
devfs_clr_subnames_flag(char *name, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.name = name;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

int
devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.name = name;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_create_all_dev() is the asynchronous entry point to trigger
 * device node creation.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
	devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
	return 0;
}

/*
 * devfs_destroy_dev_by_ops() is the asynchronous entry point to destroy
 * all devices with a specific set of dev_ops and minor.  It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
	devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
	return 0;
}

/*
 * devfs_clone_handler_add() is the synchronous entry point to add a new
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = nhandler;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
	devfs_msg_put(msg);
	return 0;
}
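/*
 * Illustrative sketch (not compiled): a cloning driver would register
 * its base name once at attach time and remove it again on detach:
 *
 *	devfs_clone_handler_add("mydev", mydev_clone);
 *	...
 *	devfs_clone_handler_del("mydev");
 *
 * where mydev_clone is the driver's d_clone_t callback ("mydev" and
 * mydev_clone are hypothetical names).
 */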
/*
 * devfs_clone_handler_del() is the synchronous entry point to remove a
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = NULL;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_find_device_by_name() is the synchronous entry point to find a
 * device given its name.  It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
	cdev_t found = NULL;
	devfs_msg_t msg;
	char *target;
	__va_list ap;

	if (fmt == NULL)
		return NULL;

	__va_start(ap, fmt);
	kvasnrprintf(&target, PATH_MAX, 10, fmt, ap);
	__va_end(ap);

	msg = devfs_msg_get();
	msg->mdv_name = target;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);
	kvasfree(&target);

	return found;
}

/*
 * devfs_find_device_by_udev() is the synchronous entry point to find a
 * device given its udev number.  It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
	cdev_t found = NULL;
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_udev = udev;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_find_device_by_udev found? %s -end:3-\n",
		    ((found) ? found->si_name : "NO"));
	return found;
}

struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
	struct vnode *vp = NULL;
	devfs_msg_t msg;

	if (mp == NULL)
		return NULL;

	msg = devfs_msg_get();
	msg->mdv_ino.mp = mp;
	msg->mdv_ino.ino = target;
	msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
	vp = msg->mdv_ino.vp;
	/* The worker returns NULL if no node has the given inode */
	if (vp)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	devfs_msg_put(msg);

	return vp;
}

/*
 * devfs_make_alias() is the asynchronous entry point to register an alias
 * for a device.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_make_alias(const char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kstrdup(name, M_DEVFS);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
	return 0;
}
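/*
 * Illustrative sketch (not compiled): an alias gives an existing cdev a
 * second name in the topology, e.g.
 *
 *	devfs_make_alias("mydisk", dev);
 *
 * would make "dev" also appear as /dev/mydisk on every devfs mount
 * ("mydisk" is a hypothetical name; the propagation happens in the core
 * thread via devfs_make_alias_worker()).
 */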
/*
 * devfs_apply_rules() is the asynchronous entry point to trigger
 * application of all rules.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_apply_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);

	return 0;
}

/*
 * devfs_reset_rules() is the asynchronous entry point to trigger a reset
 * of all rules.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_reset_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_RESET_RULES, new_name);

	return 0;
}

/*
 * devfs_scan_callback() is the asynchronous entry point to call a
 * callback on all cdevs.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_scan_callback(devfs_scan_t *callback)
{
	devfs_msg_t msg;

	KKASSERT(sizeof(callback) == sizeof(void *));

	msg = devfs_msg_get();
	msg->mdv_load = callback;
	msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	devfs_msg_put((devfs_msg_t)msg);
}

/*
 * devfs_msg_get() allocates a new devfs msg and returns it.
 */
devfs_msg_t
devfs_msg_get(void)
{
	return objcache_get(devfs_msg_cache, M_WAITOK);
}

/*
 * devfs_msg_put() deallocates a given devfs msg.
 */
int
devfs_msg_put(devfs_msg_t msg)
{
	objcache_put(devfs_msg_cache, msg);
	return 0;
}

/*
 * devfs_msg_send() is the generic asynchronous message sending facility
 * for devfs.  By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously.
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	if (port->mpu_td == curthread) {
		devfs_msg_exec(devfs_msg);
		lwkt_replymsg(&devfs_msg->hdr, 0);
	} else {
		lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	}
}

/*
 * devfs_msg_send_sync() is the generic synchronous message sending
 * facility for devfs.  It initializes a local reply port and waits
 * for the core's answer.  This answer is then returned.
 */
devfs_msg_t
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
	struct lwkt_port rep_port;
	devfs_msg_t msg_incoming;
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	msg_incoming = lwkt_waitport(&rep_port, 0);

	return msg_incoming;
}

/*
 * Sends a message with a generic argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_load = load;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_name = name;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a mount argument.
 */
void
devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_mnt = mnt;
	devfs_msg_send(cmd, devfs_msg);
}
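/*
 * Illustrative note: the synchronous entry points above all follow the
 * same round-trip pattern against the core thread:
 *
 *	msg = devfs_msg_get();
 *	msg->mdv_... = ...;		(fill in the relevant union member)
 *	msg = devfs_msg_send_sync(DEVFS_..., msg);
 *	...				(read the answer out of msg)
 *	devfs_msg_put(msg);
 */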
/*
 * Sends a message with an ops argument.
 */
void
devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_ops.ops = ops;
	devfs_msg->mdv_ops.minor = minor;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a clone handler argument.
 */
void
devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_chandler.name = name;
	devfs_msg->mdv_chandler.nhandler = handler;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a device argument.
 */
void
devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_dev.dev = dev;
	devfs_msg->mdv_dev.uid = uid;
	devfs_msg->mdv_dev.gid = gid;
	devfs_msg->mdv_dev.perms = perms;

	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a link argument.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_link.name = name;
	devfs_msg->mdv_link.target = target;
	devfs_msg->mdv_link.mp = mp;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * devfs_msg_core() is the main devfs thread.  It handles all incoming
 * messages and calls the relevant worker functions.  By using messages
 * it is assured that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
	devfs_msg_t msg;

	devfs_run = 1;
	lwkt_initport_thread(&devfs_msg_port, curthread);
	wakeup(td_core);

	while (devfs_run) {
		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_msg_core, new msg: %x\n",
			    (unsigned int)msg->hdr.u.ms_result);
		devfs_msg_exec(msg);
		lwkt_replymsg(&msg->hdr, 0);
	}
	wakeup(td_core);
	lwkt_exit();
}

static void
devfs_msg_exec(devfs_msg_t msg)
{
	struct devfs_mnt_data *mnt;
	struct devfs_node *node;
	cdev_t dev;

	/*
	 * Acquire the devfs lock to ensure safety of all called functions
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	switch (msg->hdr.u.ms_result) {
	case DEVFS_DEVICE_CREATE:
		dev = msg->mdv_dev.dev;
		devfs_create_dev_worker(dev,
					msg->mdv_dev.uid,
					msg->mdv_dev.gid,
					msg->mdv_dev.perms);
		break;
	case DEVFS_DEVICE_DESTROY:
		dev = msg->mdv_dev.dev;
		devfs_destroy_dev_worker(dev);
		break;
	case DEVFS_DESTROY_SUBNAMES:
		devfs_destroy_subnames_worker(msg->mdv_load);
		break;
	case DEVFS_DESTROY_DEV_BY_OPS:
		devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
						msg->mdv_ops.minor);
		break;
	case DEVFS_CREATE_ALL_DEV:
		node = (struct devfs_node *)msg->mdv_load;
		devfs_create_all_dev_worker(node);
		break;
	case DEVFS_MOUNT_ADD:
		mnt = msg->mdv_mnt;
		TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
		devfs_create_all_dev_worker(mnt->root_node);
		break;
	case DEVFS_MOUNT_DEL:
		mnt = msg->mdv_mnt;
		TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
		devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
				       NULL);
		if (mnt->leak_count) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Leaked %ld devfs_node elements!\n",
				    mnt->leak_count);
		}
		break;
	case DEVFS_CHANDLER_ADD:
		devfs_chandler_add_worker(msg->mdv_chandler.name,
					  msg->mdv_chandler.nhandler);
		break;
	case DEVFS_CHANDLER_DEL:
		devfs_chandler_del_worker(msg->mdv_chandler.name);
		break;
	case DEVFS_FIND_DEVICE_BY_NAME:
		devfs_find_device_by_name_worker(msg);
		break;
	case DEVFS_FIND_DEVICE_BY_UDEV:
		devfs_find_device_by_udev_worker(msg);
		break;
	case DEVFS_MAKE_ALIAS:
		devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_APPLY_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 1);
		break;
	case DEVFS_RESET_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 0);
		break;
	case DEVFS_SCAN_CALLBACK:
		devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
		break;
	case DEVFS_CLR_SUBNAMES_FLAG:
		devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
					       msg->mdv_flags.flag);
		break;
	case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
		devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
							   msg->mdv_flags.flag);
		break;
	case DEVFS_INODE_TO_VNODE:
		msg->mdv_ino.vp = devfs_iterate_topology(
		    DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
		    (devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
		    &msg->mdv_ino.ino);
		break;
	case DEVFS_TERMINATE_CORE:
		devfs_run = 0;
		break;
	case DEVFS_SYNC:
		break;
	default:
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs_msg_core: unknown message "
			    "received at core\n");
		break;
	}
	lockmgr(&devfs_lock, LK_RELEASE);
}

/*
 * Worker function to insert a new dev into the dev list and initialize its
 * permissions.  It also calls devfs_propagate_dev() which in turn
 * propagates the change to all mount points.
 *
 * The passed dev is already referenced.  This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	KKASSERT(dev);

	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_perms = perms;

	devfs_link_dev(dev);
	devfs_propagate_dev(dev, 1);

	return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev() which in turn propagates the change
 * to all mount points.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
	int error;

	KKASSERT(dev);
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	error = devfs_unlink_dev(dev);
	devfs_propagate_dev(dev, 0);
	if (error == 0)
		release_dev(dev);	/* link ref */
	release_dev(dev);
	release_dev(dev);

	return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker() for the actual destruction.
 */
static int
devfs_destroy_subnames_worker(char *name)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			devfs_destroy_dev_worker(dev);
		}
	}
	return 0;
}

static int
devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			dev->si_flags &= ~flag;
		}
	}

	return 0;
}

static int
devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
{
	cdev_t dev, dev1;
	size_t len = strlen(name);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if ((!strncmp(dev->si_name, name, len)) &&
		    (dev->si_name[len] != '\0')) {
			if (!(dev->si_flags & flag)) {
				devfs_destroy_dev_worker(dev);
			}
		}
	}

	return 0;
}
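/*
 * Note on the matching rule used by the three workers above: a device
 * matches if "name" is a strict prefix of its si_name.  E.g. the name
 * "ptyp" would match devices "ptyp0" and "ptyp1" but not "ptyp" itself,
 * because of the si_name[len] != '\0' check.  (Device names here are
 * hypothetical.)
 */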
/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
	cdev_t dev;

	KKASSERT(root);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		devfs_create_device_node(root, dev, NULL, NULL);
	}

	return 0;
}

/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor.  If minor is less than 0, it is not matched
 * against.  It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
	cdev_t dev, dev1;

	KKASSERT(ops);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_ops != ops)
			continue;
		if ((minor < 0) || (dev->si_uminor == minor)) {
			devfs_destroy_dev_worker(dev);
		}
	}

	return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 */
static int
devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
{
	struct devfs_clone_handler *chandler = NULL;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;

		if (!memcmp(chandler->name, name, len)) {
			/* Clonable basename already exists */
			return 1;
		}
	}

	chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
	chandler->name = kstrdup(name, M_DEVFS);
	chandler->namlen = len;
	chandler->nhandler = nhandler;

	TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
	return 0;
}

/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(const char *name)
{
	struct devfs_clone_handler *chandler, *chandler2;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
		if (chandler->namlen != len)
			continue;
		if (memcmp(chandler->name, name, len))
			continue;

		TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
		kfree(chandler->name, M_DEVFS);
		kfree(chandler, M_DEVFS);
		break;
	}

	return 0;
}

/*
 * Worker function that finds a given device name and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
	struct devfs_alias *alias;
	cdev_t dev;
	cdev_t found = NULL;

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
			found = dev;
			break;
		}
	}
	if (found == NULL) {
		TAILQ_FOREACH(alias, &devfs_alias_list, link) {
			if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
				found = alias->dev_target;
				break;
			}
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that finds a given device udev and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
{
	cdev_t dev, dev1;
	cdev_t found = NULL;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
			found = dev;
			break;
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that inserts a given alias into the
 * alias list, and propagates the alias to all mount
 * points.
 */
static int
devfs_make_alias_worker(struct devfs_alias *alias)
{
	struct devfs_alias *alias2;
	size_t len = strlen(alias->name);
	int found = 0;

	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
		if (len != alias2->namlen)
			continue;

		if (!memcmp(alias->name, alias2->name, len)) {
			found = 1;
			break;
		}
	}

	if (!found) {
		/*
		 * The alias doesn't exist yet, so we add it to the alias list
		 */
		TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
		devfs_alias_propagate(alias);
	} else {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Warning: duplicate devfs_make_alias for %s\n",
			    alias->name);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	}

	return 0;
}

/*
 * Function that removes and frees all aliases.
 */
static int
devfs_alias_reap(void)
{
	struct devfs_alias *alias, *alias2;

	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		TAILQ_REMOVE(&devfs_alias_list, alias, link);
		kfree(alias->name, M_DEVFS);	/* free the kstrdup'd name too */
		kfree(alias, M_DEVFS);
	}
	return 0;
}

/*
 * Function that removes an alias matching a specific cdev and frees
 * it accordingly.
 */
static int
devfs_alias_remove(cdev_t dev)
{
	struct devfs_alias *alias, *alias2;

	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		if (alias->dev_target == dev) {
			TAILQ_REMOVE(&devfs_alias_list, alias, link);
			kfree(alias->name, M_DEVFS);	/* free the kstrdup'd name too */
			kfree(alias, M_DEVFS);
		}
	}
	return 0;
}

/*
 * This function propagates a new alias to all mount points.
 */
static int
devfs_alias_propagate(struct devfs_alias *alias)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		devfs_alias_apply(mnt->root_node, alias);
	}
	return 0;
}

/*
 * This function is a recursive function iterating through
 * all device nodes in the topology and, if applicable,
 * creating the relevant alias for a device node.
 */
static int
devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
{
	struct devfs_node *node1, *node2;

	KKASSERT(alias != NULL);

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_alias_apply(node1, alias);
			}
		}
	} else {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}

/*
 * This function checks if any alias possibly is applicable
 * to the given node.  If so, the alias is created.
 */
static int
devfs_alias_check_create(struct devfs_node *node)
{
	struct devfs_alias *alias;

	TAILQ_FOREACH(alias, &devfs_alias_list, link) {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}
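/*
 * Illustrative note: alias names may contain slashes.  Since
 * devfs_alias_create() (below) runs the name through
 * devfs_resolve_name_path() and devfs_resolve_or_create_path(), an
 * alias such as "disk/by-id/mydisk" would transparently create the
 * intermediate directories ("disk/by-id/mydisk" is a hypothetical
 * name).
 */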
/*
 * This function creates an alias with a given name
 * linking to a given devfs node.  It also increments
 * the link count on the target node.
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
	struct mount *mp = target->mp;
	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
	struct devfs_node *linknode;
	struct hotplug_device *hpdev;
	char *create_path = NULL;
	char *name;
	char *name_buf;
	int result = 0;

	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);

	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Node already exists: %s "
			    "(devfs_make_alias_worker)!\n",
			    name);
		result = 1;
		goto done;
	}

	linknode = devfs_allocp(Plink, name, parent, mp, NULL);
	if (linknode == NULL) {
		result = 1;
		goto done;
	}

	linknode->link_target = target;
	target->nlinks++;

	if (rule_based)
		linknode->flags |= DEVFS_RULE_CREATED;

done:
	/* hotplug handler */
	if (devfs_node_added) {
		hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP, M_WAITOK);
		hpdev->dev = target->d_dev;
		hpdev->name = name_orig;
		devfs_node_added(hpdev);
		kfree(hpdev, M_TEMP);
	}
	kfree(name_buf, M_TEMP);
	return (result);
}

/*
 * This function is called by the core and handles mount point strings.
 * It applies or resets the rules, via devfs_iterate_topology(), either
 * on all mount points or only on a specific one.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
	struct devfs_mnt_data *mnt;

	if (mountto[0] == '*') {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			devfs_iterate_topology(mnt->root_node,
			    (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
			    NULL);
		}
	} else {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
				devfs_iterate_topology(mnt->root_node,
				    (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
				    NULL);
				break;
			}
		}
	}

	kfree(mountto, M_DEVFS);
	return 0;
}

/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback)
{
	cdev_t dev, dev1;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		callback(dev);
	}

	return 0;
}

/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
			    size_t name_len, int create)
{
	struct devfs_node *node, *found = NULL;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (name_len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
			found = node;
			break;
		}
	}

	if ((found == NULL) && (create)) {
		found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
	}

	return found;
}
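/*
 * Illustrative note: devfs_resolve_or_create_path() (below) walks the
 * path one '/'-separated component at a time, calling
 * devfs_resolve_or_create_dir() for each.  Resolving "a/b/c" with
 * create=1 on the root node therefore returns the node for "c",
 * creating "a", "b" and "c" along the way if they do not exist yet.
 */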
/*
 * This function tries to resolve a complete path.  If creation is
 * requested, any part of the path that cannot be resolved (because it
 * doesn't exist) is created.
 */
struct devfs_node *
devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
{
	struct devfs_node *node = parent;
	char *buf;
	size_t idx = 0;

	if (path == NULL)
		return parent;

	buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	while (*path && idx < PATH_MAX - 1) {
		if (*path != '/') {
			buf[idx++] = *path;
		} else {
			buf[idx] = '\0';
			node = devfs_resolve_or_create_dir(node, buf, idx, create);
			if (node == NULL) {
				kfree(buf, M_TEMP);
				return NULL;
			}
			idx = 0;
		}
		++path;
	}
	buf[idx] = '\0';
	node = devfs_resolve_or_create_dir(node, buf, idx, create);
	kfree(buf, M_TEMP);
	return (node);
}

/*
 * Takes a full path and strips it into a directory path and a name.
 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp.  It
 * requires a working buffer with enough size to keep the whole
 * fullpath.
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
	char *name = NULL;
	char *path = NULL;
	size_t len = strlen(fullpath) + 1;
	int i;

	KKASSERT((fullpath != NULL) && (buf != NULL));
	KKASSERT((pathp != NULL) && (namep != NULL));

	memcpy(buf, fullpath, len);

	for (i = len-1; i >= 0; i--) {
		if (buf[i] == '/') {
			buf[i] = '\0';
			name = &(buf[i+1]);
			path = buf;
			break;
		}
	}

	*pathp = path;

	if (name) {
		*namep = name;
	} else {
		*namep = buf;
	}

	return 0;
}

/*
 * This function creates a new devfs node for a given device.  It can
 * handle a complete path as device name, and accordingly creates
 * the path and the final device node.
 *
 * The reference count on the passed dev remains unchanged.
 */
struct devfs_node *
devfs_create_device_node(struct devfs_node *root, cdev_t dev,
			 char *dev_name, char *path_fmt, ...)
{
	struct devfs_node *parent, *node = NULL;
	struct hotplug_device *hpdev;
	char *path = NULL;
	char *name;
	char *name_buf;
	__va_list ap;
	int i, found;
	char *create_path = NULL;
	char *names = "pqrsPQRS";

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	if (path_fmt != NULL) {
		__va_start(ap, path_fmt);
		kvasnrprintf(&path, PATH_MAX, 10, path_fmt, ap);
		__va_end(ap);
	}

	parent = devfs_resolve_or_create_path(root, path, 1);
	KKASSERT(parent);

	devfs_resolve_name_path(
	    ((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
	    name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);

	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
			    "DEVICE %s ALREADY EXISTS!!! "
			    "Ignoring creation request.\n", name);
		goto out;
	}

	node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
	nanotime(&parent->mtime);

	/*
	 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
	 * directory
	 */
	if ((dev) && (strlen(dev->si_name) >= 4) &&
	    (!memcmp(dev->si_name, "ptm/", 4))) {
		node->parent->flags |= DEVFS_HIDDEN;
		node->flags |= DEVFS_HIDDEN;
	}

	/*
	 * Ugly pty magic, to tag pty devices as such and hide them if needed.
	 */
	if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
		node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);

	if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
		found = 0;
		for (i = 0; i < strlen(names); i++) {
			if (name[3] == names[i]) {
				found = 1;
				break;
			}
		}
		if (found)
			node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
	}
	/* hotplug handler */
	if (devfs_node_added) {
		hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP, M_WAITOK);
		hpdev->dev = node->d_dev;
		hpdev->name = node->d_dev->si_name;
		devfs_node_added(hpdev);
		kfree(hpdev, M_TEMP);
	}

out:
	kfree(name_buf, M_TEMP);
	kvasfree(&path);
	return node;
}
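/*
 * Illustrative sketch (not compiled): passing a path format creates the
 * intermediate directories automatically, e.g.
 *
 *	devfs_create_device_node(root, dev, NULL, "mybus/%d", unit);
 *
 * places the node for dev->si_name under the directory "mybus/<unit>"
 * ("mybus/%d" and unit are hypothetical).
 */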
/*
 * This function finds a given device node in the topology with a given
 * cdev.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
	if ((node->node_type == Pdev) && (node->d_dev == target)) {
		return node;
	}

	return NULL;
}

/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
	struct devfs_node *node, *found = NULL;
	size_t len = strlen(target);

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(node->d_dir.d_name, target, len)) {
			found = node;
			break;
		}
	}

	return found;
}

static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
{
	struct vnode *vp = NULL;
	ino_t target = *inop;

	if (node->d_dir.d_ino == target) {
		if (node->v_node) {
			vp = node->v_node;
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
			vn_unlock(vp);
		} else {
			devfs_allocv(&vp, node);
			vn_unlock(vp);
		}
	}

	return vp;
}

/*
 * This function takes a cdev and removes its devfs node in the
 * given topology.  The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
	struct devfs_node *node, *parent;
	char *name;
	char *name_buf;
	char *create_path = NULL;

	KKASSERT(target);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	ksnprintf(name_buf, PATH_MAX, "%s", target->si_name);

	devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(root, create_path, 0);
	else
		parent = root;

	if (parent == NULL) {
		kfree(name_buf, M_TEMP);	/* don't leak the work buffer */
		return 1;
	}

	node = devfs_find_device_node_by_name(parent, name);

	if (node) {
		nanotime(&node->parent->mtime);
		devfs_gc(node);
	}

	kfree(name_buf, M_TEMP);

	return 0;
}

/*
 * Just set perms and ownership for given node.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
		u_short mode, u_long flags)
{
	node->mode = mode;
	node->uid = uid;
	node->gid = gid;

	return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points.  Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (attach) {
			/* Device is being attached */
			devfs_create_device_node(mnt->root_node, dev,
						 NULL, NULL);
		} else {
			/* Device is being detached */
			devfs_alias_remove(dev);
			devfs_destroy_device_node(mnt->root_node, dev);
		}
	}
	return 0;
}
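/*
 * Note: devfs_clone() (below) runs with the devfs lock held; it
 * temporarily releases the lock around devfs_config() so that queued
 * devfs messages are drained before the clone handler is invoked.
 */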
/*
 * devfs_clone() looks up the clone handler registered for the given
 * base name and, if one exists, calls it to obtain a new device, which
 * is then returned.  NULL is returned if no handler matches or the
 * handler fails.
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
	    struct ucred *cred)
{
	int error;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;
		if ((!memcmp(chandler->name, name, len)) && (chandler->nhandler)) {
			lockmgr(&devfs_lock, LK_RELEASE);
			devfs_config();
			lockmgr(&devfs_lock, LK_EXCLUSIVE);

			ap.a_head.a_dev = dev;
			ap.a_dev = NULL;
			ap.a_name = name;
			ap.a_namelen = len;
			ap.a_mode = mode;
			ap.a_cred = cred;
			error = (chandler->nhandler)(&ap);
			if (error)
				continue;

			return ap.a_dev;
		}
	}

	return NULL;
}

/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t ret;

	spin_lock_wr(&ino_lock);
	ret = d_ino++;
	spin_unlock_wr(&ino_lock);

	return ret;
}

/*
 * Allocates a new cdev and initializes its most basic
 * fields.
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
{
	cdev_t dev = sysref_alloc(&cdev_sysref_class);

	sysref_activate(&dev->si_sysref);
	reference_dev(dev);
	bzero(dev, offsetof(struct cdev, si_sysref));

	dev->si_uid = 0;
	dev->si_gid = 0;
	dev->si_perms = 0;
	dev->si_drv1 = NULL;
	dev->si_drv2 = NULL;
	dev->si_lastread = 0;		/* time_second */
	dev->si_lastwrite = 0;		/* time_second */

	dev->si_ops = ops;
	dev->si_flags = 0;
	dev->si_umajor = 0;
	dev->si_uminor = minor;
	dev->si_bops = bops;
	/* If there is a backing device, we reference its ops */
	dev->si_inode = makeudev(
		    devfs_reference_ops((bops)?(bops):(ops)),
		    minor);

	return dev;
}
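/*
 * Illustrative note: as set up above, the inode/udev of a cdev encodes
 * the unique per-dev_ops id (handed out by devfs_reference_ops(),
 * below) as the major part and the minor number as-is, i.e. roughly
 * udev = makeudev(ops_id, minor).
 */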
static void
devfs_cdev_terminate(cdev_t dev)
{
	int locked = 0;

	/* Check if it is locked already; if not, we acquire the devfs lock */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	}

	/* Propagate destruction, just in case */
	devfs_propagate_dev(dev, 0);

	/* If we acquired the lock, we also get rid of it */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/* If there is a backing device, we release the backing device's ops */
	devfs_release_ops((dev->si_bops)?(dev->si_bops):(dev->si_ops));

	/* Finally destroy the device */
	sysref_put(&dev->si_sysref);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
	dev->si_flags |= SI_DEVFS_LINKED;
	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

	return 0;
}

/*
 * Removes a given cdev from the dev list.  The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	if (!found) {
		found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS, M_WAITOK);
		found->ops = ops;
		found->ref_count = 0;
		TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
	}

	KKASSERT(found);

	if (found->ref_count == 0) {
		found->id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (found->id == -1) {
			/* Ran out of unique ids */
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = found->id;
	++found->ref_count;

	return unit;
}

void
devfs_release_ops(struct dev_ops *ops)
{
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	KKASSERT(found);

	--found->ref_count;

	if (found->ref_count == 0) {
		TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
		kfree(found, M_DEVFS);
	}
}

void
devfs_config(void)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
	devfs_msg_put(msg);
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread.  Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
					   NULL, NULL, NULL,
					   objcache_malloc_alloc,
					   objcache_malloc_free,
					   &devfs_node_malloc_args);

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_msg_malloc_args);

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_dev_malloc_args);

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);

	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "devfs_msg_core");

	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}
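/*
 * Note on the startup handshake used above: devfs_init() tsleep()s on
 * td_core after creating the core thread, and devfs_msg_core() issues a
 * wakeup() only after its message port has been initialized, so the
 * port is guaranteed to be usable once devfs_init() returns.
 */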
/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches.  Also removes aliases that are no longer needed.
 */
static void
devfs_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);

	/* Wait for the core thread to acknowledge termination */
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
	tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);

	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

	/* Destroy the objcaches */
	objcache_destroy(devfs_msg_cache);
	objcache_destroy(devfs_node_cache);
	objcache_destroy(devfs_dev_cache);

	devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) in
 * finding the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
	udev_t udev;
	cdev_t found;
	int error;

	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
		return (error);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);

	if (udev == NOUDEV)
		return (EINVAL);

	if ((found = devfs_find_device_by_udev(udev)) == NULL)
		return (ENOENT);

	return (SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
	    NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
	   0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
	devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
	  devfs_uninit, NULL);
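/*
 * Illustrative note: debug output can be raised at runtime via the
 * sysctl, e.g. "sysctl vfs.devfs.debug=<level>", or at boot time
 * through the loader tunable vfs.devfs.debug; devfs_debug() prints a
 * message whenever its level argument is <= the configured value.
 */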