/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/sysref2.h>
#include <sys/devfs.h>
#include <sys/devfs_rules.h>
#include <sys/hotplug.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DECLARE_CLONE_BITMAP(ops_id);

/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
        .name = "cdev",
        .mtype = M_DEVFS,
        .proto = SYSREF_PROTO_DEV,
        .offset = offsetof(struct cdev, si_sysref),
        .objsize = sizeof(struct cdev),
        .mag_capacity = 32,
        .flags = 0,
        .ops = {
                .terminate = (sysref_terminate_func_t)devfs_cdev_terminate
        }
};

static struct objcache *devfs_node_cache;
static struct objcache *devfs_msg_cache;
static struct objcache *devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
        sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
        sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
        sizeof(struct cdev), M_DEVFS };

static struct devfs_dev_head devfs_dev_list =
                TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
                TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
                TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
                TAILQ_HEAD_INITIALIZER(devfs_alias_list);
static struct devfs_dev_ops_head devfs_dev_ops_list =
                TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);

struct lock devfs_lock;
static struct lwkt_port devfs_dispose_port;
static struct lwkt_port devfs_msg_port;
static struct thread *td_core;

static struct spinlock ino_lock;
static ino_t d_ino;
static int devfs_debug_enable;
static int devfs_run;

static ino_t devfs_fetch_ino(void);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_subnames_worker(char *);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(const char *, d_clone_t *);
static int devfs_chandler_del_worker(const char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static int devfs_apply_reset_rules_caller(char *, int);

static int devfs_scan_callback_worker(devfs_scan_t *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
                char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_subnames_flag_worker(char *, uint32_t);
static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t);

static void *devfs_reaperp_callback(struct devfs_node *, void *);
static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);
/* hotplug */
void (*devfs_node_added)(struct hotplug_device *) = NULL;
void (*devfs_node_removed)(struct hotplug_device *) = NULL;

/*
 * devfs_debug() is a SYSCTL- and TUNABLE-controlled debug output function
 * using kvprintf.
 */
int
devfs_debug(int level, char *fmt, ...)
{
        __va_list ap;

        __va_start(ap, fmt);
        if (level <= devfs_debug_enable)
                kvprintf(fmt, ap);
        __va_end(ap);

        return 0;
}

/*
 * devfs_allocp() allocates a new devfs node with the specified
 * parameters.  The node is automatically linked into the topology
 * if a parent is specified.  It also applies the relevant rules and
 * aliases to the new node.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
             struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
        struct devfs_node *node = NULL;
        size_t namlen = strlen(name);

        node = objcache_get(devfs_node_cache, M_WAITOK);
        bzero(node, sizeof(*node));

        atomic_add_long(&(DEVFS_MNTDATA(mp)->leak_count), 1);

        node->d_dev = NULL;
        node->nchildren = 1;
        node->mp = mp;
        node->d_dir.d_ino = devfs_fetch_ino();

        /*
         * Cookie jar for children.  Leave 0 and 1 for '.' and '..' entries
         * respectively.
         */
        node->cookie_jar = 2;

        /*
         * Access Control members
         */
        node->mode = DEVFS_DEFAULT_MODE;
        node->uid = DEVFS_DEFAULT_UID;
        node->gid = DEVFS_DEFAULT_GID;

        switch (devfsnodetype) {
        case Proot:
                /*
                 * Ensure that we don't recycle the root vnode by marking
                 * it as linked into the topology.
                 */
                node->flags |= DEVFS_NODE_LINKED;
                /* FALLTHROUGH */
        case Pdir:
                TAILQ_INIT(DEVFS_DENODE_HEAD(node));
                node->d_dir.d_type = DT_DIR;
                node->nchildren = 2;
                break;

        case Plink:
                node->d_dir.d_type = DT_LNK;
                break;

        case Preg:
                node->d_dir.d_type = DT_REG;
                break;

        case Pdev:
                if (dev != NULL) {
                        node->d_dir.d_type = DT_CHR;
                        node->d_dev = dev;

                        node->mode = dev->si_perms;
                        node->uid = dev->si_uid;
                        node->gid = dev->si_gid;

                        devfs_alias_check_create(node);
                }
                break;

        default:
                panic("devfs_allocp: unknown node type");
        }

        node->v_node = NULL;
        node->node_type = devfsnodetype;

        /* Initialize the dirent structure of each devfs vnode */
        KKASSERT(namlen < 256);
        node->d_dir.d_namlen = namlen;
        node->d_dir.d_name = kmalloc(namlen + 1, M_DEVFS, M_WAITOK);
        memcpy(node->d_dir.d_name, name, namlen);
        node->d_dir.d_name[namlen] = '\0';

        /* Initialize the parent node element */
        node->parent = parent;

        /* Apply rules */
        devfs_rule_check_apply(node, NULL);

        /* Initialize *time members */
        nanotime(&node->atime);
        node->mtime = node->ctime = node->atime;

        /*
         * Associate with parent as last step, clean out namecache
         * reference.
         */
        if ((parent != NULL) &&
            ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
                parent->nchildren++;
                node->cookie = parent->cookie_jar++;
                node->flags |= DEVFS_NODE_LINKED;
                TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

                /* This forces negative namecache lookups to clear */
                ++mp->mnt_namecache_gen;
        }

        ++DEVFS_MNTDATA(mp)->file_count;

        return node;
}
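/*
 * Usage sketch (not from the original sources; `mp', `root' and `mydev'
 * are hypothetical): building a small subtree by hand, the way the
 * workers below do it.
 *
 *      struct devfs_node *dir;
 *
 *      dir = devfs_allocp(Pdir, "mysub", root, mp, NULL);
 *      if (dir != NULL)
 *              (void)devfs_allocp(Pdev, "mydev0", dir, mp, mydev);
 *
 * Linking, cookie assignment, rule application and alias creation all
 * happen inside devfs_allocp(); the in-file callers run with devfs_lock
 * held exclusively.
 */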
/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
        struct vnode *vp;
        int error = 0;

        KKASSERT(node);

try_again:
        while ((vp = node->v_node) != NULL) {
                error = vget(vp, LK_EXCLUSIVE);
                if (error != ENOENT) {
                        *vpp = vp;
                        goto out;
                }
        }

        if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
                goto out;

        vp = *vpp;

        if (node->v_node != NULL) {
                vp->v_type = VBAD;
                vx_put(vp);
                goto try_again;
        }

        vp->v_data = node;
        node->v_node = vp;

        switch (node->node_type) {
        case Proot:
                vsetflags(vp, VROOT);
                /* fall through */
        case Pdir:
                vp->v_type = VDIR;
                break;

        case Plink:
                vp->v_type = VLNK;
                break;

        case Preg:
                vp->v_type = VREG;
                break;

        case Pdev:
                vp->v_type = VCHR;
                KKASSERT(node->d_dev);

                vp->v_uminor = node->d_dev->si_uminor;
                vp->v_umajor = 0;

                v_associate_rdev(vp, node->d_dev);
                vp->v_ops = &node->mp->mnt_vn_spec_ops;
                break;

        default:
                panic("devfs_allocv: unknown node type");
        }

out:
        return error;
}

/*
 * devfs_allocvp() allocates both a devfs node (with the given settings)
 * and a vnode based on the newly created devfs node.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
              char *name, struct devfs_node *parent, cdev_t dev)
{
        struct devfs_node *node;

        node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

        if (node != NULL)
                devfs_allocv(vpp, node);
        else
                *vpp = NULL;

        return 0;
}

/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 */
int
devfs_freep(struct devfs_node *node)
{
        struct vnode *vp;

        KKASSERT(node);
        KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
                 (node->node_type == Proot));
        KKASSERT((node->flags & DEVFS_DESTROYED) == 0);

        atomic_subtract_long(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
        if (node->symlink_name) {
                kfree(node->symlink_name, M_DEVFS);
                node->symlink_name = NULL;
        }

        /*
         * Remove the node from the orphan list if it is still on it.
         */
        if (node->flags & DEVFS_ORPHANED)
                devfs_tracer_del_orphan(node);

        /*
         * Disassociate the vnode from the node.  This also prevents the
         * vnode's reclaim code from double-freeing the node.
         *
         * The vget is needed to safely modify the vp.  It also serves
         * to cycle the refs and terminate the vnode if it happens to
         * be inactive, otherwise namecache references may not get cleared.
         */
        while ((vp = node->v_node) != NULL) {
                if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
                        break;
                v_release_rdev(vp);
                vp->v_data = NULL;
                node->v_node = NULL;
                cache_inval_vp(vp, CINV_DESTROY);
                vput(vp);
        }
        if (node->d_dir.d_name) {
                kfree(node->d_dir.d_name, M_DEVFS);
                node->d_dir.d_name = NULL;
        }
        node->flags |= DEVFS_DESTROYED;

        --DEVFS_MNTDATA(node->mp)->file_count;

        objcache_put(devfs_node_cache, node);

        return 0;
}
/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by devfs_freep().
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
        struct devfs_node *parent;
        struct hotplug_device *hpdev;

        KKASSERT(node);

        /*
         * Add the node to the orphan list, so it is referenced somewhere
         * and we don't leak it.
         */
        devfs_tracer_add_orphan(node);

        parent = node->parent;

        /*
         * If the parent is known we can unlink the node out of the
         * topology.
         */
        if (parent) {
                TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
                parent->nchildren--;
                KKASSERT((parent->nchildren >= 0));
                node->flags &= ~DEVFS_NODE_LINKED;
        }

        /* hotplug handler */
        if (devfs_node_removed) {
                hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP,
                                M_WAITOK);
                hpdev->dev = node->d_dev;
                if (hpdev->dev)
                        hpdev->name = node->d_dev->si_name;
                devfs_node_removed(hpdev);
                kfree(hpdev, M_TEMP);
        }

        node->parent = NULL;
        return 0;
}

void *
devfs_iterate_topology(struct devfs_node *node,
                       devfs_iterate_callback_t *callback, void *arg1)
{
        struct devfs_node *node1, *node2;
        void *ret = NULL;

        if ((node->node_type == Proot) || (node->node_type == Pdir)) {
                if (node->nchildren > 2) {
                        TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
                                              link, node2) {
                                if ((ret = devfs_iterate_topology(node1,
                                                callback, arg1)))
                                        return ret;
                        }
                }
        }

        ret = callback(node, arg1);
        return ret;
}

/*
 * devfs_reaperp_callback() is a recursive callback that walks the entire
 * topology, unlinking and freeing all devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
        devfs_unlinkp(node);
        devfs_freep(node);

        return NULL;
}

static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
        if (node->node_type == Pdir) {
                /* An nchildren of 2 means the directory is empty */
                if (node->nchildren == 2) {
                        devfs_unlinkp(node);
                        devfs_freep(node);
                }
        }

        return NULL;
}

static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
        if ((node->node_type == Plink) && (node->link_target == target)) {
                devfs_unlinkp(node);
                devfs_freep(node);
        }

        return NULL;
}

/*
 * devfs_gc() is the devfs garbage collector.  It unlinks and frees the
 * given node, and also removes any empty directories and any links that
 * point, via the devfs auto-link mechanism, to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
        struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

        if (node->nlinks > 0)
                devfs_iterate_topology(root_node,
                    (devfs_iterate_callback_t *)devfs_gc_links_callback,
                    node);

        devfs_unlinkp(node);
        devfs_iterate_topology(root_node,
            (devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);

        devfs_freep(node);

        return 0;
}
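/*
 * Usage sketch for devfs_iterate_topology() (hypothetical callback, not
 * part of the original code): counting the nodes of a mount.  A callback
 * returning non-NULL aborts the walk early, so a pure visitor returns
 * NULL:
 *
 *      static void *
 *      count_cb(struct devfs_node *node, void *arg)
 *      {
 *              ++*(long *)arg;
 *              return NULL;
 *      }
 *
 *      long count = 0;
 *      devfs_iterate_topology(DEVFS_MNTDATA(mp)->root_node,
 *                             (devfs_iterate_callback_t *)count_cb, &count);
 */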
/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device.  The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
        reference_dev(dev);
        devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

        return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
        devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
        return 0;
}

/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_mnt = mnt;
        msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_mnt = mnt;
        msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * devfs_destroy_subnames() is the synchronous entry point for device
 * destruction by subname.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_destroy_subnames(char *name)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_load = name;
        msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
        devfs_msg_put(msg);
        return 0;
}

int
devfs_clr_subnames_flag(char *name, uint32_t flag)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_flags.name = name;
        msg->mdv_flags.flag = flag;
        msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
        devfs_msg_put(msg);

        return 0;
}

int
devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_flags.name = name;
        msg->mdv_flags.flag = flag;
        msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * devfs_create_all_dev() is the asynchronous entry point to trigger
 * device node creation.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
        devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
        return 0;
}

/*
 * devfs_destroy_dev_by_ops() is the asynchronous entry point to destroy
 * all devices with a specific set of dev_ops and minor.  It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
        devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
        return 0;
}
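/*
 * Typical driver usage (sketch; `foo_ops' and `unit' are hypothetical):
 * drivers normally reach the asynchronous entry points above through
 * make_dev(), which allocates the cdev and ends up in devfs_create_dev();
 * teardown is the mirror image:
 *
 *      dev = make_dev(&foo_ops, unit, UID_ROOT, GID_WHEEL, 0600,
 *                     "foo%d", unit);
 *      ...
 *      destroy_dev(dev);                       single device, or:
 *      devfs_destroy_dev_by_ops(&foo_ops, -1); everything using foo_ops
 */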
/*
 * devfs_clone_handler_add() is the synchronous entry point to add a new
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_chandler.name = name;
        msg->mdv_chandler.nhandler = nhandler;
        msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
        devfs_msg_put(msg);
        return 0;
}

/*
 * devfs_clone_handler_del() is the synchronous entry point to remove a
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_chandler.name = name;
        msg->mdv_chandler.nhandler = NULL;
        msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
        devfs_msg_put(msg);
        return 0;
}

/*
 * devfs_find_device_by_name() is the synchronous entry point to find a
 * device given its name.  It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
        cdev_t found = NULL;
        devfs_msg_t msg;
        char *target;
        __va_list ap;

        if (fmt == NULL)
                return NULL;

        __va_start(ap, fmt);
        kvasnrprintf(&target, PATH_MAX, 10, fmt, ap);
        __va_end(ap);

        msg = devfs_msg_get();
        msg->mdv_name = target;
        msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
        found = msg->mdv_cdev;
        devfs_msg_put(msg);
        kvasfree(&target);

        return found;
}

/*
 * devfs_find_device_by_udev() is the synchronous entry point to find a
 * device given its udev number.  It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
        cdev_t found = NULL;
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_udev = udev;
        msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
        found = msg->mdv_cdev;
        devfs_msg_put(msg);

        devfs_debug(DEVFS_DEBUG_DEBUG,
                    "devfs_find_device_by_udev found? %s -end:3-\n",
                    ((found) ? found->si_name : "NO"));
        return found;
}

struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
        struct vnode *vp = NULL;
        devfs_msg_t msg;

        if (mp == NULL)
                return NULL;

        msg = devfs_msg_get();
        msg->mdv_ino.mp = mp;
        msg->mdv_ino.ino = target;
        msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
        vp = msg->mdv_ino.vp;
        /* The worker returns NULL if no node carries the requested inode */
        if (vp)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        devfs_msg_put(msg);

        return vp;
}

/*
 * devfs_make_alias() is the asynchronous entry point to register an alias
 * for a device.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_make_alias(const char *name, cdev_t dev_target)
{
        struct devfs_alias *alias;
        size_t len;

        len = strlen(name);

        alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
        alias->name = kstrdup(name, M_DEVFS);
        alias->namlen = len;
        alias->dev_target = dev_target;

        devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
        return 0;
}
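/*
 * Alias usage sketch (`acd0_dev' is a hypothetical device): after
 *
 *      devfs_make_alias("cdrom", acd0_dev);
 *
 * every devfs mount will carry a "cdrom" link node pointing at the
 * acd0 device node, and devfs_find_device_by_name("cdrom") will
 * resolve to acd0_dev via the alias list.
 */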
/*
 * devfs_apply_rules() is the asynchronous entry point to trigger
 * application of all rules.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_apply_rules(char *mntto)
{
        char *new_name;

        new_name = kstrdup(mntto, M_DEVFS);
        devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);

        return 0;
}

/*
 * devfs_reset_rules() is the asynchronous entry point to trigger a reset
 * of all rules.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_reset_rules(char *mntto)
{
        char *new_name;

        new_name = kstrdup(mntto, M_DEVFS);
        devfs_msg_send_name(DEVFS_RESET_RULES, new_name);

        return 0;
}

/*
 * devfs_scan_callback() is the asynchronous entry point to call a
 * callback on all cdevs.
 * It just sends a message with the relevant details to the devfs core.
 */
int
devfs_scan_callback(devfs_scan_t *callback)
{
        devfs_msg_t msg;

        KKASSERT(sizeof(callback) == sizeof(void *));

        msg = devfs_msg_get();
        msg->mdv_load = callback;
        msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
        devfs_msg_put((devfs_msg_t)msg);
}

/*
 * devfs_msg_get() allocates a new devfs msg and returns it.
 */
devfs_msg_t
devfs_msg_get(void)
{
        return objcache_get(devfs_msg_cache, M_WAITOK);
}

/*
 * devfs_msg_put() deallocates a given devfs msg.
 */
int
devfs_msg_put(devfs_msg_t msg)
{
        objcache_put(devfs_msg_cache, msg);
        return 0;
}

/*
 * devfs_msg_send() is the generic asynchronous message sending facility
 * for devfs.  By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously.
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
        lwkt_port_t port = &devfs_msg_port;

        lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

        devfs_msg->hdr.u.ms_result = cmd;

        if (port->mpu_td == curthread) {
                devfs_msg_exec(devfs_msg);
                lwkt_replymsg(&devfs_msg->hdr, 0);
        } else {
                lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
        }
}

/*
 * devfs_msg_send_sync() is the generic synchronous message sending
 * facility for devfs.  It initializes a local reply port and waits
 * for the core's answer.  This answer is then returned.
 */
devfs_msg_t
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
        struct lwkt_port rep_port;
        devfs_msg_t msg_incoming;
        lwkt_port_t port = &devfs_msg_port;

        lwkt_initport_thread(&rep_port, curthread);
        lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

        devfs_msg->hdr.u.ms_result = cmd;

        lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
        msg_incoming = lwkt_waitport(&rep_port, 0);

        return msg_incoming;
}

/*
 * Sends a message with a generic argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_load = load;
        devfs_msg_send(cmd, devfs_msg);
}
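/*
 * The synchronous wrappers above all follow the same round-trip pattern;
 * a new entry point would look like this sketch (DEVFS_FROBNICATE is a
 * made-up command):
 *
 *      devfs_msg_t msg;
 *
 *      msg = devfs_msg_get();
 *      msg->mdv_load = payload;
 *      msg = devfs_msg_send_sync(DEVFS_FROBNICATE, msg);
 *      result = msg->mdv_load;         answer filled in by the core
 *      devfs_msg_put(msg);
 */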
/*
 * Sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_name = name;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a mount argument.
 */
void
devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_mnt = mnt;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with an ops argument.
 */
void
devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_ops.ops = ops;
        devfs_msg->mdv_ops.minor = minor;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a clone handler argument.
 */
void
devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_chandler.name = name;
        devfs_msg->mdv_chandler.nhandler = handler;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a device argument.
 */
void
devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_dev.dev = dev;
        devfs_msg->mdv_dev.uid = uid;
        devfs_msg->mdv_dev.gid = gid;
        devfs_msg->mdv_dev.perms = perms;

        devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a link argument.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_link.name = name;
        devfs_msg->mdv_link.target = target;
        devfs_msg->mdv_link.mp = mp;
        devfs_msg_send(cmd, devfs_msg);
}
/*
 * devfs_msg_core() is the main devfs thread.  It handles all incoming
 * messages and calls the relevant worker functions.  By using messages
 * it's assured that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
        devfs_msg_t msg;

        devfs_run = 1;
        lwkt_initport_thread(&devfs_msg_port, curthread);
        wakeup(td_core);

        while (devfs_run) {
                msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
                devfs_debug(DEVFS_DEBUG_DEBUG,
                            "devfs_msg_core, new msg: %x\n",
                            (unsigned int)msg->hdr.u.ms_result);
                devfs_msg_exec(msg);
                lwkt_replymsg(&msg->hdr, 0);
        }
        wakeup(td_core);
        lwkt_exit();
}

static void
devfs_msg_exec(devfs_msg_t msg)
{
        struct devfs_mnt_data *mnt;
        struct devfs_node *node;
        cdev_t dev;

        /*
         * Acquire the devfs lock to ensure safety of all called functions.
         */
        lockmgr(&devfs_lock, LK_EXCLUSIVE);

        switch (msg->hdr.u.ms_result) {
        case DEVFS_DEVICE_CREATE:
                dev = msg->mdv_dev.dev;
                devfs_create_dev_worker(dev,
                                        msg->mdv_dev.uid,
                                        msg->mdv_dev.gid,
                                        msg->mdv_dev.perms);
                break;
        case DEVFS_DEVICE_DESTROY:
                dev = msg->mdv_dev.dev;
                devfs_destroy_dev_worker(dev);
                break;
        case DEVFS_DESTROY_SUBNAMES:
                devfs_destroy_subnames_worker(msg->mdv_load);
                break;
        case DEVFS_DESTROY_DEV_BY_OPS:
                devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
                                                msg->mdv_ops.minor);
                break;
        case DEVFS_CREATE_ALL_DEV:
                node = (struct devfs_node *)msg->mdv_load;
                devfs_create_all_dev_worker(node);
                break;
        case DEVFS_MOUNT_ADD:
                mnt = msg->mdv_mnt;
                TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
                devfs_create_all_dev_worker(mnt->root_node);
                break;
        case DEVFS_MOUNT_DEL:
                mnt = msg->mdv_mnt;
                TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
                devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
                                       NULL);
                if (mnt->leak_count) {
                        devfs_debug(DEVFS_DEBUG_SHOW,
                                    "Leaked %ld devfs_node elements!\n",
                                    mnt->leak_count);
                }
                break;
        case DEVFS_CHANDLER_ADD:
                devfs_chandler_add_worker(msg->mdv_chandler.name,
                                          msg->mdv_chandler.nhandler);
                break;
        case DEVFS_CHANDLER_DEL:
                devfs_chandler_del_worker(msg->mdv_chandler.name);
                break;
        case DEVFS_FIND_DEVICE_BY_NAME:
                devfs_find_device_by_name_worker(msg);
                break;
        case DEVFS_FIND_DEVICE_BY_UDEV:
                devfs_find_device_by_udev_worker(msg);
                break;
        case DEVFS_MAKE_ALIAS:
                devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
                break;
        case DEVFS_APPLY_RULES:
                devfs_apply_reset_rules_caller(msg->mdv_name, 1);
                break;
        case DEVFS_RESET_RULES:
                devfs_apply_reset_rules_caller(msg->mdv_name, 0);
                break;
        case DEVFS_SCAN_CALLBACK:
                devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
                break;
        case DEVFS_CLR_SUBNAMES_FLAG:
                devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
                                               msg->mdv_flags.flag);
                break;
        case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
                devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
                                                           msg->mdv_flags.flag);
                break;
        case DEVFS_INODE_TO_VNODE:
                msg->mdv_ino.vp = devfs_iterate_topology(
                    DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
                    (devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
                    &msg->mdv_ino.ino);
                break;
        case DEVFS_TERMINATE_CORE:
                devfs_run = 0;
                break;
        case DEVFS_SYNC:
                break;
        default:
                devfs_debug(DEVFS_DEBUG_WARNING,
                            "devfs_msg_core: unknown message "
                            "received at core\n");
                break;
        }

        lockmgr(&devfs_lock, LK_RELEASE);
}
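/*
 * Ordering note (sketch): because all operations funnel through a single
 * message port processed in FIFO order, a caller that needs to know an
 * asynchronous request has completed can follow it with any synchronous
 * message.  For example, a hypothetical detach path could do:
 *
 *      devfs_destroy_dev(dev);         asynchronous request
 *      devfs_config();                 sync barrier; returns only after
 *                                      DEVFS_DEVICE_DESTROY was processed
 */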
/*
 * Worker function to insert a new dev into the dev list and initialize
 * its permissions.  It also calls devfs_propagate_dev() which in turn
 * propagates the change to all mount points.
 *
 * The passed dev is already referenced.  This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
        KKASSERT(dev);

        dev->si_uid = uid;
        dev->si_gid = gid;
        dev->si_perms = perms;

        devfs_link_dev(dev);
        devfs_propagate_dev(dev, 1);

        return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev() which in turn propagates the
 * change to all mount points.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
        int error;

        KKASSERT(dev);
        KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

        error = devfs_unlink_dev(dev);
        devfs_propagate_dev(dev, 0);
        if (error == 0)
                release_dev(dev);       /* link ref */
        release_dev(dev);
        release_dev(dev);

        return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker() for the actual destruction.
 * A device matches if its name begins with the given basename and is
 * followed by at least one more character (e.g. "pty" matches "ptyp0"
 * but not "pty" itself).
 */
static int
devfs_destroy_subnames_worker(char *name)
{
        cdev_t dev, dev1;
        size_t len = strlen(name);

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if ((!strncmp(dev->si_name, name, len)) &&
                    (dev->si_name[len] != '\0')) {
                        devfs_destroy_dev_worker(dev);
                }
        }
        return 0;
}

static int
devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
{
        cdev_t dev, dev1;
        size_t len = strlen(name);

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if ((!strncmp(dev->si_name, name, len)) &&
                    (dev->si_name[len] != '\0')) {
                        dev->si_flags &= ~flag;
                }
        }

        return 0;
}

static int
devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
{
        cdev_t dev, dev1;
        size_t len = strlen(name);

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if ((!strncmp(dev->si_name, name, len)) &&
                    (dev->si_name[len] != '\0')) {
                        if (!(dev->si_flags & flag)) {
                                devfs_destroy_dev_worker(dev);
                        }
                }
        }

        return 0;
}

/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
        cdev_t dev;

        KKASSERT(root);

        TAILQ_FOREACH(dev, &devfs_dev_list, link) {
                devfs_create_device_node(root, dev, NULL, NULL);
        }

        return 0;
}
/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor.  If minor is less than 0, it is not matched
 * against.  It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
        cdev_t dev, dev1;

        KKASSERT(ops);

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if (dev->si_ops != ops)
                        continue;
                if ((minor < 0) || (dev->si_uminor == minor)) {
                        devfs_destroy_dev_worker(dev);
                }
        }

        return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 */
static int
devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
{
        struct devfs_clone_handler *chandler = NULL;
        u_char len = strlen(name);

        if (len == 0)
                return 1;

        TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
                if (chandler->namlen != len)
                        continue;

                if (!memcmp(chandler->name, name, len)) {
                        /* Clonable basename already exists */
                        return 1;
                }
        }

        chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
        chandler->name = kstrdup(name, M_DEVFS);
        chandler->namlen = len;
        chandler->nhandler = nhandler;

        TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
        return 0;
}

/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(const char *name)
{
        struct devfs_clone_handler *chandler, *chandler2;
        u_char len = strlen(name);

        if (len == 0)
                return 1;

        TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
                if (chandler->namlen != len)
                        continue;
                if (memcmp(chandler->name, name, len))
                        continue;

                TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
                kfree(chandler->name, M_DEVFS);
                kfree(chandler, M_DEVFS);
                break;
        }

        return 0;
}

/*
 * Worker function that finds a given device name and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
        struct devfs_alias *alias;
        cdev_t dev;
        cdev_t found = NULL;

        TAILQ_FOREACH(dev, &devfs_dev_list, link) {
                if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
                        found = dev;
                        break;
                }
        }
        if (found == NULL) {
                TAILQ_FOREACH(alias, &devfs_alias_list, link) {
                        if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
                                found = alias->dev_target;
                                break;
                        }
                }
        }
        devfs_msg->mdv_cdev = found;

        return 0;
}

/*
 * Worker function that finds a given device udev and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
{
        cdev_t dev, dev1;
        cdev_t found = NULL;

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
                        found = dev;
                        break;
                }
        }
        devfs_msg->mdv_cdev = found;

        return 0;
}
/*
 * Worker function that inserts a given alias into the
 * alias list, and propagates the alias to all mount
 * points.
 */
static int
devfs_make_alias_worker(struct devfs_alias *alias)
{
        struct devfs_alias *alias2;
        size_t len = strlen(alias->name);
        int found = 0;

        TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
                if (len != alias2->namlen)
                        continue;

                if (!memcmp(alias->name, alias2->name, len)) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                /*
                 * The alias doesn't exist yet, so we add it to the
                 * alias list.
                 */
                TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
                devfs_alias_propagate(alias);
        } else {
                devfs_debug(DEVFS_DEBUG_WARNING,
                            "Warning: duplicate devfs_make_alias for %s\n",
                            alias->name);
                kfree(alias->name, M_DEVFS);
                kfree(alias, M_DEVFS);
        }

        return 0;
}

/*
 * Function that removes and frees all aliases.
 */
static int
devfs_alias_reap(void)
{
        struct devfs_alias *alias, *alias2;

        TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
                TAILQ_REMOVE(&devfs_alias_list, alias, link);
                kfree(alias->name, M_DEVFS);
                kfree(alias, M_DEVFS);
        }
        return 0;
}

/*
 * Function that removes an alias matching a specific cdev and frees
 * it accordingly.
 */
static int
devfs_alias_remove(cdev_t dev)
{
        struct devfs_alias *alias, *alias2;

        TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
                if (alias->dev_target == dev) {
                        TAILQ_REMOVE(&devfs_alias_list, alias, link);
                        kfree(alias->name, M_DEVFS);
                        kfree(alias, M_DEVFS);
                }
        }
        return 0;
}

/*
 * This function propagates a new alias to all mount points.
 */
static int
devfs_alias_propagate(struct devfs_alias *alias)
{
        struct devfs_mnt_data *mnt;

        TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                devfs_alias_apply(mnt->root_node, alias);
        }
        return 0;
}

/*
 * This function is a recursive function iterating through
 * all device nodes in the topology and, if applicable,
 * creating the relevant alias for a device node.
 */
static int
devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
{
        struct devfs_node *node1, *node2;

        KKASSERT(alias != NULL);

        if ((node->node_type == Proot) || (node->node_type == Pdir)) {
                if (node->nchildren > 2) {
                        TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
                                              link, node2) {
                                devfs_alias_apply(node1, alias);
                        }
                }
        } else {
                if (node->d_dev == alias->dev_target)
                        devfs_alias_create(alias->name, node, 0);
        }
        return 0;
}

/*
 * This function checks if any alias possibly is applicable
 * to the given node.  If so, the alias is created.
 */
static int
devfs_alias_check_create(struct devfs_node *node)
{
        struct devfs_alias *alias;

        TAILQ_FOREACH(alias, &devfs_alias_list, link) {
                if (node->d_dev == alias->dev_target)
                        devfs_alias_create(alias->name, node, 0);
        }
        return 0;
}
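/*
 * Note: a registered alias applies to future topologies as well.
 * devfs_alias_propagate() covers the mounts that exist at registration
 * time, while devfs_alias_check_create(), called from devfs_allocp(),
 * re-creates the link when the device node is built on a devfs mount
 * added later.
 */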
/*
 * This function creates an alias with a given name
 * linking to a given devfs node.  It also increments
 * the link count on the target node.
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
        struct mount *mp = target->mp;
        struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
        struct devfs_node *linknode;
        struct hotplug_device *hpdev;
        char *create_path = NULL;
        char *name;
        char *name_buf;
        int result = 0;

        KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

        name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
        devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

        if (create_path)
                parent = devfs_resolve_or_create_path(parent, create_path, 1);

        if (devfs_find_device_node_by_name(parent, name)) {
                devfs_debug(DEVFS_DEBUG_WARNING,
                            "Node already exists: %s "
                            "(devfs_alias_create)!\n",
                            name);
                result = 1;
                goto done;
        }

        linknode = devfs_allocp(Plink, name, parent, mp, NULL);
        if (linknode == NULL) {
                result = 1;
                goto done;
        }

        linknode->link_target = target;
        target->nlinks++;

        if (rule_based)
                linknode->flags |= DEVFS_RULE_CREATED;

done:
        /* hotplug handler */
        if (devfs_node_added) {
                hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP,
                                M_WAITOK);
                hpdev->dev = target->d_dev;
                hpdev->name = name_orig;
                devfs_node_added(hpdev);
                kfree(hpdev, M_TEMP);
        }
        kfree(name_buf, M_TEMP);
        return (result);
}

/*
 * This function is called by the core and handles mount point
 * strings.  It either calls the relevant worker (devfs_apply_
 * reset_rules_worker) on all mountpoints or only a specific
 * one.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
        struct devfs_mnt_data *mnt;

        if (mountto[0] == '*') {
                TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                        devfs_iterate_topology(mnt->root_node,
                            (apply) ? (devfs_rule_check_apply)
                                    : (devfs_rule_reset_node),
                            NULL);
                }
        } else {
                TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                        if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
                                devfs_iterate_topology(mnt->root_node,
                                    (apply) ? (devfs_rule_check_apply)
                                            : (devfs_rule_reset_node),
                                    NULL);
                                break;
                        }
                }
        }

        kfree(mountto, M_DEVFS);
        return 0;
}

/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback)
{
        cdev_t dev, dev1;

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                callback(dev);
        }

        return 0;
}

/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
                            size_t name_len, int create)
{
        struct devfs_node *node, *found = NULL;

        TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
                if (name_len != node->d_dir.d_namlen)
                        continue;

                if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
                        found = node;
                        break;
                }
        }

        if ((found == NULL) && (create)) {
                found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
        }

        return found;
}
/*
 * This function tries to resolve a complete path.  If creation is
 * requested and a given part of the path cannot be resolved (because
 * it doesn't exist), it is created.
 */
struct devfs_node *
devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
{
        struct devfs_node *node = parent;
        char *buf;
        size_t idx = 0;

        if (path == NULL)
                return parent;

        buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

        while (*path && idx < PATH_MAX - 1) {
                if (*path != '/') {
                        buf[idx++] = *path;
                } else {
                        buf[idx] = '\0';
                        node = devfs_resolve_or_create_dir(node, buf, idx,
                                                           create);
                        if (node == NULL) {
                                kfree(buf, M_TEMP);
                                return NULL;
                        }
                        idx = 0;
                }
                ++path;
        }
        buf[idx] = '\0';
        node = devfs_resolve_or_create_dir(node, buf, idx, create);
        kfree(buf, M_TEMP);
        return (node);
}

/*
 * Takes a full path and strips it into a directory path and a name.
 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp.  It
 * requires a working buffer with enough size to keep the whole
 * fullpath.
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
        char *name = NULL;
        char *path = NULL;
        size_t len = strlen(fullpath) + 1;
        int i;

        KKASSERT((fullpath != NULL) && (buf != NULL));
        KKASSERT((pathp != NULL) && (namep != NULL));

        memcpy(buf, fullpath, len);

        for (i = len - 1; i >= 0; i--) {
                if (buf[i] == '/') {
                        buf[i] = '\0';
                        name = &(buf[i + 1]);
                        path = buf;
                        break;
                }
        }

        *pathp = path;

        if (name) {
                *namep = name;
        } else {
                *namep = buf;
        }

        return 0;
}
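/*
 * Example (sketch): splitting a device path with
 * devfs_resolve_name_path().  `buf' must be at least as large as the
 * input string:
 *
 *      char buf[PATH_MAX];
 *      char *path, *name;
 *
 *      devfs_resolve_name_path("bus/usb/ugen0", buf, &path, &name);
 *      path is now "bus/usb", name is "ugen0"
 *
 * For a plain "ugen0" there is no '/', so path comes back NULL and
 * name points at the whole buffer.
 */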
/*
 * This function creates a new devfs node for a given device.  It can
 * handle a complete path as device name, and accordingly creates
 * the path and the final device node.
 *
 * The reference count on the passed dev remains unchanged.
 */
struct devfs_node *
devfs_create_device_node(struct devfs_node *root, cdev_t dev,
                         char *dev_name, char *path_fmt, ...)
{
        struct devfs_node *parent, *node = NULL;
        struct hotplug_device *hpdev;
        char *path = NULL;
        char *name;
        char *name_buf;
        __va_list ap;
        int i, found;
        char *create_path = NULL;
        char *names = "pqrsPQRS";

        name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

        if (path_fmt != NULL) {
                __va_start(ap, path_fmt);
                kvasnrprintf(&path, PATH_MAX, 10, path_fmt, ap);
                __va_end(ap);
        }

        parent = devfs_resolve_or_create_path(root, path, 1);
        KKASSERT(parent);

        devfs_resolve_name_path(
            ((dev_name == NULL) && (dev)) ? (dev->si_name) : (dev_name),
            name_buf, &create_path, &name);

        if (create_path)
                parent = devfs_resolve_or_create_path(parent, create_path, 1);

        if (devfs_find_device_node_by_name(parent, name)) {
                devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
                            "DEVICE %s ALREADY EXISTS!!! "
                            "Ignoring creation request.\n", name);
                goto out;
        }

        node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
        nanotime(&parent->mtime);

        /*
         * Ugly unix98 pty magic, to hide pty master (ptm) devices and
         * their directory.
         */
        if ((dev) && (strlen(dev->si_name) >= 4) &&
            (!memcmp(dev->si_name, "ptm/", 4))) {
                node->parent->flags |= DEVFS_HIDDEN;
                node->flags |= DEVFS_HIDDEN;
        }

        /*
         * Ugly pty magic, to tag pty devices as such and hide them if
         * needed.
         */
        if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
                node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);

        if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
                found = 0;
                for (i = 0; i < strlen(names); i++) {
                        if (name[3] == names[i]) {
                                found = 1;
                                break;
                        }
                }
                if (found)
                        node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
        }

        /* hotplug handler */
        if (devfs_node_added) {
                hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP,
                                M_WAITOK);
                hpdev->dev = node->d_dev;
                hpdev->name = node->d_dev->si_name;
                devfs_node_added(hpdev);
                kfree(hpdev, M_TEMP);
        }

out:
        kfree(name_buf, M_TEMP);
        kvasfree(&path);
        return node;
}

/*
 * This function finds a given device node in the topology with a given
 * cdev.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
        if ((node->node_type == Pdev) && (node->d_dev == target)) {
                return node;
        }

        return NULL;
}

/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
        struct devfs_node *node, *found = NULL;
        size_t len = strlen(target);

        TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
                if (len != node->d_dir.d_namlen)
                        continue;

                if (!memcmp(node->d_dir.d_name, target, len)) {
                        found = node;
                        break;
                }
        }

        return found;
}

static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
{
        struct vnode *vp = NULL;
        ino_t target = *inop;

        if (node->d_dir.d_ino == target) {
                if (node->v_node) {
                        vp = node->v_node;
                        vget(vp, LK_EXCLUSIVE | LK_RETRY);
                        vn_unlock(vp);
                } else {
                        devfs_allocv(&vp, node);
                        vn_unlock(vp);
                }
        }

        return vp;
}
/*
 * This function takes a cdev and removes its devfs node in the
 * given topology.  The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
        struct devfs_node *node, *parent;
        char *name;
        char *name_buf;
        char *create_path = NULL;

        KKASSERT(target);

        name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
        ksnprintf(name_buf, PATH_MAX, "%s", target->si_name);

        devfs_resolve_name_path(target->si_name, name_buf, &create_path,
                                &name);

        if (create_path)
                parent = devfs_resolve_or_create_path(root, create_path, 0);
        else
                parent = root;

        if (parent == NULL) {
                kfree(name_buf, M_TEMP);
                return 1;
        }

        node = devfs_find_device_node_by_name(parent, name);

        if (node) {
                nanotime(&node->parent->mtime);
                devfs_gc(node);
        }

        kfree(name_buf, M_TEMP);

        return 0;
}

/*
 * Just set perms and ownership for given node.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
                u_short mode, u_long flags)
{
        node->mode = mode;
        node->uid = uid;
        node->gid = gid;

        return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points.  Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
        struct devfs_mnt_data *mnt;

        TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                if (attach) {
                        /* Device is being attached */
                        devfs_create_device_node(mnt->root_node, dev,
                                                 NULL, NULL);
                } else {
                        /* Device is being detached */
                        devfs_alias_remove(dev);
                        devfs_destroy_device_node(mnt->root_node, dev);
                }
        }
        return 0;
}

/*
 * devfs_clone() looks up the clone handler registered for the given
 * base name and, if one exists, calls it to obtain a new device.
 * The new cdev is returned, or NULL if no handler matched or the
 * handler failed.
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
            struct ucred *cred)
{
        int error;
        struct devfs_clone_handler *chandler;
        struct dev_clone_args ap;

        TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
                if (chandler->namlen != len)
                        continue;
                if ((!memcmp(chandler->name, name, len)) &&
                    (chandler->nhandler)) {
                        /*
                         * Drop the devfs lock across devfs_config(),
                         * which synchronizes with the devfs core thread;
                         * the core itself needs the lock.
                         */
                        lockmgr(&devfs_lock, LK_RELEASE);
                        devfs_config();
                        lockmgr(&devfs_lock, LK_EXCLUSIVE);

                        ap.a_head.a_dev = dev;
                        ap.a_dev = NULL;
                        ap.a_name = name;
                        ap.a_namelen = len;
                        ap.a_mode = mode;
                        ap.a_cred = cred;
                        error = (chandler->nhandler)(&ap);
                        if (error)
                                continue;

                        return ap.a_dev;
                }
        }

        return NULL;
}

/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
        struct devfs_orphan *orphan;

        KKASSERT(node);
        orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
        orphan->node = node;

        KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
        node->flags |= DEVFS_ORPHANED;
        TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}
/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
        struct devfs_orphan *orphan;

        KKASSERT(node);

        TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
                if (orphan->node == node) {
                        node->flags &= ~DEVFS_ORPHANED;
                        TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
                        kfree(orphan, M_DEVFS);
                        break;
                }
        }
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
        struct devfs_orphan *orphan, *orphan2;
        size_t count = 0;

        TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
                count++;
                /*
                 * If we are instructed to clean up, we do so.
                 */
                if (cleanup) {
                        TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
                        orphan->node->flags &= ~DEVFS_ORPHANED;
                        devfs_freep(orphan->node);
                        kfree(orphan, M_DEVFS);
                }
        }

        return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
        ino_t ret;

        spin_lock_wr(&ino_lock);
        ret = d_ino++;
        spin_unlock_wr(&ino_lock);

        return ret;
}

/*
 * Allocates a new cdev and initializes its most basic
 * fields.
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
{
        cdev_t dev = sysref_alloc(&cdev_sysref_class);

        sysref_activate(&dev->si_sysref);
        reference_dev(dev);
        bzero(dev, offsetof(struct cdev, si_sysref));

        dev->si_uid = 0;
        dev->si_gid = 0;
        dev->si_perms = 0;
        dev->si_drv1 = NULL;
        dev->si_drv2 = NULL;
        dev->si_lastread = 0;           /* time_second */
        dev->si_lastwrite = 0;          /* time_second */

        dev->si_ops = ops;
        dev->si_flags = 0;
        dev->si_umajor = 0;
        dev->si_uminor = minor;
        dev->si_bops = bops;
        /* If there is a backing device, we reference its ops */
        dev->si_inode = makeudev(
                    devfs_reference_ops((bops) ? (bops) : (ops)),
                    minor);

        return dev;
}

static void
devfs_cdev_terminate(cdev_t dev)
{
        int locked = 0;

        /* Check if it is locked already.  If not, we acquire the devfs lock */
        if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
                lockmgr(&devfs_lock, LK_EXCLUSIVE);
                locked = 1;
        }

        /* Propagate destruction, just in case */
        devfs_propagate_dev(dev, 0);

        /* If we acquired the lock, we also get rid of it */
        if (locked)
                lockmgr(&devfs_lock, LK_RELEASE);

        /* If there is a backing device, we release the backing device's ops */
        devfs_release_ops((dev->si_bops) ? (dev->si_bops) : (dev->si_ops));

        /* Finally destroy the device */
        sysref_put(&dev->si_sysref);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
        KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
        dev->si_flags |= SI_DEVFS_LINKED;
        TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

        return 0;
}
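/*
 * Reference note (sketch of the cdev life cycle as implemented above):
 * devfs_new_cdev() hands out a referenced cdev whose sysref terminate
 * hook is devfs_cdev_terminate().  devfs_create_dev() then adds the
 * devfs "link" reference, and devfs_destroy_dev_worker() drops the
 * link reference together with the remaining ones, after which the
 * sysref layer invokes devfs_cdev_terminate() to tear the device down.
 */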
/*
 * Removes a given cdev from the dev list.  The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
        if ((dev->si_flags & SI_DEVFS_LINKED)) {
                TAILQ_REMOVE(&devfs_dev_list, dev, link);
                dev->si_flags &= ~SI_DEVFS_LINKED;
                return (0);
        }
        return (EALREADY);
}

int
devfs_node_is_accessible(struct devfs_node *node)
{
        if ((node) && (!(node->flags & DEVFS_HIDDEN)))
                return 1;
        else
                return 0;
}

int
devfs_reference_ops(struct dev_ops *ops)
{
        int unit;
        struct devfs_dev_ops *found = NULL;
        struct devfs_dev_ops *devops;

        TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
                if (devops->ops == ops) {
                        found = devops;
                        break;
                }
        }

        if (!found) {
                found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS,
                                M_WAITOK);
                found->ops = ops;
                found->ref_count = 0;
                TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
        }

        KKASSERT(found);

        if (found->ref_count == 0) {
                found->id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id),
                                                   255);
                if (found->id == -1) {
                        /* Ran out of unique ids */
                        devfs_debug(DEVFS_DEBUG_WARNING,
                                    "devfs_reference_ops: WARNING: ran out "
                                    "of unique ids\n");
                }
        }
        unit = found->id;
        ++found->ref_count;

        return unit;
}

void
devfs_release_ops(struct dev_ops *ops)
{
        struct devfs_dev_ops *found = NULL;
        struct devfs_dev_ops *devops;

        TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
                if (devops->ops == ops) {
                        found = devops;
                        break;
                }
        }

        KKASSERT(found);

        --found->ref_count;

        if (found->ref_count == 0) {
                TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
                devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
                kfree(found, M_DEVFS);
        }
}

/*
 * devfs_config() sends an empty DEVFS_SYNC message and waits for the
 * reply; since the core processes messages in order, this acts as a
 * barrier for all previously queued operations.
 */
void
devfs_config(void)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
        devfs_msg_put(msg);
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread.  Also initializes
 * locks.
 */
static void
devfs_init(void)
{
        devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
        /* Create objcaches for nodes, msgs and devs */
        devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
                                           NULL, NULL, NULL,
                                           objcache_malloc_alloc,
                                           objcache_malloc_free,
                                           &devfs_node_malloc_args);

        devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
                                          NULL, NULL, NULL,
                                          objcache_malloc_alloc,
                                          objcache_malloc_free,
                                          &devfs_msg_malloc_args);

        devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
                                          NULL, NULL, NULL,
                                          objcache_malloc_alloc,
                                          objcache_malloc_free,
                                          &devfs_dev_malloc_args);

        devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

        /* Initialize the reply-only port which acts as a message drain */
        lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

        /* Initialize *THE* devfs lock */
        lockinit(&devfs_lock, "devfs_core lock", 0, 0);

        lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
                    0, 0, "devfs_msg_core");

        /* Wait for the core thread to signal that it is up and running */
        tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);

        devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}
/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches.  Also removes aliases that are no longer needed.
 */
static void
devfs_uninit(void)
{
        devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

        devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);

        tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
        tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);

        devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

        /* Destroy the objcaches */
        objcache_destroy(devfs_msg_cache);
        objcache_destroy(devfs_node_cache);
        objcache_destroy(devfs_dev_cache);

        devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) to
 * find the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
        udev_t udev;
        cdev_t found;
        int error;

        if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
                return (error);

        devfs_debug(DEVFS_DEBUG_DEBUG,
                    "devfs sysctl, received udev: %d\n", udev);

        if (udev == NOUDEV)
                return (EINVAL);

        if ((found = devfs_find_device_by_udev(udev)) == NULL)
                return (ENOENT);

        return (SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
            CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_ANYBODY,
            NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
           0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
        devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
          devfs_uninit, NULL);