/*
 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/msgport.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/devfs.h>
#include <sys/devfs_rules.h>
#include <sys/udev.h>

#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysref2.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DEFINE_CLONE_BITMAP(ops_id);
/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static void devfs_cdev_lock(cdev_t dev);
static void devfs_cdev_unlock(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
        .name = "cdev",
        .mtype = M_DEVFS,
        .proto = SYSREF_PROTO_DEV,
        .offset = offsetof(struct cdev, si_sysref),
        .objsize = sizeof(struct cdev),
        .nom_cache = 32,
        .flags = 0,
        .ops = {
                .terminate = (sysref_terminate_func_t)devfs_cdev_terminate,
                .lock = (sysref_lock_func_t)devfs_cdev_lock,
                .unlock = (sysref_unlock_func_t)devfs_cdev_unlock
        }
};

static struct objcache *devfs_node_cache;
static struct objcache *devfs_msg_cache;
static struct objcache *devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
        sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
        sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
        sizeof(struct cdev), M_DEVFS };

static struct devfs_dev_head devfs_dev_list =
                TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
                TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
                TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
                TAILQ_HEAD_INITIALIZER(devfs_alias_list);
static struct devfs_dev_ops_head devfs_dev_ops_list =
                TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);

struct lock devfs_lock;
struct lwkt_token devfs_token;
static struct lwkt_port devfs_dispose_port;
static struct lwkt_port devfs_msg_port;
static struct thread *td_core;

static struct spinlock ino_lock;
static ino_t d_ino;
static int devfs_debug_enable;
static int devfs_run;

static ino_t devfs_fetch_ino(void);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_related_worker(cdev_t);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(const char *, d_clone_t *);
static int devfs_chandler_del_worker(const char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static int devfs_apply_reset_rules_caller(char *, int);

static int devfs_scan_callback_worker(devfs_scan_t *, void *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
                char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_destroy_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *, int);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_related_flag_worker(cdev_t, uint32_t);
static int devfs_destroy_related_without_flag_worker(cdev_t, uint32_t);

static void *devfs_reaperp_callback(struct devfs_node *, void *);
static void devfs_iterate_orphans_unmount(struct mount *mp);
static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);
/*
 * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function
 * using kvprintf
 */
int
devfs_debug(int level, char *fmt, ...)
{
        __va_list ap;

        __va_start(ap, fmt);
        if (level <= devfs_debug_enable)
                kvprintf(fmt, ap);
        __va_end(ap);

        return 0;
}
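
/*
 * Illustrative sketch (not part of the original source): how a caller
 * would use devfs_debug(). Output is emitted only when the requested
 * level is at or below the devfs_debug_enable sysctl/tunable; the
 * message text and function name are hypothetical.
 */
#if 0
static void
example_debug(cdev_t dev)
{
        devfs_debug(DEVFS_DEBUG_DEBUG, "example: inspecting %s\n",
                    dev->si_name);
}
#endif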
/*
 * devfs_allocp() allocates a new devfs node with the specified
 * parameters. The node is also automatically linked into the topology
 * if a parent is specified. It also applies any matching rules and
 * aliases to the new node.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
             struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
        struct devfs_node *node = NULL;
        size_t namlen = strlen(name);

        node = objcache_get(devfs_node_cache, M_WAITOK);
        bzero(node, sizeof(*node));

        atomic_add_long(&DEVFS_MNTDATA(mp)->leak_count, 1);

        node->d_dev = NULL;
        node->nchildren = 1;
        node->mp = mp;
        node->d_dir.d_ino = devfs_fetch_ino();

        /*
         * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries
         * respectively.
         */
        node->cookie_jar = 2;

        /*
         * Access Control members
         */
        node->mode = DEVFS_DEFAULT_MODE;
        node->uid = DEVFS_DEFAULT_UID;
        node->gid = DEVFS_DEFAULT_GID;

        switch (devfsnodetype) {
        case Nroot:
                /*
                 * Ensure that we don't recycle the root vnode by marking it as
                 * linked into the topology.
                 */
                node->flags |= DEVFS_NODE_LINKED;
                /* fall through */
        case Ndir:
                TAILQ_INIT(DEVFS_DENODE_HEAD(node));
                node->d_dir.d_type = DT_DIR;
                node->nchildren = 2;
                break;

        case Nlink:
                node->d_dir.d_type = DT_LNK;
                break;

        case Nreg:
                node->d_dir.d_type = DT_REG;
                break;

        case Ndev:
                if (dev != NULL) {
                        node->d_dir.d_type = DT_CHR;
                        node->d_dev = dev;

                        node->mode = dev->si_perms;
                        node->uid = dev->si_uid;
                        node->gid = dev->si_gid;

                        devfs_alias_check_create(node);
                }
                break;

        default:
                panic("devfs_allocp: unknown node type");
        }

        node->v_node = NULL;
        node->node_type = devfsnodetype;

        /* Initialize the dirent structure of each devfs vnode */
        node->d_dir.d_namlen = namlen;
        node->d_dir.d_name = kmalloc(namlen + 1, M_DEVFS, M_WAITOK);
        memcpy(node->d_dir.d_name, name, namlen);
        node->d_dir.d_name[namlen] = '\0';

        /* Initialize the parent node element */
        node->parent = parent;

        /* Initialize *time members */
        nanotime(&node->atime);
        node->mtime = node->ctime = node->atime;

        /*
         * Associate with parent as last step, clean out namecache
         * reference.
         */
        if (parent) {
                if (parent->node_type == Nroot ||
                    parent->node_type == Ndir) {
                        parent->nchildren++;
                        node->cookie = parent->cookie_jar++;
                        node->flags |= DEVFS_NODE_LINKED;
                        TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

                        /* This forces negative namecache lookups to clear */
                        ++mp->mnt_namecache_gen;
                } else {
                        kprintf("devfs: Cannot link node %p (%s) "
                                "into %p (%s)\n",
                                node, node->d_dir.d_name,
                                parent, parent->d_dir.d_name);
                        print_backtrace(-1);
                }
        }

        /*
         * Apply rules (requires root node, skip if we are creating the root
         * node)
         */
        if (DEVFS_MNTDATA(mp)->root_node)
                devfs_rule_check_apply(node, NULL);

        atomic_add_long(&DEVFS_MNTDATA(mp)->file_count, 1);

        return node;
}

/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
        struct vnode *vp;
        int error = 0;

        KKASSERT(node);

        /*
         * devfs master lock must not be held across a vget() call; we have
         * to hold our ad-hoc vp to avoid a free race from destroying the
         * contents of the structure. The vget() will interlock recycles
         * for us.
         */
try_again:
        while ((vp = node->v_node) != NULL) {
                vhold(vp);
                lockmgr(&devfs_lock, LK_RELEASE);
                error = vget(vp, LK_EXCLUSIVE);
                vdrop(vp);
                lockmgr(&devfs_lock, LK_EXCLUSIVE);
                if (error == 0) {
                        *vpp = vp;
                        goto out;
                }
                if (error != ENOENT) {
                        *vpp = NULL;
                        goto out;
                }
        }

        /*
         * devfs master lock must not be held across a getnewvnode() call.
         */
        lockmgr(&devfs_lock, LK_RELEASE);
        if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0) {
                lockmgr(&devfs_lock, LK_EXCLUSIVE);
                goto out;
        }
        lockmgr(&devfs_lock, LK_EXCLUSIVE);

        vp = *vpp;

        if (node->v_node != NULL) {
                vp->v_type = VBAD;
                vx_put(vp);
                goto try_again;
        }

        vp->v_data = node;
        node->v_node = vp;

        switch (node->node_type) {
        case Nroot:
                vsetflags(vp, VROOT);
                /* fall through */
        case Ndir:
                vp->v_type = VDIR;
                break;

        case Nlink:
                vp->v_type = VLNK;
                break;

        case Nreg:
                vp->v_type = VREG;
                break;

        case Ndev:
                vp->v_type = VCHR;
                KKASSERT(node->d_dev);

                vp->v_uminor = node->d_dev->si_uminor;
                vp->v_umajor = node->d_dev->si_umajor;

                v_associate_rdev(vp, node->d_dev);
                vp->v_ops = &node->mp->mnt_vn_spec_ops;
                if (node->d_dev->si_ops->head.flags & D_KVABIO)
                        vsetflags(vp, VKVABIO);
                break;

        default:
                panic("devfs_allocv: unknown node type");
        }

out:
        return error;
}

/*
 * devfs_allocvp allocates both a devfs node (with the given settings) and
 * a vnode based on the newly created devfs node.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
              char *name, struct devfs_node *parent, cdev_t dev)
{
        struct devfs_node *node;

        node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

        if (node != NULL)
                devfs_allocv(vpp, node);
        else
                *vpp = NULL;

        return 0;
}
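
/*
 * Illustrative sketch (not part of the original source): allocating a
 * devfs directory node under a mount's root and obtaining a vnode for it
 * via devfs_allocvp(). Assumes the caller holds devfs_lock exclusively,
 * since devfs_allocv() above temporarily releases and reacquires it. The
 * directory name and function name are hypothetical.
 */
#if 0
static int
example_make_subdir(struct mount *mp, struct vnode **vpp)
{
        char name[] = "example";

        return devfs_allocvp(mp, vpp, Ndir, name,
                             DEVFS_MNTDATA(mp)->root_node, NULL);
}
#endif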
/*
 * Destroy the devfs_node. The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 *
 * The core lock is not necessarily held on call and must be temporarily
 * released if it is to avoid a deadlock.
 */
void
devfs_freep(struct devfs_node *node)
{
        struct vnode *vp;
        int maxloops;

        KKASSERT(node);

        /*
         * It is possible for devfs_freep() to race a destruction due
         * to having to release the lock below. We use DEVFS_DESTROYED
         * to interlock the race (mediated by devfs_lock).
         *
         * We use NLINKSWAIT to indicate that the node couldn't be
         * freed due to having pending nlinks. We can free
         * the node when nlinks drops to 0. This should never print
         * a "(null)" name; if it ever does there are still unresolved
         * issues.
         */
        if (node->flags & DEVFS_DESTROYED) {
                if ((node->flags & DEVFS_NLINKSWAIT) &&
                    node->nlinks == 0) {
                        kprintf("devfs: final node '%s' on nlinks\n",
                                node->d_dir.d_name);
                        if (node->d_dir.d_name) {
                                kfree(node->d_dir.d_name, M_DEVFS);
                                node->d_dir.d_name = NULL;
                        }
                        objcache_put(devfs_node_cache, node);
                } else {
                        kprintf("devfs: race avoided node '%s' (%p)\n",
                                node->d_dir.d_name, node);
#if 0
                        if (lockstatus(&devfs_lock, curthread) == LK_EXCLUSIVE) {
                                lockmgr(&devfs_lock, LK_RELEASE);
                                Debugger("devfs1");
                                lockmgr(&devfs_lock, LK_EXCLUSIVE);
                        } else {
                                Debugger("devfs2");
                        }
#endif
                }
                return;
        }
        node->flags |= DEVFS_DESTROYED;

        /*
         * Items we have to dispose of before potentially releasing
         * devfs_lock.
         *
         * Remove the node from the orphan list if it is still on it.
         */
        atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->leak_count, 1);
        atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->file_count, 1);
        if (node->flags & DEVFS_ORPHANED)
                devfs_tracer_del_orphan(node);

        /*
         * At this point only the vp points to node, and node cannot be
         * physically freed because we own DEVFS_DESTROYED.
         *
         * We must dispose of the vnode without deadlocking or racing
         * against e.g. a vnode reclaim.
         *
         * This also prevents the vnode reclaim code from double-freeing
         * the node. The vget() is required to safely modify the vp
         * and cycle the refs to terminate an inactive vp.
         */
        maxloops = 1000;
        while ((vp = node->v_node) != NULL) {
                int relock;

                vhold(vp);
                if (lockstatus(&devfs_lock, curthread) == LK_EXCLUSIVE) {
                        lockmgr(&devfs_lock, LK_RELEASE);
                        relock = 1;
                } else {
                        relock = 0;
                }
                if (node->v_node == NULL) {
                        /* reclaim race, mediated by devfs_lock */
                        vdrop(vp);
                } else if (vget(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
                        vdrop(vp);
                        v_release_rdev(vp);
                        vp->v_data = NULL;
                        node->v_node = NULL;
                        vput(vp);
                } else {
                        /* reclaim race, mediated by devfs_lock */
                        vdrop(vp);
                }
                if (relock)
                        lockmgr(&devfs_lock, LK_EXCLUSIVE);
                if (--maxloops == 0) {
                        kprintf("devfs_freep: livelock on node %p\n", node);
                        break;
                }
        }

        /*
         * Remaining cleanup
         */
        if (node->symlink_name) {
                kfree(node->symlink_name, M_DEVFS);
                node->symlink_name = NULL;
        }

        /*
         * We cannot actually free the node if it still has
         * nlinks.
         */
        if (node->nlinks) {
                node->flags |= DEVFS_NLINKSWAIT;
        } else {
                if (node->d_dir.d_name) {
                        kfree(node->d_dir.d_name, M_DEVFS);
                        node->d_dir.d_name = NULL;
                }
                objcache_put(devfs_node_cache, node);
        }
}
/*
 * Returns a valid vp associated with the devfs alias node or NULL
 */
static void *
devfs_alias_getvp(struct devfs_node *node)
{
        struct devfs_node *found = node;
        int depth = 0;

        while ((found->node_type == Nlink) && (found->link_target)) {
                if (depth >= 8) {
                        devfs_debug(DEVFS_DEBUG_SHOW,
                                    "Recursive link or depth >= 8");
                        break;
                }

                found = found->link_target;
                ++depth;
        }

        return found->v_node;
}

/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by devfs_freep().
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
void
devfs_unlinkp(struct devfs_node *node)
{
        struct devfs_node *parent;
        struct devfs_node *target;
        struct vnode *vp;

        KKASSERT(node);

        /*
         * Add the node to the orphan list, so it is referenced somewhere
         * and we don't leak it.
         */
        devfs_tracer_add_orphan(node);

        parent = node->parent;
        node->parent = NULL;

        /*
         * If the parent is known we can unlink the node out of the topology
         */
        if (node->flags & DEVFS_NODE_LINKED) {
                if (parent) {
                        TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
                        parent->nchildren--;
                } else if (node == DEVFS_MNTDATA(node->mp)->root_node) {
                        DEVFS_MNTDATA(node->mp)->root_node = NULL;
                }
                node->flags &= ~DEVFS_NODE_LINKED;
        }

        /*
         * Namecache invalidation.
         *
         * devfs alias nodes are special: their v_node entry is always null
         * and they use the one from their link target. We thus use the
         * target node's vp to invalidate both alias and target entries in
         * the namecache.
         *
         * Doing so for the target is not necessary, but it would be more
         * expensive to resolve only the namecache entry of the alias node
         * from the information available in this function.
         *
         * WARNING! We do not disassociate the vnode here. That can only
         *          be safely done in devfs_freep().
         */
        if (node->node_type == Nlink) {
                if ((target = node->link_target) != NULL) {
                        vp = devfs_alias_getvp(node);
                        node->link_target = NULL;
                        target->nlinks--;
                        if (target->nlinks == 0 &&
                            (target->flags & DEVFS_DESTROYED)) {
                                devfs_freep(target);
                        }
                } else {
                        vp = NULL;
                }
        } else {
                vp = node->v_node;
        }

        if (vp != NULL)
                cache_inval_vp(vp, CINV_DESTROY);
}
void *
devfs_iterate_topology(struct devfs_node *node,
                       devfs_iterate_callback_t *callback, void *arg1)
{
        struct devfs_node *node1, *node2;
        void *ret = NULL;

        if (((node->node_type == Nroot) || (node->node_type == Ndir)) &&
            node->nchildren > 2) {
                TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
                                      link, node2) {
                        ret = devfs_iterate_topology(node1, callback, arg1);
                        if (ret)
                                return ret;
                }
        }
        ret = callback(node, arg1);

        return ret;
}
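
/*
 * Illustrative sketch (not part of the original source): a callback
 * suitable for devfs_iterate_topology() that counts device nodes.
 * Returning NULL continues the traversal; any non-NULL return value
 * aborts it and is handed back to the caller of
 * devfs_iterate_topology().
 */
#if 0
static void *
example_count_devs_callback(struct devfs_node *node, void *arg)
{
        int *countp = arg;

        if (node->node_type == Ndev)
                ++*countp;
        return NULL;            /* keep iterating */
}

/*
 * Usage, e.g. with the root node of a mount:
 *      count = 0;
 *      devfs_iterate_topology(root_node, example_count_devs_callback,
 *                             &count);
 */
#endif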
static void *
devfs_alias_reaper_callback(struct devfs_node *node, void *unused)
{
        if (node->node_type == Nlink) {
                devfs_unlinkp(node);
                devfs_freep(node);
        }

        return NULL;
}

/*
 * devfs_reaperp_callback(), together with devfs_iterate_topology(),
 * recursively iterates through the entire topology, unlinking and
 * freeing all devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
        devfs_unlinkp(node);
        devfs_freep(node);

        return NULL;
}

/*
 * Report any orphans that we couldn't delete. The mp and mnt_data
 * are both disappearing, so we must also clean up the nodes a bit.
 */
static void
devfs_iterate_orphans_unmount(struct mount *mp)
{
        struct devfs_orphan *orphan;

        while ((orphan = TAILQ_FIRST(DEVFS_ORPHANLIST(mp))) != NULL) {
                devfs_freep(orphan->node);
                /* orphan stale */
        }
}

static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
        if (node->node_type == Ndir) {
                if ((node->nchildren == 2) &&
                    !(node->flags & DEVFS_USER_CREATED)) {
                        devfs_unlinkp(node);
                        devfs_freep(node);
                }
        }

        return NULL;
}

static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
        if ((node->node_type == Nlink) && (node->link_target == target)) {
                devfs_unlinkp(node);
                devfs_freep(node);
        }

        return NULL;
}

/*
 * devfs_gc() is the devfs garbage collector. It takes care of unlinking
 * and freeing a node, but also removes empty directories and links that
 * link via the devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
        struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

        if (node->nlinks > 0)
                devfs_iterate_topology(root_node,
                    (devfs_iterate_callback_t *)devfs_gc_links_callback, node);

        devfs_unlinkp(node);
        devfs_iterate_topology(root_node,
            (devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);

        devfs_freep(node);

        return 0;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device. The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
        reference_dev(dev);
        devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

        return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction. It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
        devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
        return 0;
}
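
/*
 * Illustrative sketch (not part of the original source): typical
 * driver-side use of the two asynchronous entry points above. devfs
 * takes its own reference on create, so the caller keeps whatever
 * references it already held. The 0600 root:wheel permissions and the
 * function names are arbitrary example values.
 */
#if 0
static void
example_attach(cdev_t dev)
{
        devfs_create_dev(dev, 0, 0, 0600);
}

static void
example_detach(cdev_t dev)
{
        devfs_destroy_dev(dev);
}
#endif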
/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount. It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_mnt = mnt;
        devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs
 * mount. It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_mnt = mnt;
        devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * devfs_destroy_related() is the synchronous entry point for destroying
 * all devices related to the passed one. It just sends a message with
 * the relevant details to the devfs core.
 */
int
devfs_destroy_related(cdev_t dev)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_load = dev;
        devfs_msg_send_sync(DEVFS_DESTROY_RELATED, msg);
        devfs_msg_put(msg);
        return 0;
}

int
devfs_clr_related_flag(cdev_t dev, uint32_t flag)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_flags.dev = dev;
        msg->mdv_flags.flag = flag;
        devfs_msg_send_sync(DEVFS_CLR_RELATED_FLAG, msg);
        devfs_msg_put(msg);

        return 0;
}

int
devfs_destroy_related_without_flag(cdev_t dev, uint32_t flag)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_flags.dev = dev;
        msg->mdv_flags.flag = flag;
        devfs_msg_send_sync(DEVFS_DESTROY_RELATED_WO_FLAG, msg);
        devfs_msg_put(msg);

        return 0;
}

/*
 * devfs_create_all_dev is the asynchronous entry point to trigger device
 * node creation. It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
        devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
        return 0;
}

/*
 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
 * devices with a specific set of dev_ops and minor. It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
        devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
        return 0;
}

/*
 * devfs_clone_handler_add is the synchronous entry point to add a new
 * clone handler. It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_chandler.name = name;
        msg->mdv_chandler.nhandler = nhandler;
        devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
        devfs_msg_put(msg);
        return 0;
}

/*
 * devfs_clone_handler_del is the synchronous entry point to remove a
 * clone handler. It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_chandler.name = name;
        msg->mdv_chandler.nhandler = NULL;
        devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
        devfs_msg_put(msg);
        return 0;
}
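
/*
 * Illustrative sketch (not part of the original source): registering and
 * unregistering a clone handler for a hypothetical "example" basename.
 * The handler signature follows the struct dev_clone_args usage in
 * devfs_clone() further below; a real handler would create or look up a
 * device and store it in ap->a_dev.
 */
#if 0
static int
example_clone_handler(struct dev_clone_args *ap)
{
        ap->a_dev = NULL;       /* a real handler sets this */
        return ENODEV;
}

/*
 *      devfs_clone_handler_add("example", example_clone_handler);
 *      ...
 *      devfs_clone_handler_del("example");
 */
#endif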
/*
 * devfs_find_device_by_name is the synchronous entry point to find a
 * device given its name. It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
        cdev_t found = NULL;
        devfs_msg_t msg;
        char *target;
        __va_list ap;

        if (fmt == NULL)
                return NULL;

        __va_start(ap, fmt);
        kvasnprintf(&target, PATH_MAX, fmt, ap);
        __va_end(ap);

        msg = devfs_msg_get();
        msg->mdv_name = target;
        devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
        found = msg->mdv_cdev;
        devfs_msg_put(msg);
        kvasfree(&target);

        return found;
}

/*
 * devfs_find_device_by_udev is the synchronous entry point to find a
 * device given its udev number. It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
        cdev_t found = NULL;
        devfs_msg_t msg;

        msg = devfs_msg_get();
        msg->mdv_udev = udev;
        devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
        found = msg->mdv_cdev;
        devfs_msg_put(msg);

        devfs_debug(DEVFS_DEBUG_DEBUG,
                    "devfs_find_device_by_udev found? %s -end:3-\n",
                    ((found) ? found->si_name : "NO"));
        return found;
}
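
/*
 * Illustrative sketch (not part of the original source): both lookups
 * above are synchronous round trips through the devfs core thread. The
 * device name pattern is an arbitrary example; note that, per the worker
 * further below, the lookup itself does not add a reference to the
 * returned cdev.
 */
#if 0
static cdev_t
example_lookup(int unit)
{
        return devfs_find_device_by_name("ttyd%d", unit);
}
#endif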
struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
        struct vnode *vp = NULL;
        devfs_msg_t msg;

        if (mp == NULL)
                return NULL;

        msg = devfs_msg_get();
        msg->mdv_ino.mp = mp;
        msg->mdv_ino.ino = target;
        devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
        vp = msg->mdv_ino.vp;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        devfs_msg_put(msg);

        return vp;
}

/*
 * devfs_make_alias is the asynchronous entry point to register an alias
 * for a device. It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_make_alias(const char *name, cdev_t dev_target)
{
        struct devfs_alias *alias;
        size_t len;

        len = strlen(name);

        alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
        alias->name = kstrdup(name, M_DEVFS);
        alias->namlen = len;
        alias->dev_target = dev_target;

        devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
        return 0;
}

/*
 * devfs_destroy_alias is the asynchronous entry point to deregister an
 * alias for a device. It just sends a message with the relevant details
 * to the devfs core.
 */
int
devfs_destroy_alias(const char *name, cdev_t dev_target)
{
        struct devfs_alias *alias;
        size_t len;

        len = strlen(name);

        alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
        alias->name = kstrdup(name, M_DEVFS);
        alias->namlen = len;
        alias->dev_target = dev_target;

        devfs_msg_send_generic(DEVFS_DESTROY_ALIAS, alias);
        return 0;
}

/*
 * devfs_apply_rules is the asynchronous entry point to trigger
 * application of all rules. It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_apply_rules(char *mntto)
{
        char *new_name;

        new_name = kstrdup(mntto, M_DEVFS);
        devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);

        return 0;
}

/*
 * devfs_reset_rules is the asynchronous entry point to trigger a reset
 * of all rules. It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_reset_rules(char *mntto)
{
        char *new_name;

        new_name = kstrdup(mntto, M_DEVFS);
        devfs_msg_send_name(DEVFS_RESET_RULES, new_name);

        return 0;
}

/*
 * devfs_scan_callback is the synchronous entry point to call a callback
 * on all cdevs. It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_scan_callback(devfs_scan_t *callback, void *arg)
{
        devfs_msg_t msg;

        KKASSERT(callback);

        msg = devfs_msg_get();
        msg->mdv_load = callback;
        msg->mdv_load2 = arg;
        devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
        devfs_msg_put(msg);

        return 0;
}
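
/*
 * Illustrative sketch (not part of the original source): publishing and
 * retracting an alternative name for an existing device. The alias name
 * is hypothetical, and since both entry points are asynchronous, the
 * alias is not guaranteed to be visible the moment the call returns.
 */
#if 0
static void
example_alias(cdev_t dev)
{
        devfs_make_alias("example0", dev);
        /* ... */
        devfs_destroy_alias("example0", dev);
}
#endif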
/*
 * Acts as a message drain. Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
        devfs_msg_put((devfs_msg_t)msg);
}

/*
 * devfs_msg_get allocates a new devfs msg and returns it.
 */
devfs_msg_t
devfs_msg_get(void)
{
        return objcache_get(devfs_msg_cache, M_WAITOK);
}

/*
 * devfs_msg_put deallocates a given devfs msg.
 */
int
devfs_msg_put(devfs_msg_t msg)
{
        objcache_put(devfs_msg_cache, msg);
        return 0;
}

/*
 * devfs_msg_send is the generic asynchronous message sending facility
 * for devfs. By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously.
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
        lwkt_port_t port = &devfs_msg_port;

        lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

        devfs_msg->hdr.u.ms_result = cmd;

        if (port->mpu_td == curthread) {
                devfs_msg_exec(devfs_msg);
                lwkt_replymsg(&devfs_msg->hdr, 0);
        } else {
                lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
        }
}

/*
 * devfs_msg_send_sync is the generic synchronous message sending
 * facility for devfs. It initializes a local reply port and waits
 * for the core's answer. The core will write the answer on the same
 * message which is sent back as reply. The caller still has a reference
 * to the message, so we don't need to return it.
 */
int
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
        struct lwkt_port rep_port;
        int error;
        lwkt_port_t port = &devfs_msg_port;

        lwkt_initport_thread(&rep_port, curthread);
        lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

        devfs_msg->hdr.u.ms_result = cmd;

        error = lwkt_domsg(port, (lwkt_msg_t)devfs_msg, 0);

        return error;
}
/*
 * sends a message with a generic argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_load = load;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_name = name;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a mount argument.
 */
void
devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_mnt = mnt;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with an ops argument.
 */
void
devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_ops.ops = ops;
        devfs_msg->mdv_ops.minor = minor;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a clone handler argument.
 */
void
devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_chandler.name = name;
        devfs_msg->mdv_chandler.nhandler = handler;
        devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a device argument.
 */
void
devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_dev.dev = dev;
        devfs_msg->mdv_dev.uid = uid;
        devfs_msg->mdv_dev.gid = gid;
        devfs_msg->mdv_dev.perms = perms;

        devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a link argument.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
        devfs_msg_t devfs_msg = devfs_msg_get();

        devfs_msg->mdv_link.name = name;
        devfs_msg->mdv_link.target = target;
        devfs_msg->mdv_link.mp = mp;
        devfs_msg_send(cmd, devfs_msg);
}
/*
 * devfs_msg_core is the main devfs thread. It handles all incoming
 * messages and calls the relevant worker functions. By using messages
 * it's assured that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
        devfs_msg_t msg;

        lwkt_initport_thread(&devfs_msg_port, curthread);

        lockmgr(&devfs_lock, LK_EXCLUSIVE);
        devfs_run = 1;
        wakeup(td_core);
        lockmgr(&devfs_lock, LK_RELEASE);

        lwkt_gettoken(&devfs_token);

        while (devfs_run) {
                msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
                devfs_debug(DEVFS_DEBUG_DEBUG,
                            "devfs_msg_core, new msg: %x\n",
                            (unsigned int)msg->hdr.u.ms_result);
                devfs_msg_exec(msg);
                lwkt_replymsg(&msg->hdr, 0);
        }

        lwkt_reltoken(&devfs_token);
        wakeup(td_core);

        lwkt_exit();
}

static void
devfs_msg_exec(devfs_msg_t msg)
{
        struct devfs_mnt_data *mnt;
        struct devfs_node *node;
        cdev_t dev;

        /*
         * Acquire the devfs lock to ensure safety of all called functions
         */
        lockmgr(&devfs_lock, LK_EXCLUSIVE);

        switch (msg->hdr.u.ms_result) {
        case DEVFS_DEVICE_CREATE:
                dev = msg->mdv_dev.dev;
                devfs_create_dev_worker(dev,
                                        msg->mdv_dev.uid,
                                        msg->mdv_dev.gid,
                                        msg->mdv_dev.perms);
                break;
        case DEVFS_DEVICE_DESTROY:
                dev = msg->mdv_dev.dev;
                devfs_destroy_dev_worker(dev);
                break;
        case DEVFS_DESTROY_RELATED:
                devfs_destroy_related_worker(msg->mdv_load);
                break;
        case DEVFS_DESTROY_DEV_BY_OPS:
                devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
                                                msg->mdv_ops.minor);
                break;
        case DEVFS_CREATE_ALL_DEV:
                node = (struct devfs_node *)msg->mdv_load;
                devfs_create_all_dev_worker(node);
                break;
        case DEVFS_MOUNT_ADD:
                mnt = msg->mdv_mnt;
                TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
                devfs_create_all_dev_worker(mnt->root_node);
                break;
        case DEVFS_MOUNT_DEL:
                mnt = msg->mdv_mnt;
                TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
                /* Be sure to remove all the aliases first */
                devfs_iterate_topology(mnt->root_node,
                                       devfs_alias_reaper_callback,
                                       NULL);
                devfs_iterate_topology(mnt->root_node,
                                       devfs_reaperp_callback,
                                       NULL);
                devfs_iterate_orphans_unmount(mnt->mp);
                if (mnt->leak_count) {
                        devfs_debug(DEVFS_DEBUG_SHOW,
                                    "Leaked %ld devfs_node elements!\n",
                                    mnt->leak_count);
                }
                break;
        case DEVFS_CHANDLER_ADD:
                devfs_chandler_add_worker(msg->mdv_chandler.name,
                                          msg->mdv_chandler.nhandler);
                break;
        case DEVFS_CHANDLER_DEL:
                devfs_chandler_del_worker(msg->mdv_chandler.name);
                break;
        case DEVFS_FIND_DEVICE_BY_NAME:
                devfs_find_device_by_name_worker(msg);
                break;
        case DEVFS_FIND_DEVICE_BY_UDEV:
                devfs_find_device_by_udev_worker(msg);
                break;
        case DEVFS_MAKE_ALIAS:
                devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
                break;
        case DEVFS_DESTROY_ALIAS:
                devfs_destroy_alias_worker((struct devfs_alias *)msg->mdv_load);
                break;
        case DEVFS_APPLY_RULES:
                devfs_apply_reset_rules_caller(msg->mdv_name, 1);
                break;
        case DEVFS_RESET_RULES:
                devfs_apply_reset_rules_caller(msg->mdv_name, 0);
                break;
        case DEVFS_SCAN_CALLBACK:
                devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load,
                                           msg->mdv_load2);
                break;
        case DEVFS_CLR_RELATED_FLAG:
                devfs_clr_related_flag_worker(msg->mdv_flags.dev,
                                              msg->mdv_flags.flag);
                break;
        case DEVFS_DESTROY_RELATED_WO_FLAG:
                devfs_destroy_related_without_flag_worker(msg->mdv_flags.dev,
                                                          msg->mdv_flags.flag);
                break;
        case DEVFS_INODE_TO_VNODE:
                msg->mdv_ino.vp = devfs_iterate_topology(
                    DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
                    (devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
                    &msg->mdv_ino.ino);
                break;
        case DEVFS_TERMINATE_CORE:
                devfs_run = 0;
                break;
        case DEVFS_SYNC:
                break;
        default:
                devfs_debug(DEVFS_DEBUG_WARNING,
                            "devfs_msg_core: unknown message "
                            "received at core\n");
                break;
        }
        lockmgr(&devfs_lock, LK_RELEASE);
}
static void
devfs_devctl_notify(cdev_t dev, const char *ev)
{
        static const char prefix[] = "cdev=";
        char *data;
        int namelen;

        namelen = strlen(dev->si_name);
        data = kmalloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK);
        memcpy(data, prefix, sizeof(prefix) - 1);
        memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
        devctl_notify("DEVFS", "CDEV", ev, data);
        kfree(data, M_TEMP);
}

/*
 * Worker function to insert a new dev into the dev list and initialize
 * its permissions. It also calls devfs_propagate_dev which in turn
 * propagates the change to all mount points.
 *
 * The passed dev is already referenced. This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
        KKASSERT(dev);

        dev->si_uid = uid;
        dev->si_gid = gid;
        dev->si_perms = perms;

        devfs_link_dev(dev);
        devfs_propagate_dev(dev, 1);

        udev_event_attach(dev, NULL, 0);
        devfs_devctl_notify(dev, "CREATE");

        return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev which in turn propagates the change
 * to all mount points.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
        int error;

        KKASSERT(dev);
        KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

        error = devfs_unlink_dev(dev);
        devfs_propagate_dev(dev, 0);

        devfs_devctl_notify(dev, "DESTROY");
        udev_event_detach(dev, NULL, 0);

        if (error == 0)
                release_dev(dev);       /* link ref */
        release_dev(dev);
        release_dev(dev);

        return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker for the actual destruction.
 */
static int
devfs_destroy_related_worker(cdev_t needle)
{
        cdev_t dev;

restart:
        devfs_debug(DEVFS_DEBUG_DEBUG, "related worker: %s\n",
                    needle->si_name);
        TAILQ_FOREACH(dev, &devfs_dev_list, link) {
                if (dev->si_parent == needle) {
                        devfs_destroy_related_worker(dev);
                        devfs_destroy_dev_worker(dev);
                        goto restart;
                }
        }
        return 0;
}

static int
devfs_clr_related_flag_worker(cdev_t needle, uint32_t flag)
{
        cdev_t dev, dev1;

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if (dev->si_parent == needle) {
                        devfs_clr_related_flag_worker(dev, flag);
                        dev->si_flags &= ~flag;
                }
        }

        return 0;
}

static int
devfs_destroy_related_without_flag_worker(cdev_t needle, uint32_t flag)
{
        cdev_t dev;

restart:
        devfs_debug(DEVFS_DEBUG_DEBUG, "related_wo_flag: %s\n",
                    needle->si_name);

        TAILQ_FOREACH(dev, &devfs_dev_list, link) {
                if (dev->si_parent == needle) {
                        devfs_destroy_related_without_flag_worker(dev, flag);
                        if (!(dev->si_flags & flag)) {
                                devfs_destroy_dev_worker(dev);
                                devfs_debug(DEVFS_DEBUG_DEBUG,
                                    "related_wo_flag: %s restart\n",
                                    dev->si_name);
                                goto restart;
                        }
                }
        }

        return 0;
}
/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
        cdev_t dev;

        KKASSERT(root);

        TAILQ_FOREACH(dev, &devfs_dev_list, link) {
                devfs_create_device_node(root, dev, NULL, NULL, NULL);
        }

        return 0;
}

/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor. If minor is less than 0, it is not matched
 * against. It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
        cdev_t dev, dev1;

        KKASSERT(ops);

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if (dev->si_ops != ops)
                        continue;
                if ((minor < 0) || (dev->si_uminor == minor)) {
                        devfs_destroy_dev_worker(dev);
                }
        }

        return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 */
static int
devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
{
        struct devfs_clone_handler *chandler = NULL;
        u_char len = strlen(name);

        if (len == 0)
                return 1;

        TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
                if (chandler->namlen != len)
                        continue;

                if (!memcmp(chandler->name, name, len)) {
                        /* Clonable basename already exists */
                        return 1;
                }
        }

        chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
        chandler->name = kstrdup(name, M_DEVFS);
        chandler->namlen = len;
        chandler->nhandler = nhandler;

        TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
        return 0;
}

/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(const char *name)
{
        struct devfs_clone_handler *chandler, *chandler2;
        u_char len = strlen(name);

        if (len == 0)
                return 1;

        TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
                if (chandler->namlen != len)
                        continue;
                if (memcmp(chandler->name, name, len))
                        continue;

                TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
                kfree(chandler->name, M_DEVFS);
                kfree(chandler, M_DEVFS);
                break;
        }

        return 0;
}
/*
 * Worker function that finds a given device name and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
        struct devfs_alias *alias;
        cdev_t dev;
        cdev_t found = NULL;

        TAILQ_FOREACH(dev, &devfs_dev_list, link) {
                if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
                        found = dev;
                        break;
                }
        }
        if (found == NULL) {
                TAILQ_FOREACH(alias, &devfs_alias_list, link) {
                        if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
                                found = alias->dev_target;
                                break;
                        }
                }
        }
        devfs_msg->mdv_cdev = found;

        return 0;
}

/*
 * Worker function that finds a given device udev and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
{
        cdev_t dev, dev1;
        cdev_t found = NULL;

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
                        found = dev;
                        break;
                }
        }
        devfs_msg->mdv_cdev = found;

        return 0;
}

/*
 * Worker function that inserts a given alias into the
 * alias list, and propagates the alias to all mount
 * points.
 */
static int
devfs_make_alias_worker(struct devfs_alias *alias)
{
        struct devfs_alias *alias2;
        size_t len = strlen(alias->name);
        int found = 0;

        TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
                if (len != alias2->namlen)
                        continue;

                if (!memcmp(alias->name, alias2->name, len)) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                /*
                 * The alias doesn't exist yet, so we add it to the alias list
                 */
                TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
                devfs_alias_propagate(alias, 0);
                udev_event_attach(alias->dev_target, alias->name, 1);
        } else {
                devfs_debug(DEVFS_DEBUG_WARNING,
                            "Warning: duplicate devfs_make_alias for %s\n",
                            alias->name);
                kfree(alias->name, M_DEVFS);
                kfree(alias, M_DEVFS);
        }

        return 0;
}

/*
 * Worker function that deletes a given alias from the
 * alias list, and propagates the removal to all mount
 * points.
 */
static int
devfs_destroy_alias_worker(struct devfs_alias *alias)
{
        struct devfs_alias *alias2;
        int found = 0;

        TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
                if (alias->dev_target != alias2->dev_target)
                        continue;

                if (devfs_WildCmp(alias->name, alias2->name) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                devfs_debug(DEVFS_DEBUG_WARNING,
                    "Warning: devfs_destroy_alias for nonexistent alias: %s\n",
                    alias->name);
                kfree(alias->name, M_DEVFS);
                kfree(alias, M_DEVFS);
        } else {
                /*
                 * The alias exists, so we delete it from the alias list
                 */
                TAILQ_REMOVE(&devfs_alias_list, alias2, link);
                devfs_alias_propagate(alias2, 1);
                udev_event_detach(alias2->dev_target, alias2->name, 1);
                kfree(alias->name, M_DEVFS);
                kfree(alias, M_DEVFS);
                kfree(alias2->name, M_DEVFS);
                kfree(alias2, M_DEVFS);
        }

        return 0;
}
/*
 * Function that removes and frees all aliases.
 */
static int
devfs_alias_reap(void)
{
        struct devfs_alias *alias, *alias2;

        TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
                TAILQ_REMOVE(&devfs_alias_list, alias, link);
                kfree(alias->name, M_DEVFS);
                kfree(alias, M_DEVFS);
        }
        return 0;
}

/*
 * Function that removes an alias matching a specific cdev and frees
 * it accordingly.
 */
static int
devfs_alias_remove(cdev_t dev)
{
        struct devfs_alias *alias, *alias2;

        TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
                if (alias->dev_target == dev) {
                        TAILQ_REMOVE(&devfs_alias_list, alias, link);
                        udev_event_detach(alias->dev_target, alias->name, 1);
                        kfree(alias->name, M_DEVFS);
                        kfree(alias, M_DEVFS);
                }
        }
        return 0;
}

/*
 * This function propagates an alias addition or removal to
 * all mount points.
 */
static int
devfs_alias_propagate(struct devfs_alias *alias, int remove)
{
        struct devfs_mnt_data *mnt;

        TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                if (remove) {
                        devfs_destroy_node(mnt->root_node, alias->name);
                } else {
                        devfs_alias_apply(mnt->root_node, alias);
                }
        }
        return 0;
}

/*
 * This function is a recursive function iterating through
 * all device nodes in the topology and, if applicable,
 * creating the relevant alias for a device node.
 */
static int
devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
{
        struct devfs_node *node1, *node2;

        KKASSERT(alias != NULL);

        if ((node->node_type == Nroot) || (node->node_type == Ndir)) {
                if (node->nchildren > 2) {
                        TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
                                              link, node2) {
                                devfs_alias_apply(node1, alias);
                        }
                }
        } else {
                if (node->d_dev == alias->dev_target)
                        devfs_alias_create(alias->name, node, 0);
        }
        return 0;
}

/*
 * This function checks if any alias possibly is applicable
 * to the given node. If so, the alias is created.
 */
static int
devfs_alias_check_create(struct devfs_node *node)
{
        struct devfs_alias *alias;

        TAILQ_FOREACH(alias, &devfs_alias_list, link) {
                if (node->d_dev == alias->dev_target)
                        devfs_alias_create(alias->name, node, 0);
        }
        return 0;
}

/*
 * This function creates an alias with a given name
 * linking to a given devfs node. It also increments
 * the link count on the target node.
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
        struct mount *mp = target->mp;
        struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
        struct devfs_node *linknode;
        char *create_path = NULL;
        char *name;
        char *name_buf;
        int result = 0;

        KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

        name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
        devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

        if (create_path)
                parent = devfs_resolve_or_create_path(parent, create_path, 1);

        if (devfs_find_device_node_by_name(parent, name)) {
                devfs_debug(DEVFS_DEBUG_WARNING,
                            "Node already exists: %s "
                            "(devfs_make_alias_worker)!\n",
                            name);
                result = 1;
                goto done;
        }

        linknode = devfs_allocp(Nlink, name, parent, mp, NULL);
        if (linknode == NULL) {
                result = 1;
                goto done;
        }

        linknode->link_target = target;
        target->nlinks++;

        if (rule_based)
                linknode->flags |= DEVFS_RULE_CREATED;

done:
        kfree(name_buf, M_TEMP);
        return (result);
}
/*
 * This function is called by the core and handles mount point
 * strings. It calls the relevant worker either on all mountpoints
 * or only on a specific one.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
        struct devfs_mnt_data *mnt;

        if (mountto[0] == '*') {
                TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                        devfs_iterate_topology(mnt->root_node,
                            (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
                            NULL);
                }
        } else {
                TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
                        if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
                                devfs_iterate_topology(mnt->root_node,
                                    (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
                                    NULL);
                                break;
                        }
                }
        }

        kfree(mountto, M_DEVFS);
        return 0;
}

/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback, void *arg)
{
        cdev_t dev, dev1;
        struct devfs_alias *alias, *alias1;

        TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
                callback(dev->si_name, dev, false, arg);
        }
        TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias1) {
                callback(alias->name, alias->dev_target, true, arg);
        }

        return 0;
}

/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
                            size_t name_len, int create)
{
        struct devfs_node *node, *found = NULL;

        TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
                if (name_len != node->d_dir.d_namlen)
                        continue;

                if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
                        found = node;
                        break;
                }
        }

        if ((found == NULL) && (create)) {
                found = devfs_allocp(Ndir, dir_name, parent, parent->mp, NULL);
        }

        return found;
}

/*
 * This function tries to resolve a complete path. If creation is
 * requested and a given part of the path cannot be resolved (because
 * it doesn't exist), it is created.
 */
struct devfs_node *
devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
{
        struct devfs_node *node = parent;
        char *buf;
        size_t idx = 0;

        if (path == NULL)
                return parent;

        buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

        while (*path && idx < PATH_MAX - 1) {
                if (*path != '/') {
                        buf[idx++] = *path;
                } else {
                        buf[idx] = '\0';
                        node = devfs_resolve_or_create_dir(node, buf, idx, create);
                        if (node == NULL) {
                                kfree(buf, M_TEMP);
                                return NULL;
                        }
                        idx = 0;
                }
                ++path;
        }
        buf[idx] = '\0';
        node = devfs_resolve_or_create_dir(node, buf, idx, create);
        kfree(buf, M_TEMP);
        return (node);
}

/*
 * Takes a full path and strips it into a directory path and a name.
 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It
 * requires a working buffer with enough size to keep the whole
 * fullpath.
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
        char *name = NULL;
        char *path = NULL;
        size_t len = strlen(fullpath) + 1;
        int i;

        KKASSERT((fullpath != NULL) && (buf != NULL));
        KKASSERT((pathp != NULL) && (namep != NULL));

        memcpy(buf, fullpath, len);

        for (i = len - 1; i >= 0; i--) {
                if (buf[i] == '/') {
                        buf[i] = '\0';
                        name = &(buf[i+1]);
                        path = buf;
                        break;
                }
        }

        *pathp = path;

        if (name) {
                *namep = name;
        } else {
                *namep = buf;
        }

        return 0;
}
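
/*
 * Illustrative sketch (not part of the original source): splitting a
 * path with devfs_resolve_name_path(), using the a/b/c/foo example from
 * the comment above. The scratch buffer is modified in place and both
 * result pointers point into it, so it must stay valid for as long as
 * path/name are used.
 */
#if 0
static void
example_split(void)
{
        char full[] = "a/b/c/foo";
        char buf[PATH_MAX];
        char *path;
        char *name;

        devfs_resolve_name_path(full, buf, &path, &name);
        /* now path == "a/b/c" and name == "foo", both inside buf */
}
#endif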
/*
 * This function finds a given device node in the topology with a given
 * cdev.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
	if ((node->node_type == Ndev) && (node->d_dev == target)) {
		return node;
	}

	return NULL;
}

/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
	struct devfs_node *node, *found = NULL;
	size_t len = strlen(target);

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(node->d_dir.d_name, target, len)) {
			found = node;
			break;
		}
	}

	return found;
}

static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
{
	struct vnode *vp = NULL;
	ino_t target = *inop;

	if (node->d_dir.d_ino == target) {
		if (node->v_node) {
			vp = node->v_node;
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
			vn_unlock(vp);
		} else {
			devfs_allocv(&vp, node);
			vn_unlock(vp);
		}
	}

	return vp;
}

/*
 * This function takes a cdev and removes its devfs node in the
 * given topology. The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
	KKASSERT(target != NULL);
	return devfs_destroy_node(root, target->si_name);
}

/*
 * This function takes a path to a devfs node, resolves it and
 * removes the devfs node from the given topology.
 */
int
devfs_destroy_node(struct devfs_node *root, char *target)
{
	struct devfs_node *node, *parent;
	char *name;
	char *name_buf;
	char *create_path = NULL;

	KKASSERT(target);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	ksnprintf(name_buf, PATH_MAX, "%s", target);

	devfs_resolve_name_path(target, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(root, create_path, 0);
	else
		parent = root;

	if (parent == NULL) {
		kfree(name_buf, M_TEMP);
		return 1;
	}

	node = devfs_find_device_node_by_name(parent, name);

	if (node) {
		nanotime(&node->parent->mtime);
		devfs_gc(node);
	}

	kfree(name_buf, M_TEMP);

	return 0;
}
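
/*
 * Illustrative sketch (not compiled in), continuing the example above:
 *
 *	devfs_destroy_node(root, "snd/pcm0");
 *	// removes the "pcm0" node; the cdev itself remains intact
 *
 * Note that the path is resolved with create == 0, so a missing
 * intermediate directory simply makes the call return 1 instead of
 * creating the path.
 */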
/*
 * Just sets permissions and ownership for the given node. The flags
 * argument is currently unused.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
		u_short mode, u_long flags)
{
	node->mode = mode;
	node->uid = uid;
	node->gid = gid;

	return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points. Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (attach) {
			/* Device is being attached */
			devfs_create_device_node(mnt->root_node, dev,
						 NULL, NULL, NULL);
		} else {
			/* Device is being detached */
			devfs_alias_remove(dev);
			devfs_destroy_device_node(mnt->root_node, dev);
		}
	}
	return 0;
}

/*
 * devfs_clone() searches the registered clone handlers for one matching
 * the given device name and, if found, calls it to obtain a new device,
 * which is returned. NULL is returned if no handler matches or every
 * matching handler fails.
 *
 * Caller must hold a shared devfs_lock.
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
	    struct ucred *cred)
{
	int error;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;
		if ((!memcmp(chandler->name, name, len)) &&
		    (chandler->nhandler)) {
			/*
			 * We have to unlock across the config and the
			 * callback to avoid deadlocking. The device is
			 * likely to obtain its own lock in the callback
			 * and might then call into devfs.
			 */
			lockmgr(&devfs_lock, LK_RELEASE);
			devfs_config();
			ap.a_head.a_dev = dev;
			ap.a_dev = NULL;
			ap.a_name = name;
			ap.a_namelen = len;
			ap.a_mode = mode;
			ap.a_cred = cred;
			error = (chandler->nhandler)(&ap);
			lockmgr(&devfs_lock, LK_SHARED);
			if (error)
				continue;

			return ap.a_dev;
		}
	}

	return NULL;
}
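
/*
 * Illustrative sketch (hypothetical driver and names, not compiled in):
 * a clone handler registered for "hypot" would be found by the loop
 * above when e.g. /dev/hypot is opened. The handler fills in a_dev:
 *
 *	static int
 *	hypot_clone(struct dev_clone_args *ap)
 *	{
 *		int unit;
 *
 *		unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(hypot), 0);
 *		ap->a_dev = make_only_dev(&hypot_ops, unit, UID_ROOT,
 *					  GID_WHEEL, 0600, "hypot%d", unit);
 *		return 0;
 *	}
 *
 *	devfs_clone_handler_add("hypot", hypot_clone);
 *
 * On handler failure the loop above simply tries the remaining
 * registered handlers.
 */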
/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t ret;

	spin_lock(&ino_lock);
	ret = d_ino++;
	spin_unlock(&ino_lock);

	return ret;
}

/*
 * Allocates a new cdev and initializes its most basic
 * fields.
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
{
	cdev_t dev = sysref_alloc(&cdev_sysref_class);

	sysref_activate(&dev->si_sysref);
	reference_dev(dev);
	bzero(dev, offsetof(struct cdev, si_sysref));

	dev->si_uid = 0;
	dev->si_gid = 0;
	dev->si_perms = 0;
	dev->si_drv1 = NULL;
	dev->si_drv2 = NULL;
	dev->si_lastread = 0;		/* time_uptime */
	dev->si_lastwrite = 0;		/* time_uptime */

	dev->si_dict = NULL;
	dev->si_parent = NULL;
	dev->si_ops = ops;
	dev->si_flags = 0;
	dev->si_uminor = minor;
	dev->si_bops = bops;

	/*
	 * Since the disk subsystem is in the way, we need to
	 * propagate the D_CANFREE from bops (and ops) to
	 * si_flags.
	 */
	if (bops && (bops->head.flags & D_CANFREE)) {
		dev->si_flags |= SI_CANFREE;
	} else if (ops->head.flags & D_CANFREE) {
		dev->si_flags |= SI_CANFREE;
	}

	/* If there is a backing device, we reference its ops */
	dev->si_inode = makeudev(
		    devfs_reference_ops((bops)?(bops):(ops)),
		    minor);
	dev->si_umajor = umajor(dev->si_inode);

	return dev;
}

static void
devfs_cdev_terminate(cdev_t dev)
{
	int locked = 0;

	/* Check if it is locked already. If not, we acquire the devfs lock */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	}

	/*
	 * Make sure the node isn't linked anymore. Otherwise we've screwed
	 * up somewhere, since normal devs are unlinked on the call to
	 * destroy_dev and only-cdevs that have not been used for cloning
	 * are not linked in the first place. only-cdevs used for cloning
	 * will be linked in, too, and should only be destroyed via
	 * destroy_dev, not destroy_only_dev, so we catch that problem, too.
	 */
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);

	/* If we acquired the lock, we also get rid of it */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/* If there is a backing device, we release the backing device's ops */
	devfs_release_ops((dev->si_bops)?(dev->si_bops):(dev->si_ops));

	/* Finally destroy the device */
	sysref_put(&dev->si_sysref);
}

/*
 * Dummies for now (individual locks for MPSAFE)
 */
static void
devfs_cdev_lock(cdev_t dev)
{
}

static void
devfs_cdev_unlock(cdev_t dev)
{
}

static int
devfs_detached_filter_eof(struct knote *kn, long hint)
{
	kn->kn_flags |= (EV_EOF | EV_NODATA);
	return (1);
}

static void
devfs_detached_filter_detach(struct knote *kn)
{
	cdev_t dev = (cdev_t)kn->kn_hook;

	knote_remove(&dev->si_kqinfo.ki_note, kn);
}

static struct filterops devfs_detached_filterops =
	{ FILTEROP_ISFD, NULL,
	  devfs_detached_filter_detach,
	  devfs_detached_filter_eof };
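
/*
 * Illustrative sketch (not compiled in): the make_dev()-style front
 * ends are built on top of devfs_new_cdev(); very roughly (the real
 * code lives elsewhere, in the kernel's device configuration layer):
 *
 *	cdev_t dev = devfs_new_cdev(ops, minor, NULL);
 *	// fill in dev->si_name, then hand the cdev to devfs for
 *	// linking and propagation to all devfs mounts
 *
 * Note that si_inode encodes the reference-counted ops id as the major
 * number, so all cdevs sharing a dev_ops share a major number.
 */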
/*
 * Delegates knote filter handling responsibility to devfs
 *
 * Any device that implements kqfilter event handling and could be detached
 * or shut down out from under the kevent subsystem must allow devfs to
 * assume responsibility for any knotes it may hold.
 */
void
devfs_assume_knotes(cdev_t dev, struct kqinfo *kqi)
{
	/*
	 * Let kern/kern_event.c do the heavy lifting.
	 */
	knote_assume_knotes(kqi, &dev->si_kqinfo,
			    &devfs_detached_filterops, (void *)dev);

	/*
	 * These should probably be activated individually, but doing so
	 * would require refactoring kq's public in-kernel interface.
	 */
	KNOTE(&dev->si_kqinfo.ki_note, 0);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
	dev->si_flags |= SI_DEVFS_LINKED;
	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

	return 0;
}

/*
 * Removes a given cdev from the dev list. The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	if (!found) {
		found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS,
				M_WAITOK);
		found->ops = ops;
		found->ref_count = 0;
		TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
	}

	KKASSERT(found);

	if (found->ref_count == 0) {
		found->id =
		    devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (found->id == -1) {
			/* Ran out of unique ids */
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "devfs_reference_ops: WARNING: ran out "
				    "of unique ids\n");
		}
	}
	unit = found->id;
	++found->ref_count;

	return unit;
}

void
devfs_release_ops(struct dev_ops *ops)
{
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	KKASSERT(found);

	--found->ref_count;

	if (found->ref_count == 0) {
		TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
		kfree(found, M_DEVFS);
	}
}
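
/*
 * Illustrative note (not compiled in): references must be strictly
 * paired; devfs_new_cdev() takes the reference when minting si_inode
 * and devfs_cdev_terminate() drops it again:
 *
 *	int id = devfs_reference_ops(ops);	// first ref allocates an id
 *	...
 *	devfs_release_ops(ops);			// last ref recycles the id
 *
 * Once ref_count reaches 0 the id goes back into the ops_id bitmap, so
 * a later reference may receive a different major number.
 */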
/*
 * Wait for asynchronous messages to complete in the devfs helper
 * thread, then return. Do nothing if the helper thread is dead
 * or we are being indirectly called from the helper thread itself.
 */
void
devfs_config(void)
{
	devfs_msg_t msg;

	if (devfs_run && curthread != td_core) {
		msg = devfs_msg_get();
		devfs_msg_send_sync(DEVFS_SYNC, msg);
		devfs_msg_put(msg);
	}
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread. Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
					   NULL, NULL, NULL,
					   objcache_malloc_alloc,
					   objcache_malloc_free,
					   &devfs_node_malloc_args);

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_msg_malloc_args);

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_dev_malloc_args);

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);
	lwkt_token_init(&devfs_token, "devfs_core");

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, -1, "devfs_msg_core");
	while (devfs_run == 0)
		lksleep(td_core, &devfs_lock, 0, "devfsc", 0);
	lockmgr(&devfs_lock, LK_RELEASE);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}

/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches. Also removes aliases that are no longer needed.
 */
static void
devfs_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
	while (devfs_run)
		tsleep(td_core, 0, "devfsc", hz*10);
	tsleep(td_core, 0, "devfsc", hz);

	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

	/* Destroy the objcaches */
	objcache_destroy(devfs_msg_cache);
	objcache_destroy(devfs_node_cache);
	objcache_destroy(devfs_dev_cache);

	devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) in finding
 * the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
	udev_t udev;
	cdev_t found;
	int error;

	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
		return (error);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs sysctl, received udev: %d\n", udev);

	if (udev == NOUDEV)
		return(EINVAL);

	if ((found = devfs_find_device_by_udev(udev)) == NULL)
		return(ENOENT);

	return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
	    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	    NULL, 0, devfs_sysctl_devname_helper, "",
	    "helper for devname(3)");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
	   0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_DEVFS_CORE, SI_ORDER_FIRST,
	devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_DEVFS_CORE, SI_ORDER_ANY,
	  devfs_uninit, NULL);
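
/*
 * Illustrative sketch (userland, not compiled in): roughly how
 * devname(3) consumes the kern.devname sysctl; error handling omitted
 * and the buffer size is an assumption:
 *
 *	udev_t ud = st.st_rdev;		// from stat(2) on a device node
 *	char name[SPECNAMELEN + 1];
 *	size_t len = sizeof(name);
 *
 *	sysctlbyname("kern.devname", name, &len, &ud, sizeof(ud));
 *	// name now holds e.g. "ttyv0"
 */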
/*
 * WildCmp() - compare wild string to sane string
 *
 * Returns 0 on success, -1 on failure.
 */
static int
wildCmp(const char **mary, int d, const char *w, const char *s)
{
	int i;

	/*
	 * skip fixed portion
	 */
	for (;;) {
		switch(*w) {
		case '*':
			/*
			 * optimize terminator
			 */
			if (w[1] == 0)
				return(0);
			if (w[1] != '?' && w[1] != '*') {
				/*
				 * optimize * followed by non-wild
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (s[i] == w[1] &&
					    wildCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			} else {
				/*
				 * less-optimal
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (wildCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			}
			mary[d] = s;
			return(-1);
		case '?':
			if (*s == 0)
				return(-1);
			++w;
			++s;
			break;
		default:
			if (*w != *s)
				return(-1);
			if (*w == 0)	/* terminator */
				return(0);
			++w;
			++s;
			break;
		}
	}
	/* not reached */
	return(-1);
}

/*
 * WildCaseCmp() - compare wild string to sane string, case insensitive
 *
 * Returns 0 on success, -1 on failure.
 */
static int
wildCaseCmp(const char **mary, int d, const char *w, const char *s)
{
	int i;

	/*
	 * skip fixed portion
	 */
	for (;;) {
		switch(*w) {
		case '*':
			/*
			 * optimize terminator
			 */
			if (w[1] == 0)
				return(0);
			if (w[1] != '?' && w[1] != '*') {
				/*
				 * optimize * followed by non-wild
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (s[i] == w[1] &&
					    wildCaseCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			} else {
				/*
				 * less-optimal
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (wildCaseCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			}
			mary[d] = s;
			return(-1);
		case '?':
			if (*s == 0)
				return(-1);
			++w;
			++s;
			break;
		default:
			if (*w != *s) {
#define tolower(x)	((x >= 'A' && x <= 'Z')?(x+('a'-'A')):(x))
				if (tolower(*w) != tolower(*s))
					return(-1);
			}
			if (*w == 0)	/* terminator */
				return(0);
			++w;
			++s;
			break;
		}
	}
	/* not reached */
	return(-1);
}

struct cdev_privdata {
	void *cdpd_data;
	cdevpriv_dtr_t cdpd_dtr;
};

int
devfs_get_cdevpriv(struct file *fp, void **datap)
{
	int error;

	if (fp == NULL)
		return(EBADF);

	spin_lock_shared(&fp->f_spin);
	if (fp->f_data1 == NULL) {
		*datap = NULL;
		error = ENOENT;
	} else {
		struct cdev_privdata *p = fp->f_data1;

		*datap = p->cdpd_data;
		error = 0;
	}
	spin_unlock_shared(&fp->f_spin);

	return (error);
}

int
devfs_set_cdevpriv(struct file *fp, void *priv, cdevpriv_dtr_t dtr)
{
	struct cdev_privdata *p;
	int error;

	if (fp == NULL)
		return (ENOENT);

	p = kmalloc(sizeof(struct cdev_privdata), M_DEVFS, M_WAITOK);
	p->cdpd_data = priv;
	p->cdpd_dtr = dtr;

	spin_lock(&fp->f_spin);
	if (fp->f_data1 == NULL) {
		fp->f_data1 = p;
		error = 0;
	} else {
		error = EBUSY;
	}
	spin_unlock(&fp->f_spin);

	if (error)
		kfree(p, M_DEVFS);

	return error;
}
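
/*
 * Illustrative sketch (hypothetical driver, not compiled in): attaching
 * per-open private data from a driver's open path; the destructor runs
 * when the data is cleared again (see devfs_clear_cdevpriv() below).
 * M_HYPOT and the softc type are made-up names.
 *
 *	static void
 *	hypot_dtor(void *data)
 *	{
 *		kfree(data, M_HYPOT);
 *	}
 *
 *	// fp is the struct file for this open, as provided to the
 *	// driver by its open path
 *	error = devfs_set_cdevpriv(fp, sc, hypot_dtor);
 *	if (error == EBUSY)
 *		;	// this file already has private data attached
 */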
void
devfs_clear_cdevpriv(struct file *fp)
{
	struct cdev_privdata *p;

	if (fp == NULL)
		return;

	spin_lock(&fp->f_spin);
	p = fp->f_data1;
	fp->f_data1 = NULL;
	spin_unlock(&fp->f_spin);

	if (p != NULL) {
		p->cdpd_dtr(p->cdpd_data);
		kfree(p, M_DEVFS);
	}
}

int
devfs_WildCmp(const char *w, const char *s)
{
	int i;
	int c;
	int slen = strlen(s);
	const char **mary;

	for (i = c = 0; w[i]; ++i) {
		if (w[i] == '*')
			++c;
	}
	mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK);
	for (i = 0; i < c; ++i)
		mary[i] = s + slen;
	i = wildCmp(mary, 0, w, s);
	kfree(mary, M_DEVFS);
	return(i);
}

int
devfs_WildCaseCmp(const char *w, const char *s)
{
	int i;
	int c;
	int slen = strlen(s);
	const char **mary;

	for (i = c = 0; w[i]; ++i) {
		if (w[i] == '*')
			++c;
	}
	mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK);
	for (i = 0; i < c; ++i)
		mary[i] = s + slen;
	i = wildCaseCmp(mary, 0, w, s);
	kfree(mary, M_DEVFS);
	return(i);
}
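
/*
 * Illustrative examples (not compiled in); 0 means match, -1 no match:
 *
 *	devfs_WildCmp("tty*", "ttyv0")     == 0
 *	devfs_WildCmp("tty?0", "ttyv0")    == 0
 *	devfs_WildCmp("tty*", "pts/0")     == -1
 *	devfs_WildCaseCmp("TTY*", "ttyv0") == 0
 *
 * These are used e.g. by the devfs rules code to match rule globs
 * against device names.
 */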