xref: /dflybsd-src/sys/vfs/devfs/devfs_core.c (revision 6507240b2fcfebaacc0f92f997dad76922e1d8c0)
1 /*
2  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Alex Hornung <ahornung@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/mount.h>
38 #include <sys/vnode.h>
39 #include <sys/types.h>
40 #include <sys/lock.h>
41 #include <sys/msgport.h>
42 #include <sys/msgport2.h>
43 #include <sys/spinlock2.h>
44 #include <sys/sysctl.h>
45 #include <sys/ucred.h>
46 #include <sys/param.h>
47 #include <sys/sysref2.h>
48 #include <sys/systm.h>
49 #include <vfs/devfs/devfs.h>
50 #include <vfs/devfs/devfs_rules.h>
51 
52 MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
53 DEVFS_DECLARE_CLONE_BITMAP(ops_id);
54 /*
55  * SYSREF Integration - reference counting, allocation,
56  * sysid and syslink integration.
57  */
58 static void devfs_cdev_terminate(cdev_t dev);
59 static struct sysref_class     cdev_sysref_class = {
60 	.name =         "cdev",
61 	.mtype =        M_DEVFS,
62 	.proto =        SYSREF_PROTO_DEV,
63 	.offset =       offsetof(struct cdev, si_sysref),
64 	.objsize =      sizeof(struct cdev),
65 	.mag_capacity = 32,
66 	.flags =        0,
67 	.ops =  {
68 		.terminate = (sysref_terminate_func_t)devfs_cdev_terminate
69 	}
70 };
71 
72 static struct objcache	*devfs_node_cache;
73 static struct objcache 	*devfs_msg_cache;
74 static struct objcache	*devfs_dev_cache;
75 
76 static struct objcache_malloc_args devfs_node_malloc_args = {
77 	sizeof(struct devfs_node), M_DEVFS };
78 struct objcache_malloc_args devfs_msg_malloc_args = {
79 	sizeof(struct devfs_msg), M_DEVFS };
80 struct objcache_malloc_args devfs_dev_malloc_args = {
81 	sizeof(struct cdev), M_DEVFS };
82 
83 static struct devfs_dev_head devfs_dev_list = TAILQ_HEAD_INITIALIZER(devfs_dev_list);
84 static struct devfs_mnt_head devfs_mnt_list = TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
85 static struct devfs_chandler_head devfs_chandler_list = TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
86 static struct devfs_alias_head devfs_alias_list = TAILQ_HEAD_INITIALIZER(devfs_alias_list);
87 
88 struct lock 		devfs_lock;
89 static struct lwkt_port devfs_dispose_port;
90 static struct lwkt_port devfs_msg_port;
91 static struct thread 	*td_core;
92 
93 static ino_t 	d_ino = 0;
94 static __uint32_t	msg_id = 0;
95 static struct spinlock  ino_lock;
96 static int devfs_debug_enable = 0;
97 
98 static ino_t devfs_fetch_ino(void);
99 static int devfs_gc_dirs(struct devfs_node *);
100 static int devfs_gc_links(struct devfs_node *, struct devfs_node *, size_t);
101 static int devfs_create_all_dev_worker(struct devfs_node *);
102 static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
103 static int devfs_destroy_dev_worker(cdev_t);
104 static int devfs_destroy_subnames_worker(char *);
105 static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
106 static int devfs_propagate_dev(cdev_t, int);
107 static int devfs_unlink_dev(cdev_t dev);
108 
109 static int devfs_chandler_add_worker(char *, d_clone_t *);
110 static int devfs_chandler_del_worker(char *);
111 
112 static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
113 static void devfs_msg_core(void *);
114 
115 static int devfs_find_device_by_name_worker(devfs_msg_t);
116 static int devfs_find_device_by_udev_worker(devfs_msg_t);
117 
118 static int devfs_apply_reset_rules_caller(char *, int);
119 static int devfs_apply_reset_rules_worker(struct devfs_node *, int);
120 
121 static int devfs_scan_callback_worker(devfs_scan_t *);
122 
123 static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *, char *, size_t, int);
124 
125 static int devfs_make_alias_worker(struct devfs_alias *);
126 static int devfs_alias_remove(cdev_t);
127 static int devfs_alias_reap(void);
128 static int devfs_alias_propagate(struct devfs_alias *);
129 static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
130 static int devfs_alias_check_create(struct devfs_node *);
131 
132 static int devfs_clr_subnames_flag_worker(char *, uint32_t);
133 static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t);
134 
135 /*
136  * devfs_debug() is a SYSCTL- and TUNABLE-controlled debug output function using kvprintf()
137  */
138 int
139 devfs_debug(int level, char *fmt, ...)
140 {
141 	__va_list ap;
142 
143 	__va_start(ap, fmt);
144 	if (level <= devfs_debug_enable)
145 		kvprintf(fmt, ap);
146 	__va_end(ap);
147 
148 	return 0;
149 }
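
/*
 * Illustrative use only, not part of the original source: callers pass a
 * debug level and a printf-style format, for example
 *
 *	devfs_debug(DEVFS_DEBUG_SHOW, "devfs: lost track of %s\n", dev->si_name);
 *
 * Output is only produced when 'level' is less than or equal to the
 * devfs_debug_enable sysctl/tunable.
 */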
150 
151 /*
152  * devfs_allocp() allocates a new devfs node with the specified
153  * parameters. The node is automatically linked into the topology
154  * if a parent is specified. The devfs rules and aliases are also
155  * applied to the new node.
156  */
157 struct devfs_node *
158 devfs_allocp(devfs_nodetype devfsnodetype, char *name,
159 	     struct devfs_node *parent, struct mount *mp, cdev_t dev)
160 {
161 	struct devfs_node *node = NULL;
162 	size_t namlen = strlen(name);
163 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp -1- for %s\n", name?name:"NULL");
164 
165 	node = objcache_get(devfs_node_cache, M_WAITOK);
166 
167 	bzero(node, sizeof(*node));
168 
169 	atomic_add_int(&(DEVFS_MNTDATA(mp)->leak_count), 1);
170 
171 	node->d_dev = NULL;
172 	node->nchildren = 1;
173 	node->mp = mp;
174 	node->d_dir.d_ino = devfs_fetch_ino();
175 	node->cookie_jar = 2; /* Leave 0 and 1 for '.' and '..', respectively */
176 
177 	/* Access Control members */
178 	node->mode = DEVFS_DEFAULT_MODE;	/* files access mode and type */
179 	node->uid = DEVFS_DEFAULT_UID;		/* owner user id */
180 	node->gid = DEVFS_DEFAULT_GID;		/* owner group id */
181 
182 	switch (devfsnodetype) {
183 	case Proot:
184 		/* Ensure that we don't recycle the root vnode */
185 		node->flags |= DEVFS_NODE_LINKED;
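		/* FALLTHROUGH: the root node is also a directory */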
186 	case Pdir:
187 		TAILQ_INIT(DEVFS_DENODE_HEAD(node));
188 		node->d_dir.d_type = DT_DIR;
189 		node->nchildren = 2;
190 		break;
191 
192 	case Plink:
193 		node->d_dir.d_type = DT_LNK;
194 		break;
195 
196 	case Preg:
197 		node->d_dir.d_type = DT_REG;
198 		break;
199 
200 	case Pdev:
201 		if (dev != NULL) {
202 			node->d_dir.d_type = DT_CHR;
203 			node->d_dev = dev;
204 			node->d_dir.d_ino = dev->si_inode;
205 
206 			node->mode = dev->si_perms;	/* files access mode and type */
207 			node->uid = dev->si_uid;		/* owner user id */
208 			node->gid = dev->si_gid;		/* owner group id */
209 
210 			devfs_alias_check_create(node);
211 		}
212 		break;
213 
214 	default:
215 		panic("devfs_allocp: unknown node type");
216 	}
217 
218 	node->v_node = NULL;
219 	node->node_type = devfsnodetype;
220 
221 	/* Init the dirent structure of each devfs vnode */
222 	KKASSERT(namlen < 256);
223 
224 	node->d_dir.d_namlen = namlen;
225 	node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
226 	memcpy(node->d_dir.d_name, name, namlen);
227 	node->d_dir.d_name[namlen] = '\0';
228 
229 	/* Initialize the parent node element */
230 	node->parent = parent;
231 
232 	/* Apply rules */
233 	devfs_rule_check_apply(node);
234 
235 	/* xtime members */
236 	nanotime(&node->atime);
237 	node->mtime = node->ctime = node->atime;
238 
239 	/*
240 	 * Associate with parent as last step, clean out namecache
241 	 * reference.
242 	 */
243 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp: about to insert node\n");
244 	if ((parent != NULL) &&
245 	    ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
246 		devfs_debug(DEVFS_DEBUG_DEBUG,
247 			    "devfs_allocp: node inserted %p\n",
248 			    node);
249 		parent->nchildren++;
250 		node->cookie = parent->cookie_jar++;
251 		node->flags |= DEVFS_NODE_LINKED;
252 		TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);
253 
254 		/* This forces negative namecache lookups to clear */
255 		++mp->mnt_namecache_gen;
256 	}
257 
258 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp -end:2-\n");
259 	return node;
260 }
261 
262 /*
263  * devfs_allocv() allocates a new vnode based on a devfs node.
264  */
265 int
266 devfs_allocv(struct vnode **vpp, struct devfs_node *node)
267 {
268 	struct vnode *vp;
269 	int error = 0;
270 
271 	KKASSERT(node);
272 
273 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -1-\n");
274 
275 try_again:
276 	while ((vp = node->v_node) != NULL) {
277 		error = vget(vp, LK_EXCLUSIVE);
278 		if (error != ENOENT) {
279 			*vpp = vp;
280 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv, code path 2...\n");
281 			goto out;
282 		}
283 	}
284 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -3-\n");
285 
286 	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
287 		goto out;
288 
289 	vp = *vpp;
290 
291 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -4-\n");
292 
293 	if (node->v_node != NULL) {
294 		vp->v_type = VBAD;
295 		vx_put(vp);
296 		goto try_again;
297 	}
298 
299 	vp->v_data = node;
300 	node->v_node = vp;
301 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -5-\n");
302 
303 	switch (node->node_type) {
304 	case Proot:
305 		vp->v_flag |= VROOT;
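		/* FALLTHROUGH: the root vnode is also a directory vnode */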
306 	case Pdir:
307 		vp->v_type = VDIR;
308 		break;
309 
310 	case Plink:
311 		vp->v_type = VLNK;
312 		break;
313 
314 	case Preg:
315 		vp->v_type = VREG;
316 		break;
317 
318 	case Pdev:
319 		vp->v_type = VCHR;
320 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -6-\n");
321 		KKASSERT(node->d_dev);
322 
323 		if (node->d_dev) {
324 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -7-\n");
325 			vp->v_uminor = node->d_dev->si_uminor;
326 			vp->v_umajor = 0;
327 #if 0
328 			vp->v_rdev = node->d_dev;
329 #endif
330 			v_associate_rdev(vp, node->d_dev);
331 			vp->v_ops = &node->mp->mnt_vn_spec_ops;
332 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -8-\n");
333 		} else {
334 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv: type is Pdev but d_dev is not set!!!!\n");
335 		}
336 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -9-\n");
337 		break;
338 
339 	default:
340 		panic("devfs_allocv: unknown node type");
341 	}
342 
343 out:
344 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -10-\n");
345 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -end:11-\n");
346 	return error;
347 }
348 
349 /*
350  * devfs_allocvp allocates both a devfs node (with the given settings) and a vnode
351  * based on the newly created devfs node.
352  */
353 int
354 devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
355 				char *name, struct devfs_node *parent, cdev_t dev)
356 {
357 	struct devfs_node *node;
358 
359 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocvp -1-\n");
360 	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);
361 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocvp -2-\n");
362 	if (node != NULL)
363 		devfs_allocv(vpp, node);
364 	else
365 		*vpp = NULL;
366 
367 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocvp -end:3-\n");
368 
369 	return 0;
370 }
371 
372 /*
373  * Destroy the devfs_node.  The node must be unlinked from the topology.
374  *
375  * This function will also destroy any vnode association with the node
376  * and device.
377  *
378  * The cdev_t itself remains intact.
379  */
380 int
381 devfs_freep(struct devfs_node *node)
382 {
383 	struct vnode *vp;
384 
385 	KKASSERT(node);
386 	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
387 		 (node->node_type == Proot));
388 	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
389 
390 	atomic_subtract_int(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
391 	if (node->symlink_name)	{
392 		kfree(node->symlink_name, M_DEVFS);
393 		node->symlink_name = NULL;
394 	}
395 
396 	/*
397 	 * Remove the node from the orphan list if it is still on it.
398 	 */
399 	if (node->flags & DEVFS_ORPHANED)
400 		devfs_tracer_del_orphan(node);
401 
402 	/*
403 	 * Disassociate the vnode from the node.  This also prevents the
404 	 * vnode's reclaim code from double-freeing the node.
405 	 */
406 	if ((vp = node->v_node) != NULL) {
407 #if 0
408 		vp->v_rdev = NULL;
409 #endif
410 		v_release_rdev(vp);
411 		vp->v_data = NULL;
412 		node->v_node = NULL;
413 	}
414 	if (node->d_dir.d_name)
415 		kfree(node->d_dir.d_name, M_DEVFS);
416 	node->flags |= DEVFS_DESTROYED;
417 
418 	objcache_put(devfs_node_cache, node);
419 
420 	return 0;
421 }
422 
423 /*
424  * Unlink the devfs node from the topology and add it to the orphan list.
425  * The node will later be destroyed by freep.
426  *
427  * Any vnode association, including the v_rdev and v_data, remains intact
428  * until the freep.
429  */
430 int
431 devfs_unlinkp(struct devfs_node *node)
432 {
433 	struct devfs_node *parent;
434 	KKASSERT(node);
435 
436 	devfs_tracer_add_orphan(node);
437 	devfs_debug(DEVFS_DEBUG_DEBUG,
438 		    "devfs_unlinkp for %s\n", node->d_dir.d_name);
439 	parent = node->parent;
440 
441 	/*
442 	 * If the parent is known we can unlink the node out of the topology
443 	 */
444 	if (parent)	{
445 		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
446 		parent->nchildren--;
447 		KKASSERT((parent->nchildren >= 0));
448 		node->flags &= ~DEVFS_NODE_LINKED;
449 	}
450 	node->parent = NULL;
451 	return 0;
452 }
453 
454 /*
455  * devfs_reaperp() is a recursive function that iterates through the whole
456  * topology, unlinking and freeing all devfs nodes.
457  */
458 int
459 devfs_reaperp(struct devfs_node *node)
460 {
461 	struct devfs_node *node1, *node2;
462 
463 	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
464 		devfs_debug(DEVFS_DEBUG_DEBUG,
465 			    "This node is Pdir or Proot; has %d children\n",
466 			    node->nchildren);
467 		if (node->nchildren > 2) {
468 			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
469 					      link, node2) {
470 				devfs_reaperp(node1);
471 			}
472 		}
473 	}
474 	devfs_unlinkp(node);
475 	devfs_freep(node);
476 
477 	return 0;
478 }
479 
480 /*
481  * devfs_gc() is the devfs garbage collector. It takes care of unlinking and
482  * freeing a node, but also removes empty directories and links that point
483  * to the node being deleted via the devfs auto-link mechanism.
484  */
485 int
486 devfs_gc(struct devfs_node *node)
487 {
488 	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;
489 
490 	devfs_gc_links(root_node, node, node->nlinks);
491 	devfs_unlinkp(node);
492 	devfs_gc_dirs(root_node);
493 
494 	devfs_freep(node);
495 
496 	return 0;
497 }
498 
499 /*
500  * devfs_gc_dirs() is a helper function for devfs_gc, unlinking and freeing
501  * empty directories.
502  */
503 static int
504 devfs_gc_dirs(struct devfs_node *node)
505 {
506 	struct devfs_node *node1, *node2;
507 
508 	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
509 		devfs_debug(DEVFS_DEBUG_DEBUG,
510 			    "This node is Pdir or Proot; has %d children\n",
511 			    node->nchildren);
512 		if (node->nchildren > 2) {
513 			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
514 					      link, node2) {
515 				devfs_gc_dirs(node1);
516 			}
517 		}
518 
519 		if (node->nchildren == 2) {
520 			devfs_debug(DEVFS_DEBUG_DEBUG,
521 				    "This node is called %s and it is empty\n",
522 				    node->d_dir.d_name);
523 			devfs_unlinkp(node);
524 			devfs_freep(node);
525 		}
526 	}
527 
528 	return 0;
529 }
530 
531 /*
532  * devfs_gc_links() is a helper function for devfs_gc, unlinking and freeing
533  * auto-linked nodes linking to the node being deleted.
534  */
535 static int
536 devfs_gc_links(struct devfs_node *node, struct devfs_node *target,
537 	       size_t nlinks)
538 {
539 	struct devfs_node *node1, *node2;
540 
541 	if (nlinks > 0) {
542 		if ((node->node_type == Proot) || (node->node_type == Pdir)) {
543 			devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren);
544 			if (node->nchildren > 2) {
545 				TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2)	{
546 					nlinks = devfs_gc_links(node1, target, nlinks);
547 				}
548 			}
549 		} else if (node->link_target == target) {
550 			nlinks--;
551 			devfs_unlinkp(node);
552 			devfs_freep(node);
553 		}
554 	}
555 
556 	KKASSERT(nlinks >= 0);
557 
558 	return nlinks;
559 }
560 
561 /*
562  * devfs_create_dev() is the asynchronous entry point for device creation.
563  * It just sends a message with the relevant details to the devfs core.
564  *
565  * This function will reference the passed device.  The reference is owned
566  * by devfs and represents all of the device's node associations.
567  */
568 int
569 devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
570 {
571 	__uint64_t id;
572 	devfs_debug(DEVFS_DEBUG_DEBUG,
573 		    "devfs_create_dev -1-, name: %s (%p)\n",
574 		    dev->si_name, dev);
575 	reference_dev(dev);
576 	id = devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);
577 	devfs_debug(DEVFS_DEBUG_DEBUG,
578 		    "devfs_create_dev -end:2- (unique id: %x) / (%p)\n",
579 		    id, dev);
580 	return 0;
581 }
582 
583 /*
584  * devfs_destroy_dev() is the asynchronous entry point for device destruction.
585  * It just sends a message with the relevant details to the devfs core.
586  */
587 int
588 devfs_destroy_dev(cdev_t dev)
589 {
590 	devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
591 	return 0;
592 }
593 
594 /*
595  * devfs_mount_add() is the synchronous entry point for adding a new devfs
596  * mount.  It sends a synchronous message with the relevant details to the
597  * devfs core.
598  */
599 int
600 devfs_mount_add(struct devfs_mnt_data *mnt)
601 {
602 	devfs_msg_t msg;
603 
604 	msg = devfs_msg_get();
605 	msg->mdv_mnt = mnt;
606 	msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
607 	devfs_msg_put(msg);
608 
609 	return 0;
610 }
611 
612 /*
613  * devfs_mount_del() is the synchronous entry point for removing a devfs mount.
614  * It sends a synchronous message with the relevant details to the devfs core.
615  */
616 int
617 devfs_mount_del(struct devfs_mnt_data *mnt)
618 {
619 	devfs_msg_t msg;
620 
621 	msg = devfs_msg_get();
622 	msg->mdv_mnt = mnt;
623 	msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
624 	devfs_msg_put(msg);
625 
626 	return 0;
627 }
628 
629 /*
630  * devfs_destroy_subnames() is the synchronous entry point for device destruction
631  * by subname. It just sends a message with the relevant details to the devfs core.
632  */
633 int
634 devfs_destroy_subnames(char *name)
635 {
636 	devfs_msg_t msg;
637 
638 	msg = devfs_msg_get();
639 	msg->mdv_load = name;
640 	msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
641 	devfs_msg_put(msg);
642 	return 0;
643 }
644 
645 int
646 devfs_clr_subnames_flag(char *name, uint32_t flag)
647 {
648 	devfs_msg_t msg;
649 
650 	msg = devfs_msg_get();
651 	msg->mdv_flags.name = name;
652 	msg->mdv_flags.flag = flag;
653 	msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
654 	devfs_msg_put(msg);
655 
656 	return 0;
657 }
658 
659 int
660 devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
661 {
662 	devfs_msg_t msg;
663 
664 	msg = devfs_msg_get();
665 	msg->mdv_flags.name = name;
666 	msg->mdv_flags.flag = flag;
667 	msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
668 	devfs_msg_put(msg);
669 
670 	return 0;
671 }
672 
673 /*
674  * devfs_create_all_dev is the asynchronous entry point to trigger device
675  * node creation.  It just sends a message with the relevant details to
676  * the devfs core.
677  */
678 int
679 devfs_create_all_dev(struct devfs_node *root)
680 {
681 	devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
682 	return 0;
683 }
684 
685 /*
686  * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
687  * devices with a specific set of dev_ops and minor.  It just sends a
688  * message with the relevant details to the devfs core.
689  */
690 int
691 devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
692 {
693 	devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
694 	return 0;
695 }
696 
697 /*
698  * devfs_clone_handler_add is the synchronous entry point to add a new
699  * clone handler.  It just sends a message with the relevant details to
700  * the devfs core.
701  */
702 int
703 devfs_clone_handler_add(char *name, d_clone_t *nhandler)
704 {
705 	devfs_msg_t msg;
706 
707 	msg = devfs_msg_get();
708 	msg->mdv_chandler.name = name;
709 	msg->mdv_chandler.nhandler = nhandler;
710 	msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
711 	devfs_msg_put(msg);
712 	return 0;
713 }
714 
715 /*
716  * devfs_clone_handler_del is the synchronous entry point to remove a
717  * clone handler.  It just sends a message with the relevant details to
718  * the devfs core.
719  */
720 int
721 devfs_clone_handler_del(char *name)
722 {
723 	devfs_msg_t msg;
724 
725 	msg = devfs_msg_get();
726 	msg->mdv_chandler.name = name;
727 	msg->mdv_chandler.nhandler = NULL;
728 	msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
729 	devfs_msg_put(msg);
730 	return 0;
731 }
732 
733 /*
734  * devfs_find_device_by_name is the synchronous entry point to find a
735  * device given its name.  It sends a synchronous message with the
736  * relevant details to the devfs core and returns the answer.
737  */
738 cdev_t
739 devfs_find_device_by_name(const char *fmt, ...)
740 {
741 	cdev_t found = NULL;
742 	devfs_msg_t msg;
743 	char target[PATH_MAX+1];
744 	__va_list ap;
745 	int i;
746 
747 	if (fmt == NULL)
748 		return NULL;
749 
750 
751 	__va_start(ap, fmt);
752 	i = kvcprintf(fmt, NULL, target, 10, ap);
753 	target[i] = '\0';
754 	__va_end(ap);
755 
756 
757 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_find_device_by_name: %s -1-\n", target);
758 	msg = devfs_msg_get();
759 	msg->mdv_name = target;
760 	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
761 	found = msg->mdv_cdev;
762 	devfs_msg_put(msg);
763 
764 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_find_device_by_name found? %s  -end:2-\n", (found)?"YES":"NO");
765 	return found;
766 }
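
/*
 * Illustrative sketch, not part of the original source: the argument is a
 * printf-style format for a name relative to the devfs root, so a caller
 * might do something like
 *
 *	cdev_t dev;
 *
 *	dev = devfs_find_device_by_name("ptm/%d", unit);
 *	if (dev == NULL)
 *		return (ENOENT);
 *
 * where "ptm/%d" and 'unit' are purely hypothetical.
 */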
767 
768 /*
769  * devfs_find_device_by_udev is the synchronous entry point to find a
770  * device given its udev number.  It sends a synchronous message with
771  * the relevant details to the devfs core and returns the answer.
772  */
773 cdev_t
774 devfs_find_device_by_udev(udev_t udev)
775 {
776 	cdev_t found = NULL;
777 	devfs_msg_t msg;
778 
779 	msg = devfs_msg_get();
780 	msg->mdv_udev = udev;
781 	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
782 	found = msg->mdv_cdev;
783 	devfs_msg_put(msg);
784 
785 	devfs_debug(DEVFS_DEBUG_DEBUG,
786 		    "devfs_find_device_by_udev found? %s  -end:3-\n",
787 		    ((found) ? found->si_name:"NO"));
788 	return found;
789 }
790 
791 /*
792  * devfs_make_alias is the asynchronous entry point to register an alias
793  * for a device.  It just sends a message with the relevant details to the
794  * devfs core.
795  */
796 int
797 devfs_make_alias(char *name, cdev_t dev_target)
798 {
799 	struct devfs_alias *alias;
800 
801 	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
802 	memcpy(alias->name, name, strlen(name) + 1);
803 	alias->dev_target = dev_target;
804 
805 	devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
806 	return 0;
807 }
808 
809 /*
810  * devfs_apply_rules is the asynchronous entry point to trigger application
811  * of all rules.  It just sends a message with the relevant details to the
812  * devfs core.
813  */
814 int
815 devfs_apply_rules(char *mntto)
816 {
817 	char *new_name;
818 	size_t	namelen;
819 
820 	namelen = strlen(mntto) + 1;
821 
822 	new_name = kmalloc(namelen, M_DEVFS, M_WAITOK);
823 
824 	memcpy(new_name, mntto, namelen);
825 
826 	devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);
827 	return 0;
828 }
829 
830 /*
831  * devfs_reset_rules is the asynchronous entry point to trigger reset of all rules.
832  * It just sends a message with the relevant details to the devfs core.
833  */
834 int
835 devfs_reset_rules(char *mntto)
836 {
837 	char *new_name;
838 	size_t	namelen;
839 
840 	namelen = strlen(mntto) + 1;
841 
842 	new_name = kmalloc(namelen, M_DEVFS, M_WAITOK);
843 
844 	memcpy(new_name, mntto, namelen);
845 
846 	devfs_msg_send_name(DEVFS_RESET_RULES, new_name);
847 	return 0;
848 }
849 
850 
851 /*
852  * devfs_scan_callback is the synchronous entry point to call a callback
853  * on all cdevs.
854  * It sends a synchronous message with the relevant details to the devfs core.
855  */
856 int
857 devfs_scan_callback(devfs_scan_t *callback)
858 {
859 	devfs_msg_t msg;
860 
861 	/* Make sure that function pointers have the size of a generic pointer (unnecessary) */
862 	KKASSERT(sizeof(callback) == sizeof(void *));
863 
864 	msg = devfs_msg_get();
865 	msg->mdv_load = callback;
866 	msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
867 	devfs_msg_put(msg);
868 
869 	return 0;
870 }
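
/*
 * Illustrative sketch, not part of the original source: judging from the
 * callback(dev) invocation in devfs_scan_callback_worker() below, a scan
 * callback simply receives each cdev on devfs_dev_list, e.g.
 *
 *	static void
 *	my_scan_cb(cdev_t dev)
 *	{
 *		kprintf("devfs knows about %s\n", dev->si_name);
 *	}
 *
 *	devfs_scan_callback(my_scan_cb);
 *
 * my_scan_cb is hypothetical; the authoritative devfs_scan_t signature is
 * the one declared in devfs.h.
 */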
871 
872 
873 /*
874  * Acts as a message drain. Any message that is replied to here gets destroyed and
875  * the memory freed.
876  */
877 static void
878 devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
879 {
880 	devfs_msg_put((devfs_msg_t)msg);
881 }
882 
883 /*
884  * devfs_msg_get allocates a new devfs msg and returns it.
885  */
886 devfs_msg_t
887 devfs_msg_get(void)
888 {
889 	return objcache_get(devfs_msg_cache, M_WAITOK);
890 }
891 
892 /*
893  * devfs_msg_put deallocates a given devfs msg.
894  */
895 int
896 devfs_msg_put(devfs_msg_t msg)
897 {
898 	objcache_put(devfs_msg_cache, msg);
899 	return 0;
900 }
901 
902 /*
903  * devfs_msg_send is the generic asynchronous message sending facility
904  * for devfs. By default the reply port is the automatic disposal port.
905  */
906 __uint32_t
907 devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
908 {
909 	lwkt_port_t port = &devfs_msg_port;
910 
911 	lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);
912 
913 	devfs_msg->hdr.u.ms_result = cmd;
914 	devfs_msg->id = atomic_fetchadd_int(&msg_id, 1);
915 
916 	lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
917 
918 	return devfs_msg->id;
919 }
920 
921 /*
922  * devfs_msg_send_sync is the generic synchronous message sending
923  * facility for devfs. It initializes a local reply port and waits
924  * for the core's answer. This answer is then returned.
925  */
926 devfs_msg_t
927 devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
928 {
929 	struct lwkt_port rep_port;
930 	devfs_msg_t	msg_incoming;
931 	lwkt_port_t port = &devfs_msg_port;
932 
933 	lwkt_initport_thread(&rep_port, curthread);
934 	lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);
935 
936 	devfs_msg->hdr.u.ms_result = cmd;
937 	devfs_msg->id = atomic_fetchadd_int(&msg_id, 1);
938 
939 	lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
940 	msg_incoming = lwkt_waitport(&rep_port, 0);
941 
942 	return msg_incoming;
943 }
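
/*
 * Illustrative sketch taken from this file's own synchronous entry points
 * (e.g. devfs_mount_add() above): the usual pattern around
 * devfs_msg_send_sync() is
 *
 *	devfs_msg_t msg;
 *
 *	msg = devfs_msg_get();
 *	msg->mdv_mnt = mnt;
 *	msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
 *	devfs_msg_put(msg);
 */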
944 
945 /*
946  * sends a message with a generic argument.
947  */
948 __uint32_t
949 devfs_msg_send_generic(uint32_t cmd, void *load)
950 {
951 	devfs_msg_t devfs_msg = devfs_msg_get();
952 	devfs_msg->mdv_load = load;
953 
954 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_send_generic -1- (%p)\n", load);
955 
956 	return devfs_msg_send(cmd, devfs_msg);
957 }
958 
959 /*
960  * sends a message with a name argument.
961  */
962 __uint32_t
963 devfs_msg_send_name(uint32_t cmd, char *name)
964 {
965 	devfs_msg_t devfs_msg = devfs_msg_get();
966 	devfs_msg->mdv_name = name;
967 
968 	return devfs_msg_send(cmd, devfs_msg);
969 }
970 
971 /*
972  * sends a message with a mount argument.
973  */
974 __uint32_t
975 devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
976 {
977 	devfs_msg_t devfs_msg = devfs_msg_get();
978 	devfs_msg->mdv_mnt = mnt;
979 
980 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_send_mp -1- (%p)\n", mnt);
981 
982 	return devfs_msg_send(cmd, devfs_msg);
983 }
984 
985 /*
986  * sends a message with an ops argument.
987  */
988 __uint32_t
989 devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
990 {
991 	devfs_msg_t devfs_msg = devfs_msg_get();
992 	devfs_msg->mdv_ops.ops = ops;
993 	devfs_msg->mdv_ops.minor = minor;
994 
995 	return devfs_msg_send(cmd, devfs_msg);
996 }
997 
998 /*
999  * sends a message with a clone handler argument.
1000  */
1001 __uint32_t
1002 devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
1003 {
1004 	devfs_msg_t devfs_msg = devfs_msg_get();
1005 	devfs_msg->mdv_chandler.name = name;
1006 	devfs_msg->mdv_chandler.nhandler = handler;
1007 
1008 	return devfs_msg_send(cmd, devfs_msg);
1009 }
1010 
1011 /*
1012  * sends a message with a device argument.
1013  */
1014 __uint32_t
1015 devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
1016 {
1017 	devfs_msg_t devfs_msg = devfs_msg_get();
1018 	devfs_msg->mdv_dev.dev = dev;
1019 	devfs_msg->mdv_dev.uid = uid;
1020 	devfs_msg->mdv_dev.gid = gid;
1021 	devfs_msg->mdv_dev.perms = perms;
1022 
1023 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_send_dev -1- (%p)\n", dev);
1024 
1025 	return devfs_msg_send(cmd, devfs_msg);
1026 }
1027 
1028 /*
1029  * sends a message with a link argument.
1030  */
1031 __uint32_t
1032 devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
1033 {
1034 	devfs_msg_t devfs_msg = devfs_msg_get();
1035 	devfs_msg->mdv_link.name = name;
1036 	devfs_msg->mdv_link.target = target;
1037 	devfs_msg->mdv_link.mp = mp;
1038 
1039 
1040 	return devfs_msg_send(cmd, devfs_msg);
1041 }
1042 
1043 /*
1044  * devfs_msg_core is the main devfs thread. It handles all incoming messages
1045  * and calls the relevant worker functions. Using messages ensures that
1046  * events are processed in the correct order.
1047  */
1048 static void
1049 devfs_msg_core(void *arg)
1050 {
1051 	uint8_t  run = 1;
1052 	devfs_msg_t msg;
1053 	cdev_t	dev;
1054 	struct devfs_mnt_data *mnt;
1055 	struct devfs_node *node;
1056 
1057 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -1-\n");
1058 	lwkt_initport_thread(&devfs_msg_port, curthread);
1059 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -2-\n");
1060 	wakeup(td_core/*devfs_id*/);
1061 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -3-\n");
1062 
1063 	while (run) {
1064 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -loop:4-\n");
1065 		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
1066 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core, new msg: %x (unique id: %x)\n", (unsigned int)msg->hdr.u.ms_result, msg->id);
1067 		lockmgr(&devfs_lock, LK_EXCLUSIVE);
1068 		switch (msg->hdr.u.ms_result) {
1069 
1070 		case DEVFS_DEVICE_CREATE:
1071 			dev = msg->mdv_dev.dev;
1072 			devfs_debug(DEVFS_DEBUG_DEBUG,
1073 				    "devfs_msg_core device create msg %s(%p)\n",
1074 				    dev->si_name, dev);
1075 			devfs_create_dev_worker(dev,
1076 						msg->mdv_dev.uid,
1077 						msg->mdv_dev.gid,
1078 						msg->mdv_dev.perms);
1079 			break;
1080 
1081 		case DEVFS_DEVICE_DESTROY:
1082 			devfs_debug(DEVFS_DEBUG_DEBUG,
1083 				    "devfs_msg_core device destroy msg\n");
1084 			dev = msg->mdv_dev.dev;
1085 			devfs_destroy_dev_worker(dev);
1086 			break;
1087 
1088 		case DEVFS_DESTROY_SUBNAMES:
1089 			devfs_destroy_subnames_worker(msg->mdv_load);
1090 			break;
1091 
1092 		case DEVFS_DESTROY_DEV_BY_OPS:
1093 			devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
1094 							msg->mdv_ops.minor);
1095 			break;
1096 
1097 		case DEVFS_CREATE_ALL_DEV:
1098 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core device create ALL msg\n");
1099 			node = (struct devfs_node *)msg->mdv_load;
1100 			devfs_create_all_dev_worker(node);
1101 			break;
1102 
1103 		case DEVFS_MOUNT_ADD:
1104 			mnt = msg->mdv_mnt;
1105 			TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
1106 			devfs_create_all_dev_worker(mnt->root_node);
1107 			break;
1108 
1109 		case DEVFS_MOUNT_DEL:
1110 			mnt = msg->mdv_mnt;
1111 			TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
1112 			devfs_reaperp(mnt->root_node);
1113 			if (mnt->leak_count) {
1114 				devfs_debug(DEVFS_DEBUG_SHOW,
1115 					    "Leaked %d devfs_node elements!\n",
1116 					    mnt->leak_count);
1117 			}
1118 			break;
1119 
1120 		case DEVFS_CHANDLER_ADD:
1121 			devfs_chandler_add_worker(msg->mdv_chandler.name, msg->mdv_chandler.nhandler);
1122 			break;
1123 
1124 		case DEVFS_CHANDLER_DEL:
1125 			devfs_chandler_del_worker(msg->mdv_chandler.name);
1126 			break;
1127 
1128 		case DEVFS_FIND_DEVICE_BY_NAME:
1129 			devfs_find_device_by_name_worker(msg);
1130 			break;
1131 
1132 		case DEVFS_FIND_DEVICE_BY_UDEV:
1133 			devfs_find_device_by_udev_worker(msg);
1134 			break;
1135 
1136 		case DEVFS_MAKE_ALIAS:
1137 			devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
1138 			break;
1139 
1140 		case DEVFS_APPLY_RULES:
1141 			devfs_apply_reset_rules_caller(msg->mdv_name, 1);
1142 			break;
1143 
1144 		case DEVFS_RESET_RULES:
1145 			devfs_apply_reset_rules_caller(msg->mdv_name, 0);
1146 			break;
1147 
1148 		case DEVFS_SCAN_CALLBACK:
1149 			devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
1150 			break;
1151 
1152 		case DEVFS_CLR_SUBNAMES_FLAG:
1153 			devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
1154 											msg->mdv_flags.flag);
1155 			break;
1156 
1157 		case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
1158 			devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
1159 														msg->mdv_flags.flag);
1160 			break;
1161 
1162 		case DEVFS_TERMINATE_CORE:
1163 			run = 0;
1164 			break;
1165 		case DEVFS_SYNC:
1166 			break;
1167 		default:
1168 			devfs_debug(DEVFS_DEBUG_DEBUG,
1169 				    "devfs_msg_core: unknown message "
1170 				    "received at core\n");
1171 			break;
1172 		}
1173 		lockmgr(&devfs_lock, LK_RELEASE);
1174 
1175 		lwkt_replymsg((lwkt_msg_t)msg, 0);
1176 	}
1177 	wakeup(td_core/*devfs_id*/);
1178 	lwkt_exit();
1179 }
1180 
1181 /*
1182  * Worker function to insert a new dev into the dev list and initialize its
1183  * permissions. It also calls devfs_propagate_dev which in turn propagates
1184  * the change to all mount points.
1185  *
1186  * The passed dev is already referenced.  This reference is eaten by this
1187  * function and represents the dev's linkage into devfs_dev_list.
1188  */
1189 static int
1190 devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
1191 {
1192 	KKASSERT(dev);
1193 	devfs_debug(DEVFS_DEBUG_DEBUG,
1194 		    "devfs_create_dev_worker -1- -%s- (%p)\n",
1195 		    dev->si_name, dev);
1196 
1197 	dev->si_uid = uid;
1198 	dev->si_gid = gid;
1199 	dev->si_perms = perms;
1200 
1201 	devfs_link_dev(dev);
1202 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev_worker -2-\n");
1203 	devfs_propagate_dev(dev, 1);
1204 
1205 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev_worker -end:3-\n");
1206 	return 0;
1207 }
1208 
1209 /*
1210  * Worker function to delete a dev from the dev list and free the cdev.
1211  * It also calls devfs_propagate_dev which in turn propagates the change
1212  * to all mount points.
1213  */
1214 static int
1215 devfs_destroy_dev_worker(cdev_t dev)
1216 {
1217 	int error;
1218 
1219 	KKASSERT(dev);
1220 	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1221 
1222 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_worker -1- %s\n", dev->si_name);
1223 	error = devfs_unlink_dev(dev);
1224 	devfs_propagate_dev(dev, 0);
1225 	if (error == 0)
1226 		release_dev(dev);	/* link ref */
1227 	release_dev(dev);
1228 	release_dev(dev);
1229 
1230 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_worker -end:5-\n");
1231 	return 0;
1232 }
1233 
1234 /*
1235  * Worker function to destroy all devices with a certain basename.
1236  * Calls devfs_destroy_dev_worker for the actual destruction.
1237  */
1238 static int
1239 devfs_destroy_subnames_worker(char *name)
1240 {
1241 	cdev_t dev, dev1;
1242 	size_t len = strlen(name);
1243 
1244 	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1245 		if (!strncmp(dev->si_name, name, len)) {
1246 			if (dev->si_name[len] != '\0') {
1247 				devfs_destroy_dev_worker(dev);
1248 				/* release_dev(dev); */
1249 			}
1250 		}
1251 	}
1252 	return 0;
1253 }
1254 
1255 static int
1256 devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
1257 {
1258 	cdev_t dev, dev1;
1259 	size_t len = strlen(name);
1260 
1261 	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1262 		if (!strncmp(dev->si_name, name, len)) {
1263 			if (dev->si_name[len] != '\0') {
1264 				dev->si_flags &= ~flag;
1265 			}
1266 		}
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 static int
1273 devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
1274 {
1275 	cdev_t dev, dev1;
1276 	size_t len = strlen(name);
1277 
1278 	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1279 		if (!strncmp(dev->si_name, name, len)) {
1280 			if (dev->si_name[len] != '\0') {
1281 				if (!(dev->si_flags & flag)) {
1282 					devfs_destroy_dev_worker(dev);
1283 				}
1284 			}
1285 		}
1286 	}
1287 
1288 	return 0;
1289 }
1290 
1291 /*
1292  * Worker function that creates all device nodes on top of a devfs
1293  * root node.
1294  */
1295 static int
1296 devfs_create_all_dev_worker(struct devfs_node *root)
1297 {
1298 	cdev_t dev;
1299 
1300 	KKASSERT(root);
1301 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_all_dev_worker -1-\n");
1302 
1303 	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
1304 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_all_dev_worker -loop:2- -%s-\n", dev->si_name);
1305 		devfs_create_device_node(root, dev, NULL, NULL);
1306 	}
1307 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_all_dev_worker -end:3-\n");
1308 	return 0;
1309 }
1310 
1311 /*
1312  * Worker function that destroys all devices that match a specific
1313  * dev_ops and/or minor. If minor is less than 0, it is not matched
1314  * against. It also propagates all changes.
1315  */
1316 static int
1317 devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
1318 {
1319 	cdev_t dev, dev1;
1320 
1321 	KKASSERT(ops);
1322 	devfs_debug(DEVFS_DEBUG_DEBUG,
1323 		    "devfs_destroy_dev_by_ops_worker -1-\n");
1324 
1325 	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1326 		if (dev->si_ops != ops)
1327 			continue;
1328 		if ((minor < 0) || (dev->si_uminor == minor)) {
1329 			devfs_debug(DEVFS_DEBUG_DEBUG,
1330 				    "devfs_destroy_dev_by_ops_worker "
1331 				    "-loop:2- -%s-\n",
1332 				    dev->si_name);
1333 			devfs_destroy_dev_worker(dev);
1334 		}
1335 	}
1336 	devfs_debug(DEVFS_DEBUG_DEBUG,
1337 		    "devfs_destroy_dev_by_ops_worker -end:3-\n");
1338 	return 0;
1339 }
1340 
1341 /*
1342  * Worker function that registers a new clone handler in devfs.
1343  */
1344 static int
1345 devfs_chandler_add_worker(char *name, d_clone_t *nhandler)
1346 {
1347 	struct devfs_clone_handler *chandler = NULL;
1348 	u_char len = strlen(name);
1349 
1350 	if (len == 0)
1351 		return 1;
1352 
1353 	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
1354 		if (chandler->namlen == len) {
1355 			if (!memcmp(chandler->name, name, len)) {
1356 				/* Clonable basename already exists */
1357 				return 1;
1358 			}
1359 		}
1360 	}
1361 
1362 	chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
1363 	memcpy(chandler->name, name, len+1);
1364 	chandler->namlen = len;
1365 	chandler->nhandler = nhandler;
1366 
1367 	TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
1368 	return 0;
1369 }
1370 
1371 /*
1372  * Worker function that removes a given clone handler from the
1373  * clone handler list.
1374  */
1375 static int
1376 devfs_chandler_del_worker(char *name)
1377 {
1378 	struct devfs_clone_handler *chandler, *chandler2;
1379 	u_char len = strlen(name);
1380 
1381 	if (len == 0)
1382 		return 1;
1383 
1384 	TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
1385 		if (chandler->namlen != len)
1386 			continue;
1387 		if (memcmp(chandler->name, name, len))
1388 			continue;
1389 		TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
1390 		kfree(chandler, M_DEVFS);
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 /*
1397  * Worker function that looks up a device by name and stores the
1398  * result in the message so that the answer is returned to the
1399  * caller when the message is replied to.
1400  */
1401 static int
1402 devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
1403 {
1404 	struct devfs_alias *alias;
1405 	cdev_t dev;
1406 	cdev_t found = NULL;
1407 
1408 	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
1409 		if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
1410 			found = dev;
1411 			break;
1412 		}
1413 	}
1414 	if (found == NULL) {
1415 		TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1416 			if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
1417 				found = alias->dev_target;
1418 				break;
1419 			}
1420 		}
1421 	}
1422 	devfs_msg->mdv_cdev = found;
1423 
1424 	return 0;
1425 }
1426 
1427 /*
1428  * Worker function that looks up a device by its udev number and stores
1429  * the result in the message so that the answer is returned to the
1430  * caller when the message is replied to.
1431  */
1432 static int
1433 devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
1434 {
1435 	cdev_t dev, dev1;
1436 	cdev_t found = NULL;
1437 
1438 	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1439 		if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
1440 			found = dev;
1441 			break;
1442 		}
1443 	}
1444 	devfs_msg->mdv_cdev = found;
1445 
1446 	return 0;
1447 }
1448 
1449 /*
1450  * Worker function that inserts a given alias into the
1451  * alias list, and propagates the alias to all mount
1452  * points.
1453  */
1454 static int
1455 devfs_make_alias_worker(struct devfs_alias *alias)
1456 {
1457 	struct devfs_alias *alias2;
1458 	size_t len = strlen(alias->name);
1459 	int found = 0;
1460 
1461 	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
1462 		if (!memcmp(alias->name, alias2->name, len)) { /* XXX */
1463 			found = 1;
1464 			break;
1465 		}
1466 	}
1467 
1468 	if (!found) {
1469 		TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
1470 		devfs_alias_propagate(alias);
1471 	} else {
1472 		devfs_debug(DEVFS_DEBUG_DEBUG,
1473 			    "Warning: duplicate devfs_make_alias for %s\n",
1474 			    alias->name);
1475 		kfree(alias, M_DEVFS);
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 /*
1482  * Function that removes and frees all aliases.
1483  */
1484 static int
1485 devfs_alias_reap(void)
1486 {
1487 	struct devfs_alias *alias, *alias2;
1488 
1489 	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1490 		TAILQ_REMOVE(&devfs_alias_list, alias, link);
1491 		kfree(alias, M_DEVFS);
1492 	}
1493 	return 0;
1494 }
1495 
1496 /*
1497  * Function that removes an alias matching a specific cdev and frees
1498  * it accordingly.
1499  */
1500 static int
1501 devfs_alias_remove(cdev_t dev)
1502 {
1503 	struct devfs_alias *alias, *alias2;
1504 
1505 	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1506 		if (alias->dev_target == dev) {
1507 			TAILQ_REMOVE(&devfs_alias_list, alias, link);
1508 			kfree(alias, M_DEVFS);
1509 		}
1510 	}
1511 	return 0;
1512 }
1513 
1514 /*
1515  * This function propagates a new alias to all mount points.
1516  */
1517 static int
1518 devfs_alias_propagate(struct devfs_alias *alias)
1519 {
1520 	struct devfs_mnt_data *mnt;
1521 
1522 	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1523 		devfs_alias_apply(mnt->root_node, alias);
1524 	}
1525 	return 0;
1526 }
1527 
1528 /*
1529  * This function recursively iterates through all device
1530  * nodes in the topology and, where applicable, creates
1531  * the relevant alias for a device node.
1532  */
1533 static int
1534 devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
1535 {
1536 	struct devfs_node *node1, *node2;
1537 
1538 	KKASSERT(alias != NULL);
1539 
1540 	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
1541 		devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren);
1542 		if (node->nchildren > 2) {
1543 			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
1544 				devfs_alias_apply(node1, alias);
1545 			}
1546 		}
1547 	} else {
1548 		if (node->d_dev == alias->dev_target)
1549 			devfs_alias_create(alias->name, node);
1550 	}
1551 	return 0;
1552 }
1553 
1554 /*
1555  * This function checks whether any alias is applicable
1556  * to the given node. If so, the alias is created.
1557  */
1558 static int
1559 devfs_alias_check_create(struct devfs_node *node)
1560 {
1561 	struct devfs_alias *alias;
1562 
1563 	TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1564 		if (node->d_dev == alias->dev_target)
1565 			devfs_alias_create(alias->name, node);
1566 	}
1567 	return 0;
1568 }
1569 
1570 /*
1571  * This function creates an alias with a given name
1572  * linking to a given devfs node. It also increments
1573  * the link count on the target node.
1574  */
1575 int
1576 devfs_alias_create(char *name_orig, struct devfs_node *target)
1577 {
1578 	struct mount *mp = target->mp;
1579 	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
1580 	struct devfs_node *linknode;
1581 	char *create_path = NULL;
1582 	char *name, name_buf[PATH_MAX];
1583 
1584 	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1585 
1586 	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);
1587 
1588 	if (create_path)
1589 		parent = devfs_resolve_or_create_path(parent, create_path, 1);
1590 
1591 
1592 	if (devfs_find_device_node_by_name(parent, name)) {
1593 		devfs_debug(DEVFS_DEBUG_DEBUG,
1594 			    "Node already exists: %s "
1595 			    "(devfs_make_alias_worker)!\n",
1596 			    name);
1597 		return 1;
1598 	}
1599 
1600 
1601 	linknode = devfs_allocp(Plink, name, parent, mp, NULL);
1602 	if (linknode == NULL)
1603 		return 1;
1604 
1605 	linknode->link_target = target;
1606 	target->nlinks++;
1607 #if 0
1608 	linknode->flags |= DEVFS_LINK;
1609 #endif
1610 
1611 	return 0;
1612 }
1613 
1614 /*
1615  * This function is called by the core and handles mount point
1616  * strings. It either calls the relevant worker
1617  * (devfs_apply_reset_rules_worker) on all mount points or
1618  * only on a specific one.
1619  */
1620 static int
1621 devfs_apply_reset_rules_caller(char *mountto, int apply)
1622 {
1623 	struct devfs_mnt_data *mnt;
1624 	size_t len = strlen(mountto);
1625 
1626 	if (mountto[0] != '*') {
1627 		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1628 			if ((len == mnt->mntonnamelen) &&
1629 				(!memcmp(mnt->mp->mnt_stat.f_mntonname, mountto, len))) {
1630 				devfs_apply_reset_rules_worker(mnt->root_node, apply);
1631 				break;
1632 			}
1633 		}
1634 	} else {
1635 		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1636 			devfs_apply_reset_rules_worker(mnt->root_node, apply);
1637 		}
1638 	}
1639 
1640 	kfree(mountto, M_DEVFS);
1641 	return 0;
1642 }
1643 
1644 /*
1645  * This worker function applies or resets, depending on the arguments, a rule
1646  * to the whole given topology. *RECURSIVE*
1647  */
1648 static int
1649 devfs_apply_reset_rules_worker(struct devfs_node *node, int apply)
1650 {
1651 	struct devfs_node *node1, *node2;
1652 
1653 	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
1654 		devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren);
1655 		if (node->nchildren > 2) {
1656 			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2)	{
1657 				devfs_apply_reset_rules_worker(node1, apply);
1658 			}
1659 		}
1660 	}
1661 
1662 	if (apply)
1663 		devfs_rule_check_apply(node);
1664 	else
1665 		devfs_rule_reset_node(node);
1666 
1667 	return 0;
1668 }
1669 
1670 
1671 /*
1672  * This function calls a given callback function for
1673  * every dev node in the devfs dev list.
1674  */
1675 static int
1676 devfs_scan_callback_worker(devfs_scan_t *callback)
1677 {
1678 	cdev_t dev, dev1;
1679 
1680 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_scan_callback: %p -1-\n", callback);
1681 
1682 	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1683 		callback(dev);
1684 	}
1685 
1686 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_scan_callback: finished\n");
1687 	return 0;
1688 }
1689 
1690 
1691 /*
1692  * This function tries to resolve a given directory, or, if it is not
1693  * found and creation is requested, creates the given directory.
1694  */
1695 static struct devfs_node *
1696 devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
1697 			    size_t name_len, int create)
1698 {
1699 	struct devfs_node *node, *found = NULL;
1700 
1701 	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
1702 		if (name_len == node->d_dir.d_namlen) {
1703 			if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
1704 				found = node;
1705 				break;
1706 			}
1707 		}
1708 	}
1709 
1710 	if ((found == NULL) && (create)) {
1711 		found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
1712 	}
1713 
1714 	return found;
1715 }
1716 
1717 /*
1718  * This function tries to resolve a complete path. If creation is requested,
1719  * any part of the path that cannot be resolved (because it doesn't exist)
1720  * is created.
1721  */
1722 struct devfs_node *
1723 devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
1724 {
1725 	struct devfs_node *node = parent;
1726 	char buf[PATH_MAX];
1727 	size_t idx = 0;
1728 
1729 
1730 	if (path == NULL)
1731 		return parent;
1732 
1733 
1734 	for (; *path != '\0' ; path++) {
1735 		if (*path != '/') {
1736 			buf[idx++] = *path;
1737 		} else {
1738 			buf[idx] = '\0';
1739 			node = devfs_resolve_or_create_dir(node, buf, idx, create);
1740 			if (node == NULL)
1741 				return NULL;
1742 			idx = 0;
1743 		}
1744 	}
1745 	buf[idx] = '\0';
1746 	return devfs_resolve_or_create_dir(node, buf, idx, create);
1747 }
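
/*
 * Illustrative example, not part of the original source: with create != 0
 * every '/'-separated component that cannot be resolved is created, so
 *
 *	parent = devfs_resolve_or_create_path(root, "a/b/c", 1);
 *
 * returns the node for "c", creating "a", "a/b" and "a/b/c" as needed.
 * The path "a/b/c" is hypothetical.
 */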
1748 
1749 /*
1750  * Takes a full path and strips it into a directory path and a name.
1751  * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It
1752  * requires a working buffer with enough size to keep the whole
1753  * fullpath.
1754  */
1755 int
1756 devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
1757 {
1758 	char *name = NULL;
1759 	char *path = NULL;
1760 	size_t len = strlen(fullpath) + 1;
1761 	int i;
1762 
1763 	KKASSERT((fullpath != NULL) && (buf != NULL) && (pathp != NULL) && (namep != NULL));
1764 
1765 	memcpy(buf, fullpath, len);
1766 
1767 	for (i = len-1; i>= 0; i--) {
1768 		if (buf[i] == '/') {
1769 			buf[i] = '\0';
1770 			name = &(buf[i+1]);
1771 			path = buf;
1772 			break;
1773 		}
1774 	}
1775 
1776 	*pathp = path;
1777 
1778 	if (name) {
1779 		*namep = name;
1780 	} else {
1781 		*namep = buf;
1782 	}
1783 
1784 	return 0;
1785 }
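
/*
 * Illustrative example, not part of the original source: for the
 * "a/b/c/foo" case described above,
 *
 *	char buf[PATH_MAX];
 *	char *path, *name;
 *
 *	devfs_resolve_name_path("a/b/c/foo", buf, &path, &name);
 *
 * leaves path pointing at "a/b/c" and name pointing at "foo", both inside
 * buf; for a plain "foo" with no '/', path comes back as NULL.
 */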
1786 
1787 /*
1788  * This function creates a new devfs node for a given device.  It can
1789  * handle a complete path as device name, and accordingly creates
1790  * the path and the final device node.
1791  *
1792  * The reference count on the passed dev remains unchanged.
1793  */
1794 struct devfs_node *
1795 devfs_create_device_node(struct devfs_node *root, cdev_t dev,
1796 			 char *dev_name, char *path_fmt, ...)
1797 {
1798 	struct devfs_node *parent, *node = NULL;
1799 	char *path = NULL;
1800 	char *name, name_buf[PATH_MAX];
1801 	__va_list ap;
1802 	int i, found;
1803 
1804 	char *create_path = NULL;
1805 	char *names = "pqrsPQRS";
1806 
1807 	if (path_fmt != NULL) {
1808 		path = kmalloc(PATH_MAX+1, M_DEVFS, M_WAITOK);
1809 
1810 		__va_start(ap, path_fmt);
1811 		i = kvcprintf(path_fmt, NULL, path, 10, ap);
1812 		path[i] = '\0';
1813 		__va_end(ap);
1814 	}
1815 
1816 	parent = devfs_resolve_or_create_path(root, path, 1);
1817 	KKASSERT(parent);
1818 
1819 	devfs_resolve_name_path(((dev_name == NULL) && (dev))?(dev->si_name):(dev_name), name_buf, &create_path, &name);
1820 
1821 	if (create_path)
1822 		parent = devfs_resolve_or_create_path(parent, create_path, 1);
1823 
1824 
1825 	if (devfs_find_device_node_by_name(parent, name)) {
1826 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: "
1827 			"DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name);
1828 		goto out;
1829 	}
1830 	devfs_debug(DEVFS_DEBUG_DEBUG, "parent->d_dir.d_name=%s\n", parent->d_dir.d_name);
1831 	node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
1832 	devfs_debug(DEVFS_DEBUG_DEBUG, "node->d_dir.d_name=%s\n", node->d_dir.d_name);
1833 
1834 #if 0
1835 	/* Ugly unix98 pty magic, to hide pty master (ptm) devices and their directory */
1836 	if ((dev) && (strlen(dev->si_name) >= 4) && (!memcmp(dev->si_name, "ptm/", 4))) {
1837 		node->parent->flags |= DEVFS_HIDDEN;
1838 		node->flags |= DEVFS_HIDDEN;
1839 	}
1840 #endif
1841 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: marker A\n");
1842 	/* Ugly pty magic, to tag pty devices as such and hide them if needed */
1843 	if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
1844 		node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1845 
1846 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: marker B\n");
1847 	if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
1848 		found = 0;
1849 		for (i = 0; i < strlen(names); i++) {
1850 			if (name[3] == names[i]) {
1851 				found = 1;
1852 				break;
1853 			}
1854 		}
1855 		if (found)
1856 			node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1857 	}
1858 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: marker C\n");
1859 
1860 out:
1861 	if (path_fmt != NULL)
1862 		kfree(path, M_DEVFS);
1863 
1864 	return node;
1865 }
1866 
1867 /*
1868  * This function finds a given device node in the topology with a given
1869  * cdev.
1870  */
1871 struct devfs_node *
1872 devfs_find_device_node(struct devfs_node *node, cdev_t target)
1873 {
1874 	struct devfs_node *node1, *node2, *found = NULL;
1875 
1876 	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
1877 		devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren);
1878 		if (node->nchildren > 2) {
1879 			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2)	{
1880 				if ((found = devfs_find_device_node(node1, target)))
1881 					return found;
1882 			}
1883 		}
1884 	} else if (node->node_type == Pdev) {
1885 		if (node->d_dev == target)
1886 			return node;
1887 	}
1888 
1889 	return NULL;
1890 }
1891 
1892 /*
1893  * This function finds a device node in the topology by its
1894  * name and returns it.
1895  */
1896 struct devfs_node *
1897 devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
1898 {
1899 	struct devfs_node *node, *found = NULL;
1900 	size_t len = strlen(target);
1901 
1902 	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
1903 		if ((len == node->d_dir.d_namlen) && (!memcmp(node->d_dir.d_name, target, len))) {
1904 			found = node;
1905 			break;
1906 		}
1907 	}
1908 
1909 	return found;
1910 }
1911 
1912 /*
1913  * This function takes a cdev and removes its devfs node in the
1914  * given topology.  The cdev remains intact.
1915  */
1916 int
1917 devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
1918 {
1919 	struct devfs_node *node, *parent;
1920 	char *name, name_buf[PATH_MAX];
1921 	char *create_path = NULL;
1922 
1923 	KKASSERT(target);
1924 
1925 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_device_node\n");
1926 	memcpy(name_buf, target->si_name, strlen(target->si_name)+1);
1927 
1928 	devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);
1929 	devfs_debug(DEVFS_DEBUG_DEBUG, "create_path: %s\n", create_path);
1930 	devfs_debug(DEVFS_DEBUG_DEBUG, "name: %s\n", name);
1931 
1932 	if (create_path)
1933 		parent = devfs_resolve_or_create_path(root, create_path, 0);
1934 	else
1935 		parent = root;
1936 	devfs_debug(DEVFS_DEBUG_DEBUG, "-> marker <-\n");
1937 	if (parent == NULL)
1938 		return 1;
1939 	devfs_debug(DEVFS_DEBUG_DEBUG, "->d_dir.d_name=%s\n", parent->d_dir.d_name);
1940 	node = devfs_find_device_node_by_name(parent, name);
1941 	devfs_debug(DEVFS_DEBUG_DEBUG,
1942 		    "->d_dir.d_name=%s\n",
1943 		    ((node) ? (node->d_dir.d_name) : "(not found)"));
1944 	if (node)
1945 		devfs_gc(node);
1946 
1947 	return 0;
1948 }
1949 
1950 /*
1951  * Set permissions and ownership for the given node.
1952  */
1953 int
1954 devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid, u_short mode, u_long flags)
1955 {
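	/* Note: the 'flags' argument is currently accepted but not applied. */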
1956 	node->mode = mode;		/* file access mode and type */
1957 	node->uid = uid;		/* owner user id */
1958 	node->gid = gid;		/* owner group id */
1959 
1960 	return 0;
1961 }
1962 
1963 /*
1964  * Propagates a device attach/detach to all mount
1965  * points. Also takes care of automatic alias removal
1966  * for a deleted cdev.
1967  */
1968 static int
1969 devfs_propagate_dev(cdev_t dev, int attach)
1970 {
1971 	struct devfs_mnt_data *mnt;
1972 
1973 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_propagate_dev -1-\n");
1974 	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1975 		devfs_debug(DEVFS_DEBUG_DEBUG,
1976 			    "devfs_propagate_dev -loop:2-\n");
1977 		if (attach) {
1978 			/* Device is being attached */
1979 			devfs_create_device_node(mnt->root_node, dev,
1980 						 NULL, NULL );
1981 		} else {
1982 			/* Device is being detached */
1983 			devfs_alias_remove(dev);
1984 			devfs_destroy_device_node(mnt->root_node, dev);
1985 		}
1986 	}
1987 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_propagate_dev -end:3-\n");
1988 	return 0;
1989 }
1990 
1991 /*
1992  * devfs_node_to_path takes a node and a buffer of at least PATH_MAX
1993  * bytes, resolves the full path from the root node and writes it in
1994  * human-readable form into the buffer.
1995  *
1996  * If DEVFS_STASH_DEPTH is less than the directory level up
1997  * to the root node, only the last DEVFS_STASH_DEPTH levels
1998  * of the path are resolved.
1999  */
2000 int
2001 devfs_node_to_path(struct devfs_node *node, char *buffer)
2002 {
2003 #define DEVFS_STASH_DEPTH	32
2004 	struct devfs_node *node_stash[DEVFS_STASH_DEPTH];
2005 	int i, offset;
2006 	memset(buffer, 0, PATH_MAX);
2007 
2008 	for (i = 0; (i < DEVFS_STASH_DEPTH) && (node->node_type != Proot); i++) {
2009 		node_stash[i] = node;
2010 		node = node->parent;
2011 	}
2012 	i--;
2013 
2014 	for (offset = 0; i >= 0; i--) {
2015 		memcpy(buffer+offset, node_stash[i]->d_dir.d_name, node_stash[i]->d_dir.d_namlen);
2016 		offset += node_stash[i]->d_dir.d_namlen;
2017 		if (i > 0) {
2018 			*(buffer+offset) = '/';
2019 			offset++;
2020 		}
2021 	}
2022 #undef DEVFS_STASH_DEPTH
2023 	return 0;
2024 }
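
/*
 * Usage sketch (illustrative; 'node' is assumed to be a valid devfs node):
 *
 *	char path[PATH_MAX];
 *
 *	devfs_node_to_path(node, path);
 *	devfs_debug(DEVFS_DEBUG_DEBUG, "resolved path: %s\n", path);
 *
 * The resulting path is relative to the devfs root and carries no
 * leading '/'.
 */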
2025 
2026 /*
2027  * devfs_clone either derives the base name of a device name by
2028  * returning, in *namlenp, its length without trailing digits, or,
2029  * if clone != 0, calls the matching clone handler to obtain a new
2030  * device, which is returned in *devp.  Returns 0 on success.
2031  */
2032 int
2033 devfs_clone(char *name, size_t *namlenp, cdev_t *devp, int clone, struct ucred *cred)
2034 {
2035 	KKASSERT(namlenp);
2036 
2037 	size_t len = *namlenp;
2038 	int error = 1;
2039 	struct devfs_clone_handler *chandler;
2040 	struct dev_clone_args ap;
2041 
2042 	if (!clone) {
2043 		for (; (len > 0) && (DEVFS_ISDIGIT(name[len-1])); len--);
2044 	}
2045 
2046 	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
2047 		devfs_debug(DEVFS_DEBUG_DEBUG, "len=%d, chandler->namlen=%d\n", (int)len, chandler->namlen);
2048 		devfs_debug(DEVFS_DEBUG_DEBUG, "name=%s, chandler->name=%s\n", name, chandler->name);
2049 		if ((chandler->namlen == len) &&
2050 			(!memcmp(chandler->name, name, len)) &&
2051 			(chandler->nhandler)) {
2052 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_clone: found clone handler for the base name at %p\n", chandler->nhandler);
2053 			if (clone) {
2054 				ap.a_dev = NULL;
2055 				ap.a_name = name;
2056 				ap.a_namelen = len;
2057 				ap.a_cred = cred;
2058 				error = (chandler->nhandler)(&ap);
2059 				KKASSERT(devp);
2060 				*devp = ap.a_dev;
2061 			} else {
2062 				*namlenp = len;
2063 				error = 0;
2064 			}
2065 
2066 			break;
2067 		}
2068 	}
2069 
2070 	return error;
2071 }
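
/*
 * Usage sketch (illustrative; the handler base name "foo" and 'cred' are
 * hypothetical): with clone == 0 a name such as "foo3" registered under
 * the base name "foo" simply yields *namlenp == 3, while with clone != 0
 * the matching handler is asked for a fresh device:
 *
 *	size_t namlen = strlen(name);
 *	cdev_t ndev = NULL;
 *
 *	if (devfs_clone(name, &namlen, &ndev, 1, cred) == 0)
 *		... ndev now refers to the freshly cloned device ...
 */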
2072 
2073 
2074 /*
2075  * Registers a new orphan in the orphan list.
2076  */
2077 void
2078 devfs_tracer_add_orphan(struct devfs_node *node)
2079 {
2080 	struct devfs_orphan *orphan;
2081 
2082 	KKASSERT(node);
2083 	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
2084 	orphan->node = node;
2085 
2086 	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
2087 	node->flags |= DEVFS_ORPHANED;
2088 	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
2089 }
2090 
2091 /*
2092  * Removes an orphan from the orphan list.
2093  */
2094 void
2095 devfs_tracer_del_orphan(struct devfs_node *node)
2096 {
2097 	struct devfs_orphan *orphan;
2098 
2099 	KKASSERT(node);
2100 
2101 	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link)	{
2102 		if (orphan->node == node) {
2103 			node->flags &= ~DEVFS_ORPHANED;
2104 			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
2105 			kfree(orphan, M_DEVFS);
2106 			break;
2107 		}
2108 	}
2109 }
2110 
2111 /*
2112  * Counts the orphans in the orphan list and, if cleanup is
2113  * specified, also removes each orphan from the list and frees
2114  * both the orphan entry and its node.
2115  */
2116 size_t
2117 devfs_tracer_orphan_count(struct mount *mp, int cleanup)
2118 {
2119 	struct devfs_orphan *orphan, *orphan2;
2120 	size_t count = 0;
2121 
2122 	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2)	{
2123 		count++;
2124 		if (cleanup) {
2125 			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
2126 			orphan->node->flags &= ~DEVFS_ORPHANED;
2127 			devfs_freep(orphan->node);
2128 			kfree(orphan, M_DEVFS);
2129 		}
2130 	}
2131 
2132 	return count;
2133 }
2134 
2135 /*
2136  * Allocates a new inode number by incrementing the global d_ino
2137  * counter while holding its spinlock.
2138  */
2139 static ino_t
2140 devfs_fetch_ino(void)
2141 {
2142 	ino_t	ret;
2143 
2144 	spin_lock_wr(&ino_lock);
2145 	ret = d_ino++;
2146 	spin_unlock_wr(&ino_lock);
2147 
2148 	return ret;
2149 }
2150 
2151 /*
2152  * Allocates a new cdev and initializes its most basic
2153  * fields.
2154  */
2155 cdev_t
2156 devfs_new_cdev(struct dev_ops *ops, int minor)
2157 {
2158 	cdev_t dev = sysref_alloc(&cdev_sysref_class);
2159 	sysref_activate(&dev->si_sysref);
2160 	reference_dev(dev);
2161 	devfs_debug(DEVFS_DEBUG_DEBUG,
2162 		    "new_cdev: clearing first %d bytes\n",
2163 		    (int)offsetof(struct cdev, si_sysref));
2164 	memset(dev, 0, offsetof(struct cdev, si_sysref));
2165 
2166 	dev->si_uid = 0;
2167 	dev->si_gid = 0;
2168 	dev->si_perms = 0;
2169 	dev->si_drv1 = NULL;
2170 	dev->si_drv2 = NULL;
2171 	dev->si_lastread = 0;		/* time_second */
2172 	dev->si_lastwrite = 0;		/* time_second */
2173 
2174 	dev->si_ops = ops;
2175 	dev->si_flags = 0;
2176 	dev->si_umajor = 0;
2177 	dev->si_uminor = minor;
2178 	dev->si_inode = makeudev(devfs_reference_ops(ops), minor);
2179 
2180 	return dev;
2181 }
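
/*
 * Minimal sketch (illustrative; 'foo_ops', 'unit' and 'sc' are
 * hypothetical): a caller creating a device would typically obtain the
 * cdev here and then attach its driver state, e.g.
 *
 *	cdev_t dev = devfs_new_cdev(&foo_ops, unit);
 *
 *	dev->si_drv1 = sc;
 *
 * before linking the device and propagating it to the mount points.
 */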
2182 
2183 static void
2184 devfs_cdev_terminate(cdev_t dev)
2185 {
2186 	int locked = 0;
2187 
2188 	/* If we do not already hold the devfs lock exclusively, acquire it */
2189 	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
2190 		lockmgr(&devfs_lock, LK_EXCLUSIVE);
2191 		locked = 1;
2192 	}
2193 
2194 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_cdev_terminate: Taking care of dev->si_name=%s\n", dev->si_name);
2195 
2196 	/* Propagate destruction, just in case */
2197 	devfs_propagate_dev(dev, 0);
2198 
2199 	/* If we acquired the lock, we also get rid of it */
2200 	if (locked)
2201 		lockmgr(&devfs_lock, LK_RELEASE);
2202 
2203 	devfs_release_ops(dev->si_ops);
2204 
2205 	/* Finally destroy the device */
2206 	sysref_put(&dev->si_sysref);
2207 }
2208 
2209 /*
2210  * Links a given cdev into the dev list.
2211  */
2212 int
2213 devfs_link_dev(cdev_t dev)
2214 {
2215 	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
2216 	dev->si_flags |= SI_DEVFS_LINKED;
2217 	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);
2218 
2219 	return 0;
2220 }
2221 
2222 /*
2223  * Removes a given cdev from the dev list.  The caller is responsible for
2224  * releasing the reference on the device associated with the linkage.
2225  *
2226  * Returns EALREADY if the dev has already been unlinked.
2227  */
2228 static int
2229 devfs_unlink_dev(cdev_t dev)
2230 {
2231 	if ((dev->si_flags & SI_DEVFS_LINKED)) {
2232 		TAILQ_REMOVE(&devfs_dev_list, dev, link);
2233 		dev->si_flags &= ~SI_DEVFS_LINKED;
2234 		return (0);
2235 	}
2236 	return (EALREADY);
2237 }
2238 
2239 int
2240 devfs_node_is_accessible(struct devfs_node *node)
2241 {
2242 	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
2243 		return 1;
2244 	else
2245 		return 0;
2246 }
2247 
2248 int
2249 devfs_reference_ops(struct dev_ops *ops)
2250 {
2251 	int unit;
2252 
2253 	if (ops->head.refs == 0) {
2254 		ops->head.id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
2255 		if (ops->head.id == -1) {
2256 			/* Ran out of unique ids */
2257 			kprintf("devfs_reference_ops: WARNING: ran out of unique ids\n");
2258 		}
2259 	}
2260 	unit = ops->head.id;
2261 	++ops->head.refs;
2262 
2263 	return unit;
2264 }
2265 
2266 void
2267 devfs_release_ops(struct dev_ops *ops)
2268 {
2269 	--ops->head.refs;
2270 
2271 	if (ops->head.refs == 0) {
2272 		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), ops->head.id);
2273 	}
2274 }
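
/*
 * Intended pairing (illustrative): every devfs_reference_ops() call is
 * balanced by a devfs_release_ops() call, so the ops id taken from the
 * clone bitmap can be recycled once the last device using the ops goes
 * away; devfs_new_cdev() takes the reference and devfs_cdev_terminate()
 * drops it.
 */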
2275 
2276 void
2277 devfs_config(void *arg)
2278 {
2279 	devfs_msg_t msg;
2280 
2281 	msg = devfs_msg_get();
2282 
2283 	kprintf("devfs_config: sync'ing up\n");
2284 	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
2285 	devfs_msg_put(msg);
2286 }
2287 
2288 /*
2289  * Called on init of devfs; creates the objcaches and
2290  * spawns off the devfs core thread. Also initializes
2291  * locks.
2292  */
2293 static void
2294 devfs_init(void)
2295 {
2296 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
2297 	/* Create objcaches for nodes, msgs and devs */
2298 	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
2299 			NULL, NULL, NULL,
2300 			objcache_malloc_alloc,
2301 			objcache_malloc_free,
2302 			&devfs_node_malloc_args );
2303 
2304 	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
2305 			NULL, NULL, NULL,
2306 			objcache_malloc_alloc,
2307 			objcache_malloc_free,
2308 			&devfs_msg_malloc_args );
2309 
2310 	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
2311 			NULL, NULL, NULL,
2312 			objcache_malloc_alloc,
2313 			objcache_malloc_free,
2314 			&devfs_dev_malloc_args );
2315 
2316 	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));
2317 #if 0
2318 	devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(ops_id), 0);
2319 #endif
2320 
2321 	/* Initialize the reply-only port which acts as a message drain */
2322 	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);
2323 
2324 	/* Initialize *THE* devfs lock */
2325 	lockinit(&devfs_lock, "devfs_core lock", 0, 0);
2326 
2327 
2328 	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
2329 		    0, 0, "devfs_msg_core");
2330 
2331 	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2332 
2333 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
2334 }
2335 
2336 /*
2337  * Called on unload of devfs; takes care of destroying the core
2338  * and the objcaches. Also removes aliases that are no longer needed.
2339  */
2340 static void
2341 devfs_uninit(void)
2342 {
2343 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");
2344 
2345 	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
2346 
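	/*
	 * Wait for the core thread to acknowledge termination; the second,
	 * bounded sleep appears to act as an additional grace period.
	 */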
2347 	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2348 	tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);
2349 
2350 	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));
2351 
2352 	/* Destroy the objcaches */
2353 	objcache_destroy(devfs_msg_cache);
2354 	objcache_destroy(devfs_node_cache);
2355 	objcache_destroy(devfs_dev_cache);
2356 
2357 	devfs_alias_reap();
2358 }
2359 
2360 /*
2361  * Sysctl handler that assists userland devname(3) in finding
2362  * the device name for a given udev.
2363  */
2364 static int
2365 devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
2366 {
2367 	udev_t 	udev;
2368 	cdev_t	found;
2369 	int		error;
2370 
2371 
2372 	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
2373 		return (error);
2374 
2375 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);
2376 
2377 	if (udev == NOUDEV)
2378 		return(EINVAL);
2379 
2380 	if ((found = devfs_find_device_by_udev(udev)) == NULL)
2381 		return(ENOENT);
2382 
2383 	return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
2384 }
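
/*
 * Userland usage sketch (illustrative; 'dev' holds a udev value, e.g.
 * st_rdev from a struct stat): devname(3) is expected to feed the udev
 * into this handler and read the name back, roughly
 *
 *	char name[SPECNAMELEN + 1];
 *	size_t len = sizeof(name);
 *
 *	sysctlbyname("kern.devname", name, &len, &dev, sizeof(dev));
 */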
2385 
2386 
2387 SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
2388 			NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");
2389 
2390 static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
2391 TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
2392 SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable, 0, "Enable DevFS debugging");
2393 
2394 SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, devfs_init, NULL);
2395 SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, devfs_uninit, NULL);
2396