1 /* $NetBSD: subr_autoconf.c,v 1.306 2022/09/13 09:43:33 riastradh Exp $ */
2 
3 /*
4  * Copyright (c) 1996, 2000 Christopher G. Demetriou
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *          This product includes software developed for the
18  *          NetBSD Project.  See http://www.NetBSD.org/ for
19  *          information about NetBSD.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35  */
36 
37 /*
38  * Copyright (c) 1992, 1993
39  *	The Regents of the University of California.  All rights reserved.
40  *
41  * This software was developed by the Computer Systems Engineering group
42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43  * contributed to Berkeley.
44  *
45  * All advertising materials mentioning features or use of this software
46  * must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Lawrence Berkeley Laboratories.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp  (LBL)
75  *
76  *	@(#)subr_autoconf.c	8.3 (Berkeley) 5/17/94
77  */
78 
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.306 2022/09/13 09:43:33 riastradh Exp $");
81 
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86 
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113 
114 #include <sys/disk.h>
115 
116 #include <sys/rndsource.h>
117 
118 #include <machine/limits.h>
119 
120 /*
121  * Autoconfiguration subroutines.
122  */
123 
124 /*
125  * Device autoconfiguration timings are mixed into the entropy pool.
126  */
127 static krndsource_t rnd_autoconf_source;
128 
129 /*
130  * ioconf.c exports exactly two names: cfdata and cfroots.  All system
131  * devices and drivers are found via these tables.
132  */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
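/*
 * Illustrative sketch (not part of this file; the real tables are
 * generated by config(1) into ioconf.c): for a kernel whose sole root is
 * "mainbus", the tables might look roughly like
 *
 *	struct cfdata cfdata[] = {
 *		{ .cf_name = "mainbus", .cf_atname = "mainbus",
 *		  .cf_unit = 0, .cf_fstate = FSTATE_NOTFOUND, ... },
 *		...
 *		{ .cf_name = NULL }		(terminator)
 *	};
 *	const short cfroots[] = { 0, -1 };	(indices into cfdata[],
 *						 ended by a value < 0)
 *
 * The scanners below stop at the NULL cf_name, and config_rootsearch()
 * walks cfroots[] until it hits a negative entry.
 */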
135 
136 /*
137  * List of all cfdriver structures.  We use this to detect duplicates
138  * when other cfdrivers are loaded.
139  */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142 
143 /*
144  * Initial list of cfattach's.
145  */
146 extern const struct cfattachinit cfattachinit[];
147 
148 /*
149  * List of cfdata tables.  We always have one such list -- the one
150  * built statically when the kernel was configured.
151  */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154 
155 #define	ROOT ((device_t)NULL)
156 
157 struct matchinfo {
158 	cfsubmatch_t fn;
159 	device_t parent;
160 	const int *locs;
161 	void	*aux;
162 	struct	cfdata *match;
163 	int	pri;
164 };
165 
166 struct alldevs_foray {
167 	int			af_s;
168 	struct devicelist	af_garbage;
169 };
170 
171 /*
172  * Internal version of the cfargs structure; all versions are
173  * canonicalized to this.
174  */
175 struct cfargs_internal {
176 	union {
177 		cfsubmatch_t	submatch;/* submatch function (direct config) */
178 		cfsearch_t	search;	 /* search function (indirect config) */
179 	};
180 	const char *	iattr;		/* interface attribute */
181 	const int *	locators;	/* locators array */
182 	devhandle_t	devhandle;	/* devhandle_t (by value) */
183 };
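/*
 * Callers describe their arguments with a versioned "struct cfargs",
 * normally built with the CFARGS() macro from <sys/device.h>, e.g.
 *
 *	CFARGS(.iattr = "foobus", .locators = locs)
 *
 * (the names here are illustrative only).  cfargs_canonicalize() below
 * flattens that into this internal structure.  .submatch (direct config)
 * and .search (indirect config) share the union and are mutually
 * exclusive.
 */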
184 
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t	config_attach_internal(device_t, cfdata_t, void *,
195 		    cfprint_t, const struct cfargs_internal *);
196 
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199 
200 static void pmflock_debug(device_t, const char *, int);
201 
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204 
205 struct deferred_config {
206 	TAILQ_ENTRY(deferred_config) dc_queue;
207 	device_t dc_dev;
208 	void (*dc_func)(device_t);
209 };
210 
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212 
213 static struct deferred_config_head deferred_config_queue =
214 	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224 
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226 
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 	TAILQ_ENTRY(finalize_hook) f_list;
230 	int (*f_func)(device_t);
231 	device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 	TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236 
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244 
245 static struct devicelist config_pending =
246     TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249 
250 static bool detachall = false;
251 
252 #define	STREQ(s1, s2)			\
253 	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254 
255 static bool config_initialized = false;	/* config_init() has been called. */
256 
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259 
260 static void sysctl_detach_setup(struct sysctllog **);
261 
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264 
265 typedef int (*cfdriver_fn)(struct cfdriver *);
266 static int
267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
268 	cfdriver_fn drv_do, cfdriver_fn drv_undo,
269 	const char *style, bool dopanic)
270 {
271 	void (*pr)(const char *, ...) __printflike(1, 2) =
272 	    dopanic ? panic : printf;
273 	int i, error = 0, e2 __diagused;
274 
275 	for (i = 0; cfdriverv[i] != NULL; i++) {
276 		if ((error = drv_do(cfdriverv[i])) != 0) {
277 			pr("configure: `%s' driver %s failed: %d",
278 			    cfdriverv[i]->cd_name, style, error);
279 			goto bad;
280 		}
281 	}
282 
283 	KASSERT(error == 0);
284 	return 0;
285 
286  bad:
287 	printf("\n");
288 	for (i--; i >= 0; i--) {
289 		e2 = drv_undo(cfdriverv[i]);
290 		KASSERT(e2 == 0);
291 	}
292 
293 	return error;
294 }
295 
296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
297 static int
298 frob_cfattachvec(const struct cfattachinit *cfattachv,
299 	cfattach_fn att_do, cfattach_fn att_undo,
300 	const char *style, bool dopanic)
301 {
302 	const struct cfattachinit *cfai = NULL;
303 	void (*pr)(const char *, ...) __printflike(1, 2) =
304 	    dopanic ? panic : printf;
305 	int j = 0, error = 0, e2 __diagused;
306 
307 	for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
308 		for (j = 0; cfai->cfai_list[j] != NULL; j++) {
309 			if ((error = att_do(cfai->cfai_name,
310 			    cfai->cfai_list[j])) != 0) {
311 				pr("configure: attachment `%s' "
312 				    "of `%s' driver %s failed: %d",
313 				    cfai->cfai_list[j]->ca_name,
314 				    cfai->cfai_name, style, error);
315 				goto bad;
316 			}
317 		}
318 	}
319 
320 	KASSERT(error == 0);
321 	return 0;
322 
323  bad:
324 	/*
325 	 * Roll back in reverse order.  It is not clear the order matters,
326 	 * but do it anyway.  The code looks a little like someone did a
327 	 * little integration (in the math sense).
328 	 */
329 	printf("\n");
330 	if (cfai) {
331 		bool last;
332 
333 		for (last = false; last == false; ) {
334 			if (cfai == &cfattachv[0])
335 				last = true;
336 			for (j--; j >= 0; j--) {
337 				e2 = att_undo(cfai->cfai_name,
338 				    cfai->cfai_list[j]);
339 				KASSERT(e2 == 0);
340 			}
341 			if (!last) {
342 				cfai--;
343 				for (j = 0; cfai->cfai_list[j] != NULL; j++)
344 					;
345 			}
346 		}
347 	}
348 
349 	return error;
350 }
351 
352 /*
353  * Initialize the autoconfiguration data structures.  Normally this
354  * is done by configure(), but some platforms need to do this very
355  * early (to e.g. initialize the console).
356  */
357 void
358 config_init(void)
359 {
360 
361 	KASSERT(config_initialized == false);
362 
363 	mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
364 
365 	mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
366 	cv_init(&config_misc_cv, "cfgmisc");
367 
368 	callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
369 
370 	frob_cfdrivervec(cfdriver_list_initial,
371 	    config_cfdriver_attach, NULL, "bootstrap", true);
372 	frob_cfattachvec(cfattachinit,
373 	    config_cfattach_attach, NULL, "bootstrap", true);
374 
375 	initcftable.ct_cfdata = cfdata;
376 	TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
377 
378 	rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
379 	    RND_FLAG_COLLECT_TIME);
380 
381 	config_initialized = true;
382 }
383 
384 /*
385  * Init or fini drivers and attachments.  Either all or none
386  * are processed (via rollback).  It would be nice if this were
387  * atomic to outside consumers, but with the current state of
388  * locking ...
389  */
390 int
391 config_init_component(struct cfdriver * const *cfdriverv,
392 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
393 {
394 	int error;
395 
396 	KERNEL_LOCK(1, NULL);
397 
398 	if ((error = frob_cfdrivervec(cfdriverv,
399 	    config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
400 		goto out;
401 	if ((error = frob_cfattachvec(cfattachv,
402 	    config_cfattach_attach, config_cfattach_detach,
403 	    "init", false)) != 0) {
404 		frob_cfdrivervec(cfdriverv,
405 	            config_cfdriver_detach, NULL, "init rollback", true);
406 		goto out;
407 	}
408 	if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
409 		frob_cfattachvec(cfattachv,
410 		    config_cfattach_detach, NULL, "init rollback", true);
411 		frob_cfdrivervec(cfdriverv,
412 	            config_cfdriver_detach, NULL, "init rollback", true);
413 		goto out;
414 	}
415 
416 	/* Success!  */
417 	error = 0;
418 
419 out:	KERNEL_UNLOCK_ONE(NULL);
420 	return error;
421 }
422 
423 int
424 config_fini_component(struct cfdriver * const *cfdriverv,
425 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
426 {
427 	int error;
428 
429 	KERNEL_LOCK(1, NULL);
430 
431 	if ((error = config_cfdata_detach(cfdatav)) != 0)
432 		goto out;
433 	if ((error = frob_cfattachvec(cfattachv,
434 	    config_cfattach_detach, config_cfattach_attach,
435 	    "fini", false)) != 0) {
436 		if (config_cfdata_attach(cfdatav, 0) != 0)
437 			panic("config_cfdata fini rollback failed");
438 		goto out;
439 	}
440 	if ((error = frob_cfdrivervec(cfdriverv,
441 	    config_cfdriver_detach, config_cfdriver_attach,
442 	    "fini", false)) != 0) {
443 		frob_cfattachvec(cfattachv,
444 	            config_cfattach_attach, NULL, "fini rollback", true);
445 		if (config_cfdata_attach(cfdatav, 0) != 0)
446 			panic("config_cfdata fini rollback failed");
447 		goto out;
448 	}
449 
450 	/* Success!  */
451 	error = 0;
452 
453 out:	KERNEL_UNLOCK_ONE(NULL);
454 	return error;
455 }
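/*
 * Illustrative sketch (hypothetical "foo" module): loadable drivers
 * typically call the two functions above from their module command
 * handler, passing the tables config(1) generates for the module:
 *
 *	static int
 *	foo_modcmd(modcmd_t cmd, void *arg)
 *	{
 *		switch (cmd) {
 *		case MODULE_CMD_INIT:
 *			return config_init_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		case MODULE_CMD_FINI:
 *			return config_fini_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		default:
 *			return ENOTTY;
 *		}
 *	}
 *
 * The *_ioconf_foo names follow the usual config(1) naming for a
 * module's ioconf and are shown only as an example.
 */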
456 
457 void
458 config_init_mi(void)
459 {
460 
461 	if (!config_initialized)
462 		config_init();
463 
464 	sysctl_detach_setup(NULL);
465 }
466 
467 void
468 config_deferred(device_t dev)
469 {
470 
471 	KASSERT(KERNEL_LOCKED_P());
472 
473 	config_process_deferred(&deferred_config_queue, dev);
474 	config_process_deferred(&interrupt_config_queue, dev);
475 	config_process_deferred(&mountroot_config_queue, dev);
476 }
477 
478 static void
479 config_interrupts_thread(void *cookie)
480 {
481 	struct deferred_config *dc;
482 	device_t dev;
483 
484 	mutex_enter(&config_misc_lock);
485 	while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
486 		TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
487 		mutex_exit(&config_misc_lock);
488 
489 		dev = dc->dc_dev;
490 		(*dc->dc_func)(dev);
491 		if (!device_pmf_is_registered(dev))
492 			aprint_debug_dev(dev,
493 			    "WARNING: power management not supported\n");
494 		config_pending_decr(dev);
495 		kmem_free(dc, sizeof(*dc));
496 
497 		mutex_enter(&config_misc_lock);
498 	}
499 	mutex_exit(&config_misc_lock);
500 
501 	kthread_exit(0);
502 }
503 
504 void
505 config_create_interruptthreads(void)
506 {
507 	int i;
508 
509 	for (i = 0; i < interrupt_config_threads; i++) {
510 		(void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 		    config_interrupts_thread, NULL, NULL, "configintr");
512 	}
513 }
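/*
 * Illustrative sketch (hypothetical "foo" driver): the producer side of
 * interrupt_config_queue is config_interrupts(9).  A driver whose attach
 * routine needs working interrupts defers part of its setup:
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_interrupts(self, foo_attach_deferred);
 *	}
 *
 * The queued foo_attach_deferred() runs on one of the "configintr"
 * threads created above; the device's pending count (see
 * config_pending_incr()/config_pending_decr()) keeps the device from
 * being detached until that deferred work has completed.
 */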
514 
515 static void
516 config_mountroot_thread(void *cookie)
517 {
518 	struct deferred_config *dc;
519 
520 	mutex_enter(&config_misc_lock);
521 	while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
522 		TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
523 		mutex_exit(&config_misc_lock);
524 
525 		(*dc->dc_func)(dc->dc_dev);
526 		kmem_free(dc, sizeof(*dc));
527 
528 		mutex_enter(&config_misc_lock);
529 	}
530 	mutex_exit(&config_misc_lock);
531 
532 	kthread_exit(0);
533 }
534 
535 void
536 config_create_mountrootthreads(void)
537 {
538 	int i;
539 
540 	if (!root_is_mounted)
541 		root_is_mounted = true;
542 
543 	mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 				       mountroot_config_threads;
545 	mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 					     KM_NOSLEEP);
547 	KASSERT(mountroot_config_lwpids);
548 	for (i = 0; i < mountroot_config_threads; i++) {
549 		mountroot_config_lwpids[i] = 0;
550 		(void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 				     NULL, config_mountroot_thread, NULL,
552 				     &mountroot_config_lwpids[i],
553 				     "configroot");
554 	}
555 }
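/*
 * Illustrative sketch (hypothetical "foo" driver): work queued with
 * config_mountroot(9) -- for example, loading firmware that lives on the
 * root file system -- runs on one of the "configroot" threads created
 * above, once root is mounted:
 *
 *	config_mountroot(self, foo_load_firmware);
 */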
556 
557 void
558 config_finalize_mountroot(void)
559 {
560 	int i, error;
561 
562 	for (i = 0; i < mountroot_config_threads; i++) {
563 		if (mountroot_config_lwpids[i] == 0)
564 			continue;
565 
566 		error = kthread_join(mountroot_config_lwpids[i]);
567 		if (error)
568 			printf("%s: thread %x joined with error %d\n",
569 			       __func__, i, error);
570 	}
571 	kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
572 }
573 
574 /*
575  * Announce device attach/detach to userland listeners.
576  */
577 
578 int
579 no_devmon_insert(const char *name, prop_dictionary_t p)
580 {
581 
582 	return ENODEV;
583 }
584 
585 static void
586 devmon_report_device(device_t dev, bool isattach)
587 {
588 	prop_dictionary_t ev, dict = device_properties(dev);
589 	const char *parent;
590 	const char *what;
591 	const char *where;
592 	device_t pdev = device_parent(dev);
593 
594 	/* If currently no drvctl device, just return */
595 	if (devmon_insert_vec == no_devmon_insert)
596 		return;
597 
598 	ev = prop_dictionary_create();
599 	if (ev == NULL)
600 		return;
601 
602 	what = (isattach ? "device-attach" : "device-detach");
603 	parent = (pdev == NULL ? "root" : device_xname(pdev));
604 	if (prop_dictionary_get_string(dict, "location", &where)) {
605 		prop_dictionary_set_string(ev, "location", where);
606 		aprint_debug("ev: %s %s at %s in [%s]\n",
607 		    what, device_xname(dev), parent, where);
608 	}
609 	if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
610 	    !prop_dictionary_set_string(ev, "parent", parent)) {
611 		prop_object_release(ev);
612 		return;
613 	}
614 
615 	if ((*devmon_insert_vec)(what, ev) != 0)
616 		prop_object_release(ev);
617 }
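/*
 * The event handed to drvctl is roughly the following property list
 * (keys taken from the code above; the values are examples).  Userland
 * consumers such as drvctl(8) and devpubd(8) pick these events up:
 *
 *	<dict>
 *		<key>device</key>	<string>sd0</string>
 *		<key>parent</key>	<string>scsibus0</string>
 *		<key>location</key>	<string>...</string>	(if known)
 *	</dict>
 *
 * delivered under the event name "device-attach" or "device-detach".
 */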
618 
619 /*
620  * Add a cfdriver to the system.
621  */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 	struct cfdriver *lcd;
626 
627 	/* Make sure this driver isn't already in the system. */
628 	LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 		if (STREQ(lcd->cd_name, cd->cd_name))
630 			return EEXIST;
631 	}
632 
633 	LIST_INIT(&cd->cd_attach);
634 	LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635 
636 	return 0;
637 }
638 
639 /*
640  * Remove a cfdriver from the system.
641  */
642 int
643 config_cfdriver_detach(struct cfdriver *cd)
644 {
645 	struct alldevs_foray af;
646 	int i, rc = 0;
647 
648 	config_alldevs_enter(&af);
649 	/* Make sure there are no active instances. */
650 	for (i = 0; i < cd->cd_ndevs; i++) {
651 		if (cd->cd_devs[i] != NULL) {
652 			rc = EBUSY;
653 			break;
654 		}
655 	}
656 	config_alldevs_exit(&af);
657 
658 	if (rc != 0)
659 		return rc;
660 
661 	/* ...and no attachments loaded. */
662 	if (LIST_EMPTY(&cd->cd_attach) == 0)
663 		return EBUSY;
664 
665 	LIST_REMOVE(cd, cd_list);
666 
667 	KASSERT(cd->cd_devs == NULL);
668 
669 	return 0;
670 }
671 
672 /*
673  * Look up a cfdriver by name.
674  */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 	struct cfdriver *cd;
679 
680 	LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 		if (STREQ(cd->cd_name, name))
682 			return cd;
683 	}
684 
685 	return NULL;
686 }
687 
688 /*
689  * Add a cfattach to the specified driver.
690  */
691 int
692 config_cfattach_attach(const char *driver, struct cfattach *ca)
693 {
694 	struct cfattach *lca;
695 	struct cfdriver *cd;
696 
697 	cd = config_cfdriver_lookup(driver);
698 	if (cd == NULL)
699 		return ESRCH;
700 
701 	/* Make sure this attachment isn't already on this driver. */
702 	LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
703 		if (STREQ(lca->ca_name, ca->ca_name))
704 			return EEXIST;
705 	}
706 
707 	LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
708 
709 	return 0;
710 }
711 
712 /*
713  * Remove a cfattach from the specified driver.
714  */
715 int
716 config_cfattach_detach(const char *driver, struct cfattach *ca)
717 {
718 	struct alldevs_foray af;
719 	struct cfdriver *cd;
720 	device_t dev;
721 	int i, rc = 0;
722 
723 	cd = config_cfdriver_lookup(driver);
724 	if (cd == NULL)
725 		return ESRCH;
726 
727 	config_alldevs_enter(&af);
728 	/* Make sure there are no active instances. */
729 	for (i = 0; i < cd->cd_ndevs; i++) {
730 		if ((dev = cd->cd_devs[i]) == NULL)
731 			continue;
732 		if (dev->dv_cfattach == ca) {
733 			rc = EBUSY;
734 			break;
735 		}
736 	}
737 	config_alldevs_exit(&af);
738 
739 	if (rc != 0)
740 		return rc;
741 
742 	LIST_REMOVE(ca, ca_list);
743 
744 	return 0;
745 }
746 
747 /*
748  * Look up a cfattach by name.
749  */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 	struct cfattach *ca;
754 
755 	LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 		if (STREQ(ca->ca_name, atname))
757 			return ca;
758 	}
759 
760 	return NULL;
761 }
762 
763 /*
764  * Look up a cfattach by driver/attachment name.
765  */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 	struct cfdriver *cd;
770 
771 	cd = config_cfdriver_lookup(name);
772 	if (cd == NULL)
773 		return NULL;
774 
775 	return config_cfattach_lookup_cd(cd, atname);
776 }
777 
778 /*
779  * Apply the matching function and choose the best.  This is used
780  * a few times and we want to keep the code small.
781  */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 	int pri;
786 
787 	if (m->fn != NULL) {
788 		pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 	} else {
790 		pri = config_match(m->parent, cf, m->aux);
791 	}
792 	if (pri > m->pri) {
793 		m->match = cf;
794 		m->pri = pri;
795 	}
796 }
797 
798 int
799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
800 {
801 	const struct cfiattrdata *ci;
802 	const struct cflocdesc *cl;
803 	int nlocs, i;
804 
805 	ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
806 	KASSERT(ci);
807 	nlocs = ci->ci_loclen;
808 	KASSERT(!nlocs || locs);
809 	for (i = 0; i < nlocs; i++) {
810 		cl = &ci->ci_locdesc[i];
811 		if (cl->cld_defaultstr != NULL &&
812 		    cf->cf_loc[i] == cl->cld_default)
813 			continue;
814 		if (cf->cf_loc[i] == locs[i])
815 			continue;
816 		return 0;
817 	}
818 
819 	return config_match(parent, cf, aux);
820 }
821 
822 /*
823  * Helper function: check whether the driver supports the interface attribute
824  * and return its descriptor structure.
825  */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 	const struct cfiattrdata * const *cpp;
830 
831 	if (cd->cd_attrs == NULL)
832 		return 0;
833 
834 	for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 		if (STREQ((*cpp)->ci_name, ia)) {
836 			/* Match. */
837 			return *cpp;
838 		}
839 	}
840 	return 0;
841 }
842 
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 	const struct cfiattrdata * const *cpp;
847 	int i;
848 
849 	if (cd->cd_attrs == NULL)
850 		return 0;
851 
852 	for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 		i++;
854 	}
855 	return i;
856 }
857 
858 /*
859  * Lookup an interface attribute description by name.
860  * If the driver is given, consider only its supported attributes.
861  */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 	const struct cfdriver *d;
866 	const struct cfiattrdata *ia;
867 
868 	if (cd)
869 		return cfdriver_get_iattr(cd, name);
870 
871 	LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 		ia = cfdriver_get_iattr(d, name);
873 		if (ia)
874 			return ia;
875 	}
876 	return 0;
877 }
878 
879 /*
880  * Determine if `parent' is a potential parent for a device spec based
881  * on `cfp'.
882  */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 	struct cfdriver *pcd;
887 
888 	/* We don't match root nodes here. */
889 	if (cfp == NULL)
890 		return 0;
891 
892 	pcd = parent->dv_cfdriver;
893 	KASSERT(pcd != NULL);
894 
895 	/*
896 	 * First, ensure this parent has the correct interface
897 	 * attribute.
898 	 */
899 	if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 		return 0;
901 
902 	/*
903 	 * If no specific parent device instance was specified (i.e.
904 	 * we're attaching to the attribute only), we're done!
905 	 */
906 	if (cfp->cfp_parent == NULL)
907 		return 1;
908 
909 	/*
910 	 * Check the parent device's name.
911 	 */
912 	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 		return 0;	/* not the same parent */
914 
915 	/*
916 	 * Make sure the unit number matches.
917 	 */
918 	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
919 	    cfp->cfp_unit == parent->dv_unit)
920 		return 1;
921 
922 	/* Unit numbers don't match. */
923 	return 0;
924 }
925 
926 /*
927  * Helper for config_cfdata_attach(): check whether each device could be
928  * the parent of any attachment in the config data table passed, and rescan.
929  */
930 static void
931 rescan_with_cfdata(const struct cfdata *cf)
932 {
933 	device_t d;
934 	const struct cfdata *cf1;
935 	deviter_t di;
936 
937 	KASSERT(KERNEL_LOCKED_P());
938 
939 	/*
940 	 * "alldevs" is likely longer than a modules's cfdata, so make it
941 	 * the outer loop.
942 	 */
943 	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
944 
945 		if (!(d->dv_cfattach->ca_rescan))
946 			continue;
947 
948 		for (cf1 = cf; cf1->cf_name; cf1++) {
949 
950 			if (!cfparent_match(d, cf1->cf_pspec))
951 				continue;
952 
953 			(*d->dv_cfattach->ca_rescan)(d,
954 				cfdata_ifattr(cf1), cf1->cf_loc);
955 
956 			config_deferred(d);
957 		}
958 	}
959 	deviter_release(&di);
960 }
961 
962 /*
963  * Attach a supplemental config data table and rescan potential
964  * parent devices if required.
965  */
966 int
967 config_cfdata_attach(cfdata_t cf, int scannow)
968 {
969 	struct cftable *ct;
970 
971 	KERNEL_LOCK(1, NULL);
972 
973 	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
974 	ct->ct_cfdata = cf;
975 	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
976 
977 	if (scannow)
978 		rescan_with_cfdata(cf);
979 
980 	KERNEL_UNLOCK_ONE(NULL);
981 
982 	return 0;
983 }
984 
985 /*
986  * Helper for config_cfdata_detach: check whether a device is
987  * found through any attachment in the config data table.
988  */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 	const struct cfdata *cf1;
993 
994 	for (cf1 = cf; cf1->cf_name; cf1++)
995 		if (d->dv_cfdata == cf1)
996 			return 1;
997 
998 	return 0;
999 }
1000 
1001 /*
1002  * Detach a supplemental config data table.  First detach all devices
1003  * found through that table (and thus still holding references to it).
1004  */
1005 int
1006 config_cfdata_detach(cfdata_t cf)
1007 {
1008 	device_t d;
1009 	int error = 0;
1010 	struct cftable *ct;
1011 	deviter_t di;
1012 
1013 	KERNEL_LOCK(1, NULL);
1014 
1015 	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1016 	     d = deviter_next(&di)) {
1017 		if (!dev_in_cfdata(d, cf))
1018 			continue;
1019 		if ((error = config_detach(d, 0)) != 0)
1020 			break;
1021 	}
1022 	deviter_release(&di);
1023 	if (error) {
1024 		aprint_error_dev(d, "unable to detach instance\n");
1025 		goto out;
1026 	}
1027 
1028 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
1029 		if (ct->ct_cfdata == cf) {
1030 			TAILQ_REMOVE(&allcftables, ct, ct_list);
1031 			kmem_free(ct, sizeof(*ct));
1032 			error = 0;
1033 			goto out;
1034 		}
1035 	}
1036 
1037 	/* not found -- shouldn't happen */
1038 	error = EINVAL;
1039 
1040 out:	KERNEL_UNLOCK_ONE(NULL);
1041 	return error;
1042 }
1043 
1044 /*
1045  * Invoke the "match" routine for a cfdata entry on behalf of
1046  * an external caller, usually a direct config "submatch" routine.
1047  */
1048 int
1049 config_match(device_t parent, cfdata_t cf, void *aux)
1050 {
1051 	struct cfattach *ca;
1052 
1053 	KASSERT(KERNEL_LOCKED_P());
1054 
1055 	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1056 	if (ca == NULL) {
1057 		/* No attachment for this entry, oh well. */
1058 		return 0;
1059 	}
1060 
1061 	return (*ca->ca_match)(parent, cf, aux);
1062 }
1063 
1064 /*
1065  * Invoke the "probe" routine for a cfdata entry on behalf of
1066  * an external caller, usually an indirect config "search" routine.
1067  */
1068 int
1069 config_probe(device_t parent, cfdata_t cf, void *aux)
1070 {
1071 	/*
1072 	 * This is currently a synonym for config_match(), but this
1073 	 * is an implementation detail; "match" and "probe" routines
1074 	 * have different behaviors.
1075 	 *
1076 	 * XXX config_probe() should return a bool, because there is
1077 	 * XXX no match score for probe -- it's either there or it's
1078 	 * XXX not, but some ports abuse the return value as a way
1079 	 * XXX to attach "critical" devices before "non-critical"
1080 	 * XXX devices.
1081 	 */
1082 	return config_match(parent, cf, aux);
1083 }
1084 
1085 static struct cfargs_internal *
1086 cfargs_canonicalize(const struct cfargs * const cfargs,
1087     struct cfargs_internal * const store)
1088 {
1089 	struct cfargs_internal *args = store;
1090 
1091 	memset(args, 0, sizeof(*args));
1092 
1093 	/* If none specified, the all-NULL pointers are good. */
1094 	if (cfargs == NULL) {
1095 		return args;
1096 	}
1097 
1098 	/*
1099 	 * Only one version of the cfargs structure is recognized at this time.
1100 	 */
1101 	if (cfargs->cfargs_version != CFARGS_VERSION) {
1102 		panic("cfargs_canonicalize: unknown version %lu\n",
1103 		    (unsigned long)cfargs->cfargs_version);
1104 	}
1105 
1106 	/*
1107 	 * submatch and search are mutually-exclusive.
1108 	 */
1109 	if (cfargs->submatch != NULL && cfargs->search != NULL) {
1110 		panic("cfargs_canonicalize: submatch and search are "
1111 		      "mutually-exclusive");
1112 	}
1113 	if (cfargs->submatch != NULL) {
1114 		args->submatch = cfargs->submatch;
1115 	} else if (cfargs->search != NULL) {
1116 		args->search = cfargs->search;
1117 	}
1118 
1119 	args->iattr = cfargs->iattr;
1120 	args->locators = cfargs->locators;
1121 	args->devhandle = cfargs->devhandle;
1122 
1123 	return args;
1124 }
1125 
1126 /*
1127  * Iterate over all potential children of some device, calling the given
1128  * function (default being the child's match function) for each one.
1129  * Nonzero returns are matches; the highest value returned is considered
1130  * the best match.  Return the `found child' if we got a match, or NULL
1131  * otherwise.  The `aux' pointer is simply passed on through.
1132  *
1133  * Note that this function is designed so that it can be used to apply
1134  * an arbitrary function to all potential children (its return value
1135  * can be ignored).
1136  */
1137 static cfdata_t
1138 config_search_internal(device_t parent, void *aux,
1139     const struct cfargs_internal * const args)
1140 {
1141 	struct cftable *ct;
1142 	cfdata_t cf;
1143 	struct matchinfo m;
1144 
1145 	KASSERT(config_initialized);
1146 	KASSERT(!args->iattr ||
1147 		cfdriver_get_iattr(parent->dv_cfdriver, args->iattr));
1148 	KASSERT(args->iattr ||
1149 		cfdriver_iattr_count(parent->dv_cfdriver) < 2);
1150 
1151 	m.fn = args->submatch;		/* N.B. union */
1152 	m.parent = parent;
1153 	m.locs = args->locators;
1154 	m.aux = aux;
1155 	m.match = NULL;
1156 	m.pri = 0;
1157 
1158 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
1159 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1160 
1161 			/* We don't match root nodes here. */
1162 			if (!cf->cf_pspec)
1163 				continue;
1164 
1165 			/*
1166 			 * Skip cf if no longer eligible, otherwise scan
1167 			 * through parents for one matching `parent', and
1168 			 * try match function.
1169 			 */
1170 			if (cf->cf_fstate == FSTATE_FOUND)
1171 				continue;
1172 			if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1173 			    cf->cf_fstate == FSTATE_DSTAR)
1174 				continue;
1175 
1176 			/*
1177 			 * If an interface attribute was specified,
1178 			 * consider only children which attach to
1179 			 * that attribute.
1180 			 */
1181 			if (args->iattr != NULL &&
1182 			    !STREQ(args->iattr, cfdata_ifattr(cf)))
1183 				continue;
1184 
1185 			if (cfparent_match(parent, cf->cf_pspec))
1186 				mapply(&m, cf);
1187 		}
1188 	}
1189 	rnd_add_uint32(&rnd_autoconf_source, 0);
1190 	return m.match;
1191 }
1192 
1193 cfdata_t
1194 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1195 {
1196 	cfdata_t cf;
1197 	struct cfargs_internal store;
1198 
1199 	cf = config_search_internal(parent, aux,
1200 	    cfargs_canonicalize(cfargs, &store));
1201 
1202 	return cf;
1203 }
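/*
 * Illustrative sketch (hypothetical "foo" bus, indirect config): a bus
 * that cannot enumerate its children hands a .search callback to
 * config_search(); the callback probes each candidate cfdata entry and
 * attaches those that respond:
 *
 *	static int
 *	foo_search(device_t parent, cfdata_t cf, const int *locs, void *aux)
 *	{
 *		struct foo_attach_args faa;
 *
 *		(fill in faa from cf->cf_loc[] / locs here)
 *		if (config_probe(parent, cf, &faa))
 *			config_attach(parent, cf, &faa, foo_print,
 *			    CFARGS(.locators = locs));
 *		return 0;
 *	}
 *
 *	(void)config_search(self, NULL, CFARGS(.search = foo_search));
 */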
1204 
1205 /*
1206  * Find the given root device.
1207  * This is much like config_search, but there is no parent.
1208  * Don't bother with multiple cfdata tables; the root node
1209  * must always be in the initial table.
1210  */
1211 cfdata_t
1212 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1213 {
1214 	cfdata_t cf;
1215 	const short *p;
1216 	struct matchinfo m;
1217 
1218 	m.fn = fn;
1219 	m.parent = ROOT;
1220 	m.aux = aux;
1221 	m.match = NULL;
1222 	m.pri = 0;
1223 	m.locs = 0;
1224 	/*
1225 	 * Look at root entries for matching name.  We do not bother
1226 	 * with found-state here since only one root should ever be
1227 	 * searched (and it must be done first).
1228 	 */
1229 	for (p = cfroots; *p >= 0; p++) {
1230 		cf = &cfdata[*p];
1231 		if (strcmp(cf->cf_name, rootname) == 0)
1232 			mapply(&m, cf);
1233 	}
1234 	return m.match;
1235 }
1236 
1237 static const char * const msgs[] = {
1238 [QUIET]		=	"",
1239 [UNCONF]	=	" not configured\n",
1240 [UNSUPP]	=	" unsupported\n",
1241 };
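/*
 * The print callback given to config_found() returns one of the indices
 * above: QUIET to print nothing extra, UNCONF for the usual "not
 * configured" message, UNSUPP for "unsupported".  It is called with the
 * parent's name when the device was not configured, and with NULL from
 * config_attach() when it was.  Illustrative sketch (hypothetical "foo"
 * bus; names are examples only):
 *
 *	static int
 *	foo_print(void *aux, const char *pnp)
 *	{
 *		struct foo_attach_args *faa = aux;
 *
 *		if (pnp != NULL)
 *			aprint_normal("device at %s", pnp);
 *		aprint_normal(" addr %#x", faa->faa_addr);
 *		return UNCONF;
 *	}
 */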
1242 
1243 /*
1244  * The given `aux' argument describes a device that has been found
1245  * on the given parent, but not necessarily configured.  Locate the
1246  * configuration data for that device (using the submatch function
1247  * provided, or using candidates' cd_match configuration driver
1248  * functions) and attach it, and return its device_t.  If the device was
1249  * not configured, call the given `print' function and return NULL.
1250  */
1251 device_t
1252 config_found(device_t parent, void *aux, cfprint_t print,
1253     const struct cfargs * const cfargs)
1254 {
1255 	cfdata_t cf;
1256 	struct cfargs_internal store;
1257 	const struct cfargs_internal * const args =
1258 	    cfargs_canonicalize(cfargs, &store);
1259 
1260 	cf = config_search_internal(parent, aux, args);
1261 	if (cf != NULL) {
1262 		return config_attach_internal(parent, cf, aux, print, args);
1263 	}
1264 
1265 	if (print) {
1266 		if (config_do_twiddle && cold)
1267 			twiddle();
1268 
1269 		const int pret = (*print)(aux, device_xname(parent));
1270 		KASSERT(pret >= 0);
1271 		KASSERT(pret < __arraycount(msgs));
1272 		KASSERT(msgs[pret] != NULL);
1273 		aprint_normal("%s", msgs[pret]);
1274 	}
1275 
1276 	return NULL;
1277 }
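/*
 * Illustrative sketch (hypothetical "foo" bus, direct config): a bus
 * that can enumerate its children describes each one in an attach-args
 * structure and calls config_found() for it:
 *
 *	struct foo_attach_args faa;
 *
 *	faa.faa_addr = addr;
 *	...
 *	config_found(self, &faa, foo_print,
 *	    CFARGS(.submatch = config_stdsubmatch,
 *		   .iattr = "foobus",
 *		   .locators = locs));
 *
 * Each candidate cfdata entry for the "foobus" interface attribute is
 * matched against &faa; the best match is attached, otherwise foo_print()
 * is called to report the unconfigured device.
 */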
1278 
1279 /*
1280  * As above, but for root devices.
1281  */
1282 device_t
1283 config_rootfound(const char *rootname, void *aux)
1284 {
1285 	cfdata_t cf;
1286 	device_t dev = NULL;
1287 
1288 	KERNEL_LOCK(1, NULL);
1289 	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1290 		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1291 	else
1292 		aprint_error("root device %s not configured\n", rootname);
1293 	KERNEL_UNLOCK_ONE(NULL);
1294 	return dev;
1295 }
1296 
1297 /* just like sprintf(buf, "%d") except that it works from the end */
1298 static char *
1299 number(char *ep, int n)
1300 {
1301 
1302 	*--ep = 0;
1303 	while (n >= 10) {
1304 		*--ep = (n % 10) + '0';
1305 		n /= 10;
1306 	}
1307 	*--ep = n + '0';
1308 	return ep;
1309 }
1310 
1311 /*
1312  * Expand the size of the cd_devs array if necessary.
1313  *
1314  * The caller must hold alldevs_lock. config_makeroom() may release and
1315  * re-acquire alldevs_lock, so callers should re-check conditions such
1316  * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1317  * returns.
1318  */
1319 static void
1320 config_makeroom(int n, struct cfdriver *cd)
1321 {
1322 	int ondevs, nndevs;
1323 	device_t *osp, *nsp;
1324 
1325 	KASSERT(mutex_owned(&alldevs_lock));
1326 	alldevs_nwrite++;
1327 
1328 	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1329 		;
1330 
1331 	while (n >= cd->cd_ndevs) {
1332 		/*
1333 		 * Need to expand the array.
1334 		 */
1335 		ondevs = cd->cd_ndevs;
1336 		osp = cd->cd_devs;
1337 
1338 		/*
1339 		 * Release alldevs_lock around allocation, which may
1340 		 * sleep.
1341 		 */
1342 		mutex_exit(&alldevs_lock);
1343 		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1344 		mutex_enter(&alldevs_lock);
1345 
1346 		/*
1347 		 * If another thread moved the array while we did
1348 		 * not hold alldevs_lock, try again.
1349 		 */
1350 		if (cd->cd_devs != osp) {
1351 			mutex_exit(&alldevs_lock);
1352 			kmem_free(nsp, sizeof(device_t) * nndevs);
1353 			mutex_enter(&alldevs_lock);
1354 			continue;
1355 		}
1356 
1357 		memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1358 		if (ondevs != 0)
1359 			memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1360 
1361 		cd->cd_ndevs = nndevs;
1362 		cd->cd_devs = nsp;
1363 		if (ondevs != 0) {
1364 			mutex_exit(&alldevs_lock);
1365 			kmem_free(osp, sizeof(device_t) * ondevs);
1366 			mutex_enter(&alldevs_lock);
1367 		}
1368 	}
1369 	KASSERT(mutex_owned(&alldevs_lock));
1370 	alldevs_nwrite--;
1371 }
1372 
1373 /*
1374  * Put dev into the devices list.
1375  */
1376 static void
1377 config_devlink(device_t dev)
1378 {
1379 
1380 	mutex_enter(&alldevs_lock);
1381 
1382 	KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1383 
1384 	dev->dv_add_gen = alldevs_gen;
1385 	/* It is safe to add a device to the tail of the list while
1386 	 * readers and writers are in the list.
1387 	 */
1388 	TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1389 	mutex_exit(&alldevs_lock);
1390 }
1391 
1392 static void
1393 config_devfree(device_t dev)
1394 {
1395 
1396 	KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1397 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1398 
1399 	if (dev->dv_cfattach->ca_devsize > 0)
1400 		kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1401 	kmem_free(dev, sizeof(*dev));
1402 }
1403 
1404 /*
1405  * Caller must hold alldevs_lock.
1406  */
1407 static void
1408 config_devunlink(device_t dev, struct devicelist *garbage)
1409 {
1410 	struct device_garbage *dg = &dev->dv_garbage;
1411 	cfdriver_t cd = device_cfdriver(dev);
1412 	int i;
1413 
1414 	KASSERT(mutex_owned(&alldevs_lock));
1415 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1416 
1417  	/* Unlink from device list.  Link to garbage list. */
1418 	TAILQ_REMOVE(&alldevs, dev, dv_list);
1419 	TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1420 
1421 	/* Remove from cfdriver's array. */
1422 	cd->cd_devs[dev->dv_unit] = NULL;
1423 
1424 	/*
1425 	 * If the driver now has no units in use, unlink its device array.
1426 	 */
1427 	for (i = 0; i < cd->cd_ndevs; i++) {
1428 		if (cd->cd_devs[i] != NULL)
1429 			break;
1430 	}
1431 	/* Nothing found.  Unlink, now.  Deallocate, later. */
1432 	if (i == cd->cd_ndevs) {
1433 		dg->dg_ndevs = cd->cd_ndevs;
1434 		dg->dg_devs = cd->cd_devs;
1435 		cd->cd_devs = NULL;
1436 		cd->cd_ndevs = 0;
1437 	}
1438 }
1439 
1440 static void
1441 config_devdelete(device_t dev)
1442 {
1443 	struct device_garbage *dg = &dev->dv_garbage;
1444 	device_lock_t dvl = device_getlock(dev);
1445 
1446 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1447 
1448 	if (dg->dg_devs != NULL)
1449 		kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1450 
1451 	localcount_fini(dev->dv_localcount);
1452 	kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
1453 
1454 	cv_destroy(&dvl->dvl_cv);
1455 	mutex_destroy(&dvl->dvl_mtx);
1456 
1457 	KASSERT(dev->dv_properties != NULL);
1458 	prop_object_release(dev->dv_properties);
1459 
1460 	if (dev->dv_activity_handlers)
1461 		panic("%s with registered handlers", __func__);
1462 
1463 	if (dev->dv_locators) {
1464 		size_t amount = *--dev->dv_locators;
1465 		kmem_free(dev->dv_locators, amount);
1466 	}
1467 
1468 	config_devfree(dev);
1469 }
1470 
1471 static int
1472 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1473 {
1474 	int unit = cf->cf_unit;
1475 
1476 	if (unit < 0)
1477 		return -1;
1478 	if (cf->cf_fstate == FSTATE_STAR) {
1479 		for (; unit < cd->cd_ndevs; unit++)
1480 			if (cd->cd_devs[unit] == NULL)
1481 				break;
1482 		/*
1483 		 * unit is now the unit of the first NULL device pointer,
1484 		 * or max(cd->cd_ndevs,cf->cf_unit).
1485 		 */
1486 	} else {
1487 		if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1488 			unit = -1;
1489 	}
1490 	return unit;
1491 }
1492 
1493 static int
1494 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1495 {
1496 	struct alldevs_foray af;
1497 	int unit;
1498 
1499 	config_alldevs_enter(&af);
1500 	for (;;) {
1501 		unit = config_unit_nextfree(cd, cf);
1502 		if (unit == -1)
1503 			break;
1504 		if (unit < cd->cd_ndevs) {
1505 			cd->cd_devs[unit] = dev;
1506 			dev->dv_unit = unit;
1507 			break;
1508 		}
1509 		config_makeroom(unit, cd);
1510 	}
1511 	config_alldevs_exit(&af);
1512 
1513 	return unit;
1514 }
1515 
1516 static device_t
1517 config_devalloc(const device_t parent, const cfdata_t cf,
1518     const struct cfargs_internal * const args)
1519 {
1520 	cfdriver_t cd;
1521 	cfattach_t ca;
1522 	size_t lname, lunit;
1523 	const char *xunit;
1524 	int myunit;
1525 	char num[10];
1526 	device_t dev;
1527 	void *dev_private;
1528 	const struct cfiattrdata *ia;
1529 	device_lock_t dvl;
1530 
1531 	cd = config_cfdriver_lookup(cf->cf_name);
1532 	if (cd == NULL)
1533 		return NULL;
1534 
1535 	ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1536 	if (ca == NULL)
1537 		return NULL;
1538 
1539 	/* get memory for all device vars */
1540 	KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1541 	if (ca->ca_devsize > 0) {
1542 		dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1543 	} else {
1544 		dev_private = NULL;
1545 	}
1546 	dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1547 
1548 	dev->dv_handle = args->devhandle;
1549 
1550 	dev->dv_class = cd->cd_class;
1551 	dev->dv_cfdata = cf;
1552 	dev->dv_cfdriver = cd;
1553 	dev->dv_cfattach = ca;
1554 	dev->dv_activity_count = 0;
1555 	dev->dv_activity_handlers = NULL;
1556 	dev->dv_private = dev_private;
1557 	dev->dv_flags = ca->ca_flags;	/* inherit flags from class */
1558 	dev->dv_attaching = curlwp;
1559 
1560 	myunit = config_unit_alloc(dev, cd, cf);
1561 	if (myunit == -1) {
1562 		config_devfree(dev);
1563 		return NULL;
1564 	}
1565 
1566 	/* compute length of name and decimal expansion of unit number */
1567 	lname = strlen(cd->cd_name);
1568 	xunit = number(&num[sizeof(num)], myunit);
1569 	lunit = &num[sizeof(num)] - xunit;
1570 	if (lname + lunit > sizeof(dev->dv_xname))
1571 		panic("config_devalloc: device name too long");
1572 
1573 	dvl = device_getlock(dev);
1574 
1575 	mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1576 	cv_init(&dvl->dvl_cv, "pmfsusp");
1577 
1578 	memcpy(dev->dv_xname, cd->cd_name, lname);
1579 	memcpy(dev->dv_xname + lname, xunit, lunit);
1580 	dev->dv_parent = parent;
1581 	if (parent != NULL)
1582 		dev->dv_depth = parent->dv_depth + 1;
1583 	else
1584 		dev->dv_depth = 0;
1585 	dev->dv_flags |= DVF_ACTIVE;	/* always initially active */
1586 	if (args->locators) {
1587 		KASSERT(parent); /* no locators at root */
1588 		ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1589 		dev->dv_locators =
1590 		    kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1591 		*dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1592 		memcpy(dev->dv_locators, args->locators,
1593 		    sizeof(int) * ia->ci_loclen);
1594 	}
1595 	dev->dv_properties = prop_dictionary_create();
1596 	KASSERT(dev->dv_properties != NULL);
1597 
1598 	prop_dictionary_set_string_nocopy(dev->dv_properties,
1599 	    "device-driver", dev->dv_cfdriver->cd_name);
1600 	prop_dictionary_set_uint16(dev->dv_properties,
1601 	    "device-unit", dev->dv_unit);
1602 	if (parent != NULL) {
1603 		prop_dictionary_set_string(dev->dv_properties,
1604 		    "device-parent", device_xname(parent));
1605 	}
1606 
1607 	dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
1608 	    KM_SLEEP);
1609 	localcount_init(dev->dv_localcount);
1610 
1611 	if (dev->dv_cfdriver->cd_attrs != NULL)
1612 		config_add_attrib_dict(dev);
1613 
1614 	return dev;
1615 }
1616 
1617 /*
1618  * Create an array of device attach attributes and add it
1619  * to the device's dv_properties dictionary.
1620  *
1621  * <key>interface-attributes</key>
1622  * <array>
1623  *    <dict>
1624  *       <key>attribute-name</key>
1625  *       <string>foo</string>
1626  *       <key>locators</key>
1627  *       <array>
1628  *          <dict>
1629  *             <key>loc-name</key>
1630  *             <string>foo-loc1</string>
1631  *          </dict>
1632  *          <dict>
1633  *             <key>loc-name</key>
1634  *             <string>foo-loc2</string>
1635  *             <key>default</key>
1636  *             <string>foo-loc2-default</string>
1637  *          </dict>
1638  *          ...
1639  *       </array>
1640  *    </dict>
1641  *    ...
1642  * </array>
1643  */
1644 
1645 static void
1646 config_add_attrib_dict(device_t dev)
1647 {
1648 	int i, j;
1649 	const struct cfiattrdata *ci;
1650 	prop_dictionary_t attr_dict, loc_dict;
1651 	prop_array_t attr_array, loc_array;
1652 
1653 	if ((attr_array = prop_array_create()) == NULL)
1654 		return;
1655 
1656 	for (i = 0; ; i++) {
1657 		if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1658 			break;
1659 		if ((attr_dict = prop_dictionary_create()) == NULL)
1660 			break;
1661 		prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1662 		    ci->ci_name);
1663 
1664 		/* Create an array of the locator names and defaults */
1665 
1666 		if (ci->ci_loclen != 0 &&
1667 		    (loc_array = prop_array_create()) != NULL) {
1668 			for (j = 0; j < ci->ci_loclen; j++) {
1669 				loc_dict = prop_dictionary_create();
1670 				if (loc_dict == NULL)
1671 					continue;
1672 				prop_dictionary_set_string_nocopy(loc_dict,
1673 				    "loc-name", ci->ci_locdesc[j].cld_name);
1674 				if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1675 					prop_dictionary_set_string_nocopy(
1676 					    loc_dict, "default",
1677 					    ci->ci_locdesc[j].cld_defaultstr);
1678 				prop_array_set(loc_array, j, loc_dict);
1679 				prop_object_release(loc_dict);
1680 			}
1681 			prop_dictionary_set_and_rel(attr_dict, "locators",
1682 			    loc_array);
1683 		}
1684 		prop_array_add(attr_array, attr_dict);
1685 		prop_object_release(attr_dict);
1686 	}
1687 	if (i == 0)
1688 		prop_object_release(attr_array);
1689 	else
1690 		prop_dictionary_set_and_rel(dev->dv_properties,
1691 		    "interface-attributes", attr_array);
1692 
1693 	return;
1694 }
1695 
1696 /*
1697  * Attach a found device.
1698  */
1699 static device_t
1700 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1701     const struct cfargs_internal * const args)
1702 {
1703 	device_t dev;
1704 	struct cftable *ct;
1705 	const char *drvname;
1706 	bool deferred;
1707 
1708 	KASSERT(KERNEL_LOCKED_P());
1709 
1710 	dev = config_devalloc(parent, cf, args);
1711 	if (!dev)
1712 		panic("config_attach: allocation of device softc failed");
1713 
1714 	/* XXX redundant - see below? */
1715 	if (cf->cf_fstate != FSTATE_STAR) {
1716 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1717 		cf->cf_fstate = FSTATE_FOUND;
1718 	}
1719 
1720 	config_devlink(dev);
1721 
1722 	if (config_do_twiddle && cold)
1723 		twiddle();
1724 	else
1725 		aprint_naive("Found ");
1726 	/*
1727 	 * We want the next two printfs for normal, verbose, and quiet,
1728 	 * but not silent (in which case, we're twiddling, instead).
1729 	 */
1730 	if (parent == ROOT) {
1731 		aprint_naive("%s (root)", device_xname(dev));
1732 		aprint_normal("%s (root)", device_xname(dev));
1733 	} else {
1734 		aprint_naive("%s at %s", device_xname(dev),
1735 		    device_xname(parent));
1736 		aprint_normal("%s at %s", device_xname(dev),
1737 		    device_xname(parent));
1738 		if (print)
1739 			(void) (*print)(aux, NULL);
1740 	}
1741 
1742 	/*
1743 	 * Before attaching, clobber any unfound devices that are
1744 	 * otherwise identical.
1745 	 * XXX code above is redundant?
1746 	 */
1747 	drvname = dev->dv_cfdriver->cd_name;
1748 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
1749 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1750 			if (STREQ(cf->cf_name, drvname) &&
1751 			    cf->cf_unit == dev->dv_unit) {
1752 				if (cf->cf_fstate == FSTATE_NOTFOUND)
1753 					cf->cf_fstate = FSTATE_FOUND;
1754 			}
1755 		}
1756 	}
1757 	device_register(dev, aux);
1758 
1759 	/* Let userland know */
1760 	devmon_report_device(dev, true);
1761 
1762 	/*
1763 	 * Prevent detach until the driver's attach function, and all
1764 	 * deferred actions, have finished.
1765 	 */
1766 	config_pending_incr(dev);
1767 
1768 	/* Call the driver's attach function.  */
1769 	(*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1770 
1771 	/*
1772 	 * Allow other threads to acquire references to the device now
1773 	 * that the driver's attach function is done.
1774 	 */
1775 	mutex_enter(&config_misc_lock);
1776 	KASSERT(dev->dv_attaching == curlwp);
1777 	dev->dv_attaching = NULL;
1778 	cv_broadcast(&config_misc_cv);
1779 	mutex_exit(&config_misc_lock);
1780 
1781 	/*
1782 	 * Synchronous parts of attach are done.  Allow detach, unless
1783 	 * the driver's attach function scheduled deferred actions.
1784 	 */
1785 	config_pending_decr(dev);
1786 
1787 	mutex_enter(&config_misc_lock);
1788 	deferred = (dev->dv_pending != 0);
1789 	mutex_exit(&config_misc_lock);
1790 
1791 	if (!deferred && !device_pmf_is_registered(dev))
1792 		aprint_debug_dev(dev,
1793 		    "WARNING: power management not supported\n");
1794 
1795 	config_process_deferred(&deferred_config_queue, dev);
1796 
1797 	device_register_post_config(dev, aux);
1798 	rnd_add_uint32(&rnd_autoconf_source, 0);
1799 	return dev;
1800 }
1801 
1802 device_t
1803 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1804     const struct cfargs *cfargs)
1805 {
1806 	struct cfargs_internal store;
1807 
1808 	KASSERT(KERNEL_LOCKED_P());
1809 
1810 	return config_attach_internal(parent, cf, aux, print,
1811 	    cfargs_canonicalize(cfargs, &store));
1812 }
1813 
1814 /*
1815  * As above, but for pseudo-devices.  Pseudo-devices attached in this
1816  * way are silently inserted into the device tree, and their children
1817  * attached.
1818  *
1819  * Note that because pseudo-devices are attached silently, any information
1820  * the attach routine wishes to print should be prefixed with the device
1821  * name by the attach routine.
1822  */
1823 device_t
1824 config_attach_pseudo(cfdata_t cf)
1825 {
1826 	device_t dev;
1827 
1828 	KERNEL_LOCK(1, NULL);
1829 
1830 	struct cfargs_internal args = { };
1831 	dev = config_devalloc(ROOT, cf, &args);
1832 	if (!dev)
1833 		goto out;
1834 
1835 	/* XXX mark busy in cfdata */
1836 
1837 	if (cf->cf_fstate != FSTATE_STAR) {
1838 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1839 		cf->cf_fstate = FSTATE_FOUND;
1840 	}
1841 
1842 	config_devlink(dev);
1843 
1844 #if 0	/* XXXJRT not yet */
1845 	device_register(dev, NULL);	/* like a root node */
1846 #endif
1847 
1848 	/* Let userland know */
1849 	devmon_report_device(dev, true);
1850 
1851 	/*
1852 	 * Prevent detach until the driver's attach function, and all
1853 	 * deferred actions, have finished.
1854 	 */
1855 	config_pending_incr(dev);
1856 
1857 	/* Call the driver's attach function.  */
1858 	(*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL);
1859 
1860 	/*
1861 	 * Allow other threads to acquire references to the device now
1862 	 * that the driver's attach function is done.
1863 	 */
1864 	mutex_enter(&config_misc_lock);
1865 	KASSERT(dev->dv_attaching == curlwp);
1866 	dev->dv_attaching = NULL;
1867 	cv_broadcast(&config_misc_cv);
1868 	mutex_exit(&config_misc_lock);
1869 
1870 	/*
1871 	 * Synchronous parts of attach are done.  Allow detach, unless
1872 	 * the driver's attach function scheduled deferred actions.
1873 	 */
1874 	config_pending_decr(dev);
1875 
1876 	config_process_deferred(&deferred_config_queue, dev);
1877 
1878 out:	KERNEL_UNLOCK_ONE(NULL);
1879 	return dev;
1880 }
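/*
 * Illustrative sketch (hypothetical "foo" pseudo-device): callers
 * typically supply a statically initialized cfdata entry with a
 * wildcarded unit, e.g.
 *
 *	static struct cfdata foo_cfdata = {
 *		.cf_name = "foo",
 *		.cf_atname = "foo",
 *		.cf_unit = 0,
 *		.cf_fstate = FSTATE_STAR,
 *	};
 *
 *	dev = config_attach_pseudo(&foo_cfdata);
 *
 * FSTATE_STAR lets config_unit_alloc() pick the next free unit, so the
 * same cfdata can be reused to create multiple instances.
 */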
1881 
1882 /*
1883  * Caller must hold alldevs_lock.
1884  */
1885 static void
1886 config_collect_garbage(struct devicelist *garbage)
1887 {
1888 	device_t dv;
1889 
1890 	KASSERT(!cpu_intr_p());
1891 	KASSERT(!cpu_softintr_p());
1892 	KASSERT(mutex_owned(&alldevs_lock));
1893 
1894 	while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
1895 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
1896 			if (dv->dv_del_gen != 0)
1897 				break;
1898 		}
1899 		if (dv == NULL) {
1900 			alldevs_garbage = false;
1901 			break;
1902 		}
1903 		config_devunlink(dv, garbage);
1904 	}
1905 	KASSERT(mutex_owned(&alldevs_lock));
1906 }
1907 
1908 static void
1909 config_dump_garbage(struct devicelist *garbage)
1910 {
1911 	device_t dv;
1912 
1913 	while ((dv = TAILQ_FIRST(garbage)) != NULL) {
1914 		TAILQ_REMOVE(garbage, dv, dv_list);
1915 		config_devdelete(dv);
1916 	}
1917 }
1918 
1919 static int
1920 config_detach_enter(device_t dev)
1921 {
1922 	struct lwp *l __diagused;
1923 	int error = 0;
1924 
1925 	mutex_enter(&config_misc_lock);
1926 
1927 	/*
1928 	 * Wait until attach has fully completed, and until any
1929 	 * concurrent detach (e.g., drvctl racing with USB event
1930 	 * thread) has completed.
1931 	 *
1932 	 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
1933 	 * deviter) to ensure the winner of the race doesn't free the
1934 	 * device leading the loser of the race into use-after-free.
1935 	 *
1936 	 * XXX Not all callers do this!
1937 	 */
1938 	while (dev->dv_pending || dev->dv_detaching) {
1939 		KASSERTMSG(dev->dv_detaching != curlwp,
1940 		    "recursively detaching %s", device_xname(dev));
1941 		error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
1942 		if (error)
1943 			goto out;
1944 	}
1945 
1946 	/*
1947 	 * Attach has completed, and no other concurrent detach is
1948 	 * running.  Claim the device for detaching.  This will cause
1949 	 * all new attempts to acquire references to block.
1950 	 */
1951 	KASSERTMSG((l = dev->dv_attaching) == NULL,
1952 	    "lwp %ld [%s] @ %p attaching %s",
1953 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1954 	    device_xname(dev));
1955 	KASSERTMSG((l = dev->dv_detaching) == NULL,
1956 	    "lwp %ld [%s] @ %p detaching %s",
1957 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1958 	    device_xname(dev));
1959 	dev->dv_detaching = curlwp;
1960 
1961 out:	mutex_exit(&config_misc_lock);
1962 	return error;
1963 }
1964 
1965 static void
1966 config_detach_exit(device_t dev)
1967 {
1968 	struct lwp *l __diagused;
1969 
1970 	mutex_enter(&config_misc_lock);
1971 	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
1972 	    device_xname(dev));
1973 	KASSERTMSG((l = dev->dv_detaching) == curlwp,
1974 	    "lwp %ld [%s] @ %p detaching %s",
1975 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1976 	    device_xname(dev));
1977 	dev->dv_detaching = NULL;
1978 	cv_broadcast(&config_misc_cv);
1979 	mutex_exit(&config_misc_lock);
1980 }
1981 
1982 /*
1983  * Detach a device.  Optionally forced (e.g. because of hardware
1984  * removal) and quiet.  Returns zero if successful, non-zero
1985  * (an error code) otherwise.
1986  *
1987  * Note that this code wants to be run from a process context, so
1988  * that the detach can sleep to allow processes which have a device
1989  * open to run and unwind their stacks.
1990  */
1991 int
1992 config_detach(device_t dev, int flags)
1993 {
1994 	struct alldevs_foray af;
1995 	struct cftable *ct;
1996 	cfdata_t cf;
1997 	const struct cfattach *ca;
1998 	struct cfdriver *cd;
1999 	device_t d __diagused;
2000 	int rv = 0;
2001 
2002 	KERNEL_LOCK(1, NULL);
2003 
2004 	cf = dev->dv_cfdata;
2005 	KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
2006 		cf->cf_fstate == FSTATE_STAR),
2007 	    "config_detach: %s: bad device fstate: %d",
2008 	    device_xname(dev), cf ? cf->cf_fstate : -1);
2009 
2010 	cd = dev->dv_cfdriver;
2011 	KASSERT(cd != NULL);
2012 
2013 	ca = dev->dv_cfattach;
2014 	KASSERT(ca != NULL);
2015 
2016 	/*
2017 	 * Only one detach at a time, please -- and not until fully
2018 	 * attached.
2019 	 */
2020 	rv = config_detach_enter(dev);
2021 	if (rv) {
2022 		KERNEL_UNLOCK_ONE(NULL);
2023 		return rv;
2024 	}
2025 
2026 	mutex_enter(&alldevs_lock);
2027 	if (dev->dv_del_gen != 0) {
2028 		mutex_exit(&alldevs_lock);
2029 #ifdef DIAGNOSTIC
2030 		printf("%s: %s is already detached\n", __func__,
2031 		    device_xname(dev));
2032 #endif /* DIAGNOSTIC */
2033 		config_detach_exit(dev);
2034 		KERNEL_UNLOCK_ONE(NULL);
2035 		return ENOENT;
2036 	}
2037 	alldevs_nwrite++;
2038 	mutex_exit(&alldevs_lock);
2039 
2040 	/*
2041 	 * Call the driver's .ca_detach function, unless it has none or
2042 	 * we are skipping it because it's unforced shutdown time and
2043 	 * the driver didn't ask to detach on shutdown.
2044 	 */
2045 	if (!detachall &&
2046 	    (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2047 	    (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2048 		rv = EOPNOTSUPP;
2049 	} else if (ca->ca_detach != NULL) {
2050 		rv = (*ca->ca_detach)(dev, flags);
2051 	} else
2052 		rv = EOPNOTSUPP;
2053 
2054 	KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
2055 	    device_xname(dev), rv);
2056 
2057 	/*
2058 	 * If it was not possible to detach the device, then we either
2059 	 * panic() (for the forced but failed case), or return an error.
2060 	 */
2061 	if (rv) {
2062 		/*
2063 		 * Detach failed -- likely EOPNOTSUPP or EBUSY.  Driver
2064 		 * must not have called config_detach_commit.
2065 		 */
2066 		KASSERTMSG(!dev->dv_detach_committed,
2067 		    "%s committed to detaching and then backed out, error=%d",
2068 		    device_xname(dev), rv);
2069 		if (flags & DETACH_FORCE) {
2070 			panic("config_detach: forced detach of %s failed (%d)",
2071 			    device_xname(dev), rv);
2072 		}
2073 		goto out;
2074 	}
2075 
2076 	/*
2077 	 * The device has now been successfully detached.
2078 	 */
2079 	dev->dv_detach_done = true;
2080 
2081 	/*
2082 	 * If .ca_detach didn't commit to detach, then do that for it.
2083 	 * This wakes any pending device_lookup_acquire calls so they
2084 	 * will fail.
2085 	 */
2086 	config_detach_commit(dev);
2087 
2088 	/*
2089 	 * If it was possible to detach the device, ensure that the
2090 	 * device is deactivated.
2091 	 */
2092 	dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
2093 
2094 	/*
2095 	 * Wait for all device_lookup_acquire references -- mostly, for
2096 	 * all attempts to open the device -- to drain.  It is the
2097 	 * responsibility of .ca_detach to ensure anything with open
2098 	 * references will be interrupted and release them promptly,
2099 	 * not block indefinitely.  All new attempts to acquire
2100 	 * references will fail, as config_detach_commit has arranged
2101 	 * by now.
2102 	 */
2103 	mutex_enter(&config_misc_lock);
2104 	localcount_drain(dev->dv_localcount,
2105 	    &config_misc_cv, &config_misc_lock);
2106 	mutex_exit(&config_misc_lock);
2107 
2108 	/* Let userland know */
2109 	devmon_report_device(dev, false);
2110 
2111 #ifdef DIAGNOSTIC
2112 	/*
2113 	 * Sanity: If you're successfully detached, you should have no
2114 	 * children.  (Note that because children must be attached
2115 	 * after parents, we only need to search the latter part of
2116 	 * the list.)
2117 	 */
2118 	mutex_enter(&alldevs_lock);
2119 	for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2120 	    d = TAILQ_NEXT(d, dv_list)) {
2121 		if (d->dv_parent == dev && d->dv_del_gen == 0) {
2122 			printf("config_detach: detached device %s"
2123 			    " has children %s\n", device_xname(dev),
2124 			    device_xname(d));
2125 			panic("config_detach");
2126 		}
2127 	}
2128 	mutex_exit(&alldevs_lock);
2129 #endif
2130 
2131 	/* notify the parent that the child is gone */
2132 	if (dev->dv_parent) {
2133 		device_t p = dev->dv_parent;
2134 		if (p->dv_cfattach->ca_childdetached)
2135 			(*p->dv_cfattach->ca_childdetached)(p, dev);
2136 	}
2137 
2138 	/*
2139 	 * Mark cfdata to show that the unit can be reused, if possible.
2140 	 */
2141 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
2142 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2143 			if (STREQ(cf->cf_name, cd->cd_name)) {
2144 				if (cf->cf_fstate == FSTATE_FOUND &&
2145 				    cf->cf_unit == dev->dv_unit)
2146 					cf->cf_fstate = FSTATE_NOTFOUND;
2147 			}
2148 		}
2149 	}
2150 
2151 	if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2152 		aprint_normal_dev(dev, "detached\n");
2153 
2154 out:
2155 	config_detach_exit(dev);
2156 
2157 	config_alldevs_enter(&af);
2158 	KASSERT(alldevs_nwrite != 0);
2159 	--alldevs_nwrite;
2160 	if (rv == 0 && dev->dv_del_gen == 0) {
2161 		if (alldevs_nwrite == 0 && alldevs_nread == 0)
2162 			config_devunlink(dev, &af.af_garbage);
2163 		else {
2164 			dev->dv_del_gen = alldevs_gen;
2165 			alldevs_garbage = true;
2166 		}
2167 	}
2168 	config_alldevs_exit(&af);
2169 
2170 	KERNEL_UNLOCK_ONE(NULL);
2171 
2172 	return rv;
2173 }
2174 
2175 /*
2176  * config_detach_commit(dev)
2177  *
2178  *	Issued by a driver's .ca_detach routine to notify anyone
2179  *	waiting in device_lookup_acquire that the driver is committed
2180  *	to detaching the device, which allows device_lookup_acquire to
2181  *	wake up and fail immediately.
2182  *
2183  *	Safe to call multiple times -- idempotent.  Must be called
2184  *	during config_detach_enter/exit.  Safe to use with
2185  *	device_lookup because the device is not actually removed from
2186  *	the table until after config_detach_exit.
2187  */
2188 void
2189 config_detach_commit(device_t dev)
2190 {
2191 	struct lwp *l __diagused;
2192 
2193 	mutex_enter(&config_misc_lock);
2194 	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
2195 	    device_xname(dev));
2196 	KASSERTMSG((l = dev->dv_detaching) == curlwp,
2197 	    "lwp %ld [%s] @ %p detaching %s",
2198 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2199 	    device_xname(dev));
2200 	dev->dv_detach_committed = true;
2201 	cv_broadcast(&config_misc_cv);
2202 	mutex_exit(&config_misc_lock);
2203 }
2204 
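/*
 * Example: how a driver's .ca_detach routine might use
 * config_detach_commit().  This is an illustrative sketch only; the
 * foo(4) driver, its softc layout, and foo_stop_hardware() are
 * hypothetical.
 *
 *	static int
 *	foo_detach(device_t self, int flags)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *
 *		if (sc->sc_busy && (flags & DETACH_FORCE) == 0)
 *			return EBUSY;	// refuse an unforced detach
 *
 *		// Point of no return: wake device_lookup_acquire()
 *		// waiters so that new references fail.
 *		config_detach_commit(self);
 *
 *		foo_stop_hardware(sc);	// hypothetical teardown helper
 *		return 0;
 *	}
 *
 * If .ca_detach returns success without calling config_detach_commit(),
 * config_detach() performs the commit on the driver's behalf.
 */
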
2205 int
2206 config_detach_children(device_t parent, int flags)
2207 {
2208 	device_t dv;
2209 	deviter_t di;
2210 	int error = 0;
2211 
2212 	KASSERT(KERNEL_LOCKED_P());
2213 
2214 	for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2215 	     dv = deviter_next(&di)) {
2216 		if (device_parent(dv) != parent)
2217 			continue;
2218 		if ((error = config_detach(dv, flags)) != 0)
2219 			break;
2220 	}
2221 	deviter_release(&di);
2222 	return error;
2223 }
2224 
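/*
 * Example: a parent (bus) driver's .ca_detach routine typically
 * detaches its children first via config_detach_children().  An
 * illustrative sketch; mybus(4) and its resource teardown are
 * hypothetical.
 *
 *	static int
 *	mybus_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		// Children must go before the bus resources they use.
 *		error = config_detach_children(self, flags);
 *		if (error)
 *			return error;
 *
 *		// ...release bus-specific resources here...
 *		return 0;
 *	}
 */
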
2225 device_t
2226 shutdown_first(struct shutdown_state *s)
2227 {
2228 	if (!s->initialized) {
2229 		deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2230 		s->initialized = true;
2231 	}
2232 	return shutdown_next(s);
2233 }
2234 
2235 device_t
2236 shutdown_next(struct shutdown_state *s)
2237 {
2238 	device_t dv;
2239 
2240 	while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2241 		;
2242 
2243 	if (dv == NULL)
2244 		s->initialized = false;
2245 
2246 	return dv;
2247 }
2248 
2249 bool
2250 config_detach_all(int how)
2251 {
2252 	static struct shutdown_state s;
2253 	device_t curdev;
2254 	bool progress = false;
2255 	int flags;
2256 
2257 	KERNEL_LOCK(1, NULL);
2258 
2259 	if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2260 		goto out;
2261 
2262 	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2263 		flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2264 	else
2265 		flags = DETACH_SHUTDOWN;
2266 
2267 	for (curdev = shutdown_first(&s); curdev != NULL;
2268 	     curdev = shutdown_next(&s)) {
2269 		aprint_debug(" detaching %s, ", device_xname(curdev));
2270 		if (config_detach(curdev, flags) == 0) {
2271 			progress = true;
2272 			aprint_debug("success.");
2273 		} else
2274 			aprint_debug("failed.");
2275 	}
2276 
2277 out:	KERNEL_UNLOCK_ONE(NULL);
2278 	return progress;
2279 }
2280 
2281 static bool
2282 device_is_ancestor_of(device_t ancestor, device_t descendant)
2283 {
2284 	device_t dv;
2285 
2286 	for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2287 		if (device_parent(dv) == ancestor)
2288 			return true;
2289 	}
2290 	return false;
2291 }
2292 
2293 int
2294 config_deactivate(device_t dev)
2295 {
2296 	deviter_t di;
2297 	const struct cfattach *ca;
2298 	device_t descendant;
2299 	int s, rv = 0, oflags;
2300 
2301 	for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2302 	     descendant != NULL;
2303 	     descendant = deviter_next(&di)) {
2304 		if (dev != descendant &&
2305 		    !device_is_ancestor_of(dev, descendant))
2306 			continue;
2307 
2308 		if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2309 			continue;
2310 
2311 		ca = descendant->dv_cfattach;
2312 		oflags = descendant->dv_flags;
2313 
2314 		descendant->dv_flags &= ~DVF_ACTIVE;
2315 		if (ca->ca_activate == NULL)
2316 			continue;
2317 		s = splhigh();
2318 		rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2319 		splx(s);
2320 		if (rv != 0)
2321 			descendant->dv_flags = oflags;
2322 	}
2323 	deviter_release(&di);
2324 	return rv;
2325 }
2326 
2327 /*
2328  * Defer the configuration of the specified device until all
2329  * of its parent's devices have been attached.
2330  */
2331 void
2332 config_defer(device_t dev, void (*func)(device_t))
2333 {
2334 	struct deferred_config *dc;
2335 
2336 	if (dev->dv_parent == NULL)
2337 		panic("config_defer: can't defer config of a root device");
2338 
2339 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2340 
2341 	config_pending_incr(dev);
2342 
2343 	mutex_enter(&config_misc_lock);
2344 #ifdef DIAGNOSTIC
2345 	struct deferred_config *odc;
2346 	TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2347 		if (odc->dc_dev == dev)
2348 			panic("config_defer: deferred twice");
2349 	}
2350 #endif
2351 	dc->dc_dev = dev;
2352 	dc->dc_func = func;
2353 	TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2354 	mutex_exit(&config_misc_lock);
2355 }
2356 
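/*
 * Example: deferring part of a child's configuration until all of its
 * parent's children have attached.  An illustrative sketch; foo(4) is
 * hypothetical.
 *
 *	static void foo_deferred(device_t);
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		aprint_normal("\n");
 *		// Runs foo_deferred(self) once the parent has attached
 *		// all of its children; detach is held off meanwhile.
 *		config_defer(self, foo_deferred);
 *	}
 *
 *	static void
 *	foo_deferred(device_t self)
 *	{
 *		aprint_normal_dev(self, "deferred configuration done\n");
 *	}
 */
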
2357 /*
2358  * Defer some autoconfiguration for a device until after interrupts
2359  * are enabled.
2360  */
2361 void
2362 config_interrupts(device_t dev, void (*func)(device_t))
2363 {
2364 	struct deferred_config *dc;
2365 
2366 	/*
2367 	 * If interrupts are already enabled, invoke the callback now.
2368 	 */
2369 	if (cold == 0) {
2370 		(*func)(dev);
2371 		return;
2372 	}
2373 
2374 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2375 
2376 	config_pending_incr(dev);
2377 
2378 	mutex_enter(&config_misc_lock);
2379 #ifdef DIAGNOSTIC
2380 	struct deferred_config *odc;
2381 	TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2382 		if (odc->dc_dev == dev)
2383 			panic("config_interrupts: deferred twice");
2384 	}
2385 #endif
2386 	dc->dc_dev = dev;
2387 	dc->dc_func = func;
2388 	TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2389 	mutex_exit(&config_misc_lock);
2390 }
2391 
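/*
 * Example: completing attachment once interrupts work.  An
 * illustrative sketch; foo(4) is hypothetical.
 *
 *	static void foo_attach_post_intr(device_t);
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		aprint_normal("\n");
 *		// During cold boot this is queued and runs later;
 *		// otherwise the callback is invoked immediately.
 *		config_interrupts(self, foo_attach_post_intr);
 *	}
 *
 *	static void
 *	foo_attach_post_intr(device_t self)
 *	{
 *		aprint_normal_dev(self, "interrupt-driven setup done\n");
 *	}
 */
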
2392 /*
2393  * Defer some autoconfiguration for a device until after the root file
2394  * system is mounted (e.g., to load firmware).
2395  */
2396 void
2397 config_mountroot(device_t dev, void (*func)(device_t))
2398 {
2399 	struct deferred_config *dc;
2400 
2401 	/*
2402 	 * If the root file system is mounted, invoke the callback now.
2403 	 */
2404 	if (root_is_mounted) {
2405 		(*func)(dev);
2406 		return;
2407 	}
2408 
2409 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2410 
2411 	mutex_enter(&config_misc_lock);
2412 #ifdef DIAGNOSTIC
2413 	struct deferred_config *odc;
2414 	TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2415 		if (odc->dc_dev == dev)
2416 			panic("%s: deferred twice", __func__);
2417 	}
2418 #endif
2419 
2420 	dc->dc_dev = dev;
2421 	dc->dc_func = func;
2422 	TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2423 	mutex_exit(&config_misc_lock);
2424 }
2425 
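/*
 * Example: deferring work that needs the root file system, such as
 * loading firmware from disk.  An illustrative sketch; foo(4) is
 * hypothetical, and a real driver would typically fetch its image
 * with something like firmload(9) here.
 *
 *	static void
 *	foo_load_firmware(device_t self)
 *	{
 *		aprint_normal_dev(self, "root mounted, loading firmware\n");
 *	}
 *
 *	// From foo_attach():
 *	//	config_mountroot(self, foo_load_firmware);
 */
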
2426 /*
2427  * Process a deferred configuration queue.
2428  */
2429 static void
2430 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2431 {
2432 	struct deferred_config *dc;
2433 
2434 	KASSERT(KERNEL_LOCKED_P());
2435 
2436 	mutex_enter(&config_misc_lock);
2437 	dc = TAILQ_FIRST(queue);
2438 	while (dc) {
2439 		if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2440 			TAILQ_REMOVE(queue, dc, dc_queue);
2441 			mutex_exit(&config_misc_lock);
2442 
2443 			(*dc->dc_func)(dc->dc_dev);
2444 			config_pending_decr(dc->dc_dev);
2445 			kmem_free(dc, sizeof(*dc));
2446 
2447 			mutex_enter(&config_misc_lock);
2448 			/* Restart, queue might have changed */
2449 			dc = TAILQ_FIRST(queue);
2450 		} else {
2451 			dc = TAILQ_NEXT(dc, dc_queue);
2452 		}
2453 	}
2454 	mutex_exit(&config_misc_lock);
2455 }
2456 
2457 /*
2458  * Manipulate the config_pending semaphore.
2459  */
2460 void
2461 config_pending_incr(device_t dev)
2462 {
2463 
2464 	mutex_enter(&config_misc_lock);
2465 	KASSERTMSG(dev->dv_pending < INT_MAX,
2466 	    "%s: excess config_pending_incr", device_xname(dev));
2467 	if (dev->dv_pending++ == 0)
2468 		TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2469 #ifdef DEBUG_AUTOCONF
2470 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2471 #endif
2472 	mutex_exit(&config_misc_lock);
2473 }
2474 
2475 void
2476 config_pending_decr(device_t dev)
2477 {
2478 
2479 	mutex_enter(&config_misc_lock);
2480 	KASSERTMSG(dev->dv_pending > 0,
2481 	    "%s: excess config_pending_decr", device_xname(dev));
2482 	if (--dev->dv_pending == 0) {
2483 		TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2484 		cv_broadcast(&config_misc_cv);
2485 	}
2486 #ifdef DEBUG_AUTOCONF
2487 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2488 #endif
2489 	mutex_exit(&config_misc_lock);
2490 }
2491 
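/*
 * Example: a driver that starts asynchronous setup from attach can use
 * the config_pending counter to hold off detach and config_finalize()
 * until that setup completes.  An illustrative sketch; foo(4) and its
 * worker are hypothetical.
 *
 *	// In foo_attach(), before handing work to a worker thread:
 *	//	config_pending_incr(self);
 *
 *	static void
 *	foo_async_done(device_t self)
 *	{
 *		// Asynchronous setup finished; detach and
 *		// config_finalize() may proceed.
 *		config_pending_decr(self);
 *	}
 */
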
2492 /*
2493  * Register a "finalization" routine.  Finalization routines are
2494  * called iteratively once all real devices have been found during
2495  * autoconfiguration, for as long as any one finalizer has done
2496  * any work.
2497  */
2498 int
2499 config_finalize_register(device_t dev, int (*fn)(device_t))
2500 {
2501 	struct finalize_hook *f;
2502 	int error = 0;
2503 
2504 	KERNEL_LOCK(1, NULL);
2505 
2506 	/*
2507 	 * If finalization has already been done, invoke the
2508 	 * callback function now.
2509 	 */
2510 	if (config_finalize_done) {
2511 		while ((*fn)(dev) != 0)
2512 			/* loop */ ;
2513 		goto out;
2514 	}
2515 
2516 	/* Ensure this isn't already on the list. */
2517 	TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2518 		if (f->f_func == fn && f->f_dev == dev) {
2519 			error = EEXIST;
2520 			goto out;
2521 		}
2522 	}
2523 
2524 	f = kmem_alloc(sizeof(*f), KM_SLEEP);
2525 	f->f_func = fn;
2526 	f->f_dev = dev;
2527 	TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2528 
2529 	/* Success!  */
2530 	error = 0;
2531 
2532 out:	KERNEL_UNLOCK_ONE(NULL);
2533 	return error;
2534 }
2535 
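/*
 * Example: a finalizer that rescans for late-appearing children.  The
 * hooks run repeatedly until every registered finalizer returns 0, so
 * a finalizer returns nonzero only when it actually did some work.
 * An illustrative sketch; mybus(4) and its rescan logic are
 * hypothetical.
 *
 *	static int
 *	mybus_finalize(device_t self)
 *	{
 *		struct mybus_softc *sc = device_private(self);
 *
 *		if (!sc->sc_rescan_needed)
 *			return 0;		// nothing left to do
 *		sc->sc_rescan_needed = false;
 *		// ...look for and attach late children here...
 *		return 1;		// did work; run the hooks again
 *	}
 *
 *	// Registered from mybus_attach():
 *	//	config_finalize_register(self, mybus_finalize);
 */
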
2536 void
2537 config_finalize(void)
2538 {
2539 	struct finalize_hook *f;
2540 	struct pdevinit *pdev;
2541 	extern struct pdevinit pdevinit[];
2542 	int errcnt, rv;
2543 
2544 	/*
2545 	 * Now that device driver threads have been created, wait for
2546 	 * them to finish any deferred autoconfiguration.
2547 	 */
2548 	mutex_enter(&config_misc_lock);
2549 	while (!TAILQ_EMPTY(&config_pending)) {
2550 		device_t dev;
2551 		int error;
2552 
2553 		error = cv_timedwait(&config_misc_cv, &config_misc_lock,
2554 		    mstohz(1000));
2555 		if (error == EWOULDBLOCK) {
2556 			aprint_debug("waiting for devices:");
2557 			TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2558 				aprint_debug(" %s", device_xname(dev));
2559 			aprint_debug("\n");
2560 		}
2561 	}
2562 	mutex_exit(&config_misc_lock);
2563 
2564 	KERNEL_LOCK(1, NULL);
2565 
2566 	/* Attach pseudo-devices. */
2567 	for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2568 		(*pdev->pdev_attach)(pdev->pdev_count);
2569 
2570 	/* Run the hooks until none of them does any work. */
2571 	do {
2572 		rv = 0;
2573 		TAILQ_FOREACH(f, &config_finalize_list, f_list)
2574 			rv |= (*f->f_func)(f->f_dev);
2575 	} while (rv != 0);
2576 
2577 	config_finalize_done = 1;
2578 
2579 	/* Now free all the hooks. */
2580 	while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2581 		TAILQ_REMOVE(&config_finalize_list, f, f_list);
2582 		kmem_free(f, sizeof(*f));
2583 	}
2584 
2585 	KERNEL_UNLOCK_ONE(NULL);
2586 
2587 	errcnt = aprint_get_error_count();
2588 	if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2589 	    (boothowto & AB_VERBOSE) == 0) {
2590 		mutex_enter(&config_misc_lock);
2591 		if (config_do_twiddle) {
2592 			config_do_twiddle = 0;
2593 			printf_nolog(" done.\n");
2594 		}
2595 		mutex_exit(&config_misc_lock);
2596 	}
2597 	if (errcnt != 0) {
2598 		printf("WARNING: %d error%s while detecting hardware; "
2599 		    "check system log.\n", errcnt,
2600 		    errcnt == 1 ? "" : "s");
2601 	}
2602 }
2603 
2604 void
2605 config_twiddle_init(void)
2606 {
2607 
2608 	if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2609 		config_do_twiddle = 1;
2610 	}
2611 	callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2612 }
2613 
2614 void
2615 config_twiddle_fn(void *cookie)
2616 {
2617 
2618 	mutex_enter(&config_misc_lock);
2619 	if (config_do_twiddle) {
2620 		twiddle();
2621 		callout_schedule(&config_twiddle_ch, mstohz(100));
2622 	}
2623 	mutex_exit(&config_misc_lock);
2624 }
2625 
2626 static void
2627 config_alldevs_enter(struct alldevs_foray *af)
2628 {
2629 	TAILQ_INIT(&af->af_garbage);
2630 	mutex_enter(&alldevs_lock);
2631 	config_collect_garbage(&af->af_garbage);
2632 }
2633 
2634 static void
2635 config_alldevs_exit(struct alldevs_foray *af)
2636 {
2637 	mutex_exit(&alldevs_lock);
2638 	config_dump_garbage(&af->af_garbage);
2639 }
2640 
2641 /*
2642  * device_lookup:
2643  *
2644  *	Look up a device instance for a given driver.
2645  *
2646  *	Caller is responsible for ensuring the device's state is
2647  *	stable, either by holding a reference already obtained with
2648  *	device_lookup_acquire or by otherwise ensuring the device is
2649  *	attached and can't be detached (e.g., holding an open device
2650  *	node and ensuring *_detach calls vdevgone).
2651  *
2652  *	XXX Find a way to assert this.
2653  *
2654  *	Safe for use up to and including interrupt context at IPL_VM.
2655  *	Never sleeps.
2656  */
2657 device_t
2658 device_lookup(cfdriver_t cd, int unit)
2659 {
2660 	device_t dv;
2661 
2662 	mutex_enter(&alldevs_lock);
2663 	if (unit < 0 || unit >= cd->cd_ndevs)
2664 		dv = NULL;
2665 	else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2666 		dv = NULL;
2667 	mutex_exit(&alldevs_lock);
2668 
2669 	return dv;
2670 }
2671 
2672 /*
2673  * device_lookup_private:
2674  *
2675  *	Look up a softc instance for a given driver.
2676  */
2677 void *
2678 device_lookup_private(cfdriver_t cd, int unit)
2679 {
2680 
2681 	return device_private(device_lookup(cd, unit));
2682 }
2683 
2684 /*
2685  * device_lookup_acquire:
2686  *
2687  *	Look up a device instance for a given driver, and return a
2688  *	reference to it that must be released by device_release.
2689  *
2690  *	=> If the device is still attaching, blocks until *_attach has
2691  *	   returned.
2692  *
2693  *	=> If the device is detaching, blocks until *_detach has
2694  *	   returned.  May succeed or fail in that case, depending on
2695  *	   whether *_detach has backed out (EBUSY) or committed to
2696  *	   detaching.
2697  *
2698  *	May sleep.
2699  */
2700 device_t
2701 device_lookup_acquire(cfdriver_t cd, int unit)
2702 {
2703 	device_t dv;
2704 
2705 	ASSERT_SLEEPABLE();
2706 
2707 	/* XXX This should have a pserialized fast path -- TBD.  */
2708 	mutex_enter(&config_misc_lock);
2709 	mutex_enter(&alldevs_lock);
2710 retry:	if (unit < 0 || unit >= cd->cd_ndevs ||
2711 	    (dv = cd->cd_devs[unit]) == NULL ||
2712 	    dv->dv_del_gen != 0 ||
2713 	    dv->dv_detach_committed) {
2714 		dv = NULL;
2715 	} else {
2716 		/*
2717 		 * Wait for the device to stabilize, if attaching or
2718 		 * detaching.  Either way we must wait for *_attach or
2719 		 * *_detach to complete, and either way we must retry:
2720 		 * even if detaching, *_detach might fail (EBUSY) so
2721 		 * the device may still be there.
2722 		 */
2723 		if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
2724 		    dv->dv_detaching != NULL) {
2725 			mutex_exit(&alldevs_lock);
2726 			cv_wait(&config_misc_cv, &config_misc_lock);
2727 			mutex_enter(&alldevs_lock);
2728 			goto retry;
2729 		}
2730 		localcount_acquire(dv->dv_localcount);
2731 	}
2732 	mutex_exit(&alldevs_lock);
2733 	mutex_exit(&config_misc_lock);
2734 
2735 	return dv;
2736 }
2737 
2738 /*
2739  * device_release:
2740  *
2741  *	Release a reference to a device acquired with
2742  *	device_lookup_acquire.
2743  */
2744 void
2745 device_release(device_t dv)
2746 {
2747 
2748 	localcount_release(dv->dv_localcount,
2749 	    &config_misc_cv, &config_misc_lock);
2750 }
2751 
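/*
 * Example: holding a reference across use of the softc, e.g. from a
 * character device open routine.  An illustrative sketch; foo(4) and
 * its autoconfiguration glue (foo_cd is generated by config(1)) are
 * hypothetical.
 *
 *	extern struct cfdriver foo_cd;
 *
 *	static int
 *	foo_open(dev_t dev, int flag, int mode, struct lwp *l)
 *	{
 *		device_t self;
 *		struct foo_softc *sc;
 *
 *		self = device_lookup_acquire(&foo_cd, minor(dev));
 *		if (self == NULL)
 *			return ENXIO;
 *		sc = device_private(self);
 *		// ...the device cannot finish detaching while the
 *		// reference is held...
 *		device_release(self);
 *		return 0;
 *	}
 */
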
2752 /*
2753  * device_find_by_xname:
2754  *
2755  *	Returns the device of the given name or NULL if it doesn't exist.
2756  */
2757 device_t
2758 device_find_by_xname(const char *name)
2759 {
2760 	device_t dv;
2761 	deviter_t di;
2762 
2763 	for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2764 		if (strcmp(device_xname(dv), name) == 0)
2765 			break;
2766 	}
2767 	deviter_release(&di);
2768 
2769 	return dv;
2770 }
2771 
2772 /*
2773  * device_find_by_driver_unit:
2774  *
2775  *	Returns the device of the given driver name and unit or
2776  *	NULL if it doesn't exist.
2777  */
2778 device_t
2779 device_find_by_driver_unit(const char *name, int unit)
2780 {
2781 	struct cfdriver *cd;
2782 
2783 	if ((cd = config_cfdriver_lookup(name)) == NULL)
2784 		return NULL;
2785 	return device_lookup(cd, unit);
2786 }
2787 
2788 static bool
2789 match_strcmp(const char * const s1, const char * const s2)
2790 {
2791 	return strcmp(s1, s2) == 0;
2792 }
2793 
2794 static bool
2795 match_pmatch(const char * const s1, const char * const s2)
2796 {
2797 	return pmatch(s1, s2, NULL) == 2;
2798 }
2799 
2800 static bool
2801 strarray_match_internal(const char ** const strings,
2802     unsigned int const nstrings, const char * const str,
2803     unsigned int * const indexp,
2804     bool (*match_fn)(const char *, const char *))
2805 {
2806 	unsigned int i;
2807 
2808 	if (strings == NULL || nstrings == 0) {
2809 		return false;
2810 	}
2811 
2812 	for (i = 0; i < nstrings; i++) {
2813 		if ((*match_fn)(strings[i], str)) {
2814 			*indexp = i;
2815 			return true;
2816 		}
2817 	}
2818 
2819 	return false;
2820 }
2821 
2822 static int
2823 strarray_match(const char ** const strings, unsigned int const nstrings,
2824     const char * const str)
2825 {
2826 	unsigned int idx;
2827 
2828 	if (strarray_match_internal(strings, nstrings, str, &idx,
2829 				    match_strcmp)) {
2830 		return (int)(nstrings - idx);
2831 	}
2832 	return 0;
2833 }
2834 
2835 static int
2836 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2837     const char * const pattern)
2838 {
2839 	unsigned int idx;
2840 
2841 	if (strarray_match_internal(strings, nstrings, pattern, &idx,
2842 				    match_pmatch)) {
2843 		return (int)(nstrings - idx);
2844 	}
2845 	return 0;
2846 }
2847 
2848 static int
2849 device_compatible_match_strarray_internal(
2850     const char **device_compats, int ndevice_compats,
2851     const struct device_compatible_entry *driver_compats,
2852     const struct device_compatible_entry **matching_entryp,
2853     int (*match_fn)(const char **, unsigned int, const char *))
2854 {
2855 	const struct device_compatible_entry *dce = NULL;
2856 	int rv;
2857 
2858 	if (ndevice_compats == 0 || device_compats == NULL ||
2859 	    driver_compats == NULL)
2860 		return 0;
2861 
2862 	for (dce = driver_compats; dce->compat != NULL; dce++) {
2863 		rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2864 		if (rv != 0) {
2865 			if (matching_entryp != NULL) {
2866 				*matching_entryp = dce;
2867 			}
2868 			return rv;
2869 		}
2870 	}
2871 	return 0;
2872 }
2873 
2874 /*
2875  * device_compatible_match:
2876  *
2877  *	Match a driver's "compatible" data against a device's
2878  *	"compatible" strings.  Returns a result weighted by
2879  *	which device "compatible" string was matched.
2880  */
2881 int
2882 device_compatible_match(const char **device_compats, int ndevice_compats,
2883     const struct device_compatible_entry *driver_compats)
2884 {
2885 	return device_compatible_match_strarray_internal(device_compats,
2886 	    ndevice_compats, driver_compats, NULL, strarray_match);
2887 }
2888 
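/*
 * Example: a match routine comparing device "compatible" strings
 * against a driver table.  An illustrative sketch; the "acme,..."
 * strings and foo_match() are hypothetical, and a real driver would
 * take the device's strings from its bus attach arguments rather than
 * a local array.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,frobnicator-2" },
 *		{ .compat = "acme,frobnicator" },
 *		{ .compat = NULL }		// terminator
 *	};
 *
 *	static int
 *	foo_match(device_t parent, cfdata_t cf, void *aux)
 *	{
 *		const char *device_compats[] = {
 *			"acme,frobnicator-2", "acme,frobnicator"
 *		};
 *
 *		// Nonzero means a match; a match on an earlier (more
 *		// specific) device string yields a larger value.
 *		return device_compatible_match(device_compats, 2,
 *		    compat_data);
 *	}
 */
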
2889 /*
2890  * device_compatible_pmatch:
2891  *
2892  *	Like device_compatible_match(), but uses pmatch(9) to compare
2893  *	the device "compatible" strings against patterns in the
2894  *	driver's "compatible" data.
2895  */
2896 int
2897 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
2898     const struct device_compatible_entry *driver_compats)
2899 {
2900 	return device_compatible_match_strarray_internal(device_compats,
2901 	    ndevice_compats, driver_compats, NULL, strarray_pmatch);
2902 }
2903 
2904 static int
2905 device_compatible_match_strlist_internal(
2906     const char * const device_compats, size_t const device_compatsize,
2907     const struct device_compatible_entry *driver_compats,
2908     const struct device_compatible_entry **matching_entryp,
2909     int (*match_fn)(const char *, size_t, const char *))
2910 {
2911 	const struct device_compatible_entry *dce = NULL;
2912 	int rv;
2913 
2914 	if (device_compats == NULL || device_compatsize == 0 ||
2915 	    driver_compats == NULL)
2916 		return 0;
2917 
2918 	for (dce = driver_compats; dce->compat != NULL; dce++) {
2919 		rv = (*match_fn)(device_compats, device_compatsize,
2920 		    dce->compat);
2921 		if (rv != 0) {
2922 			if (matching_entryp != NULL) {
2923 				*matching_entryp = dce;
2924 			}
2925 			return rv;
2926 		}
2927 	}
2928 	return 0;
2929 }
2930 
2931 /*
2932  * device_compatible_match_strlist:
2933  *
2934  *	Like device_compatible_match(), but takes the device
2935  *	"compatible" strings as an OpenFirmware-style string
2936  *	list.
2937  */
2938 int
2939 device_compatible_match_strlist(
2940     const char * const device_compats, size_t const device_compatsize,
2941     const struct device_compatible_entry *driver_compats)
2942 {
2943 	return device_compatible_match_strlist_internal(device_compats,
2944 	    device_compatsize, driver_compats, NULL, strlist_match);
2945 }
2946 
2947 /*
2948  * device_compatible_pmatch_strlist:
2949  *
2950  *	Like device_compatible_pmatch(), but takes the device
2951  *	"compatible" strings as an OpenFirmware-style string
2952  *	list.
2953  */
2954 int
2955 device_compatible_pmatch_strlist(
2956     const char * const device_compats, size_t const device_compatsize,
2957     const struct device_compatible_entry *driver_compats)
2958 {
2959 	return device_compatible_match_strlist_internal(device_compats,
2960 	    device_compatsize, driver_compats, NULL, strlist_pmatch);
2961 }
2962 
2963 static int
2964 device_compatible_match_id_internal(
2965     uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
2966     const struct device_compatible_entry *driver_compats,
2967     const struct device_compatible_entry **matching_entryp)
2968 {
2969 	const struct device_compatible_entry *dce = NULL;
2970 
2971 	if (mask == 0)
2972 		return 0;
2973 
2974 	for (dce = driver_compats; dce->id != sentinel_id; dce++) {
2975 		if ((id & mask) == dce->id) {
2976 			if (matching_entryp != NULL) {
2977 				*matching_entryp = dce;
2978 			}
2979 			return 1;
2980 		}
2981 	}
2982 	return 0;
2983 }
2984 
2985 /*
2986  * device_compatible_match_id:
2987  *
2988  *	Like device_compatible_match(), but takes a single
2989  *	unsigned integer device ID.
2990  */
2991 int
2992 device_compatible_match_id(
2993     uintptr_t const id, uintptr_t const sentinel_id,
2994     const struct device_compatible_entry *driver_compats)
2995 {
2996 	return device_compatible_match_id_internal(id, (uintptr_t)-1,
2997 	    sentinel_id, driver_compats, NULL);
2998 }
2999 
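/*
 * Example: matching a numeric device ID against a driver table that
 * ends with a sentinel entry.  An illustrative sketch; the IDs and
 * FOO_ID_SENTINEL are hypothetical.
 *
 *	#define FOO_ID_SENTINEL	((uintptr_t)-1)
 *
 *	static const struct device_compatible_entry foo_ids[] = {
 *		{ .id = 0x1234 },
 *		{ .id = 0x1235 },
 *		{ .id = FOO_ID_SENTINEL }	// terminator
 *	};
 *
 *	// Returns 1 on an exact ID match, 0 otherwise:
 *	//	device_compatible_match_id(id, FOO_ID_SENTINEL, foo_ids);
 */
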
3000 /*
3001  * device_compatible_lookup:
3002  *
3003  *	Look up and return the device_compatible_entry, using the
3004  *	same matching criteria used by device_compatible_match().
3005  */
3006 const struct device_compatible_entry *
3007 device_compatible_lookup(const char **device_compats, int ndevice_compats,
3008 			 const struct device_compatible_entry *driver_compats)
3009 {
3010 	const struct device_compatible_entry *dce;
3011 
3012 	if (device_compatible_match_strarray_internal(device_compats,
3013 	    ndevice_compats, driver_compats, &dce, strarray_match)) {
3014 		return dce;
3015 	}
3016 	return NULL;
3017 }
3018 
3019 /*
3020  * device_compatible_plookup:
3021  *
3022  *	Look up and return the device_compatible_entry, using the
3023  *	same matching criteria used by device_compatible_pmatch().
3024  */
3025 const struct device_compatible_entry *
3026 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3027 			  const struct device_compatible_entry *driver_compats)
3028 {
3029 	const struct device_compatible_entry *dce;
3030 
3031 	if (device_compatible_match_strarray_internal(device_compats,
3032 	    ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3033 		return dce;
3034 	}
3035 	return NULL;
3036 }
3037 
3038 /*
3039  * device_compatible_lookup_strlist:
3040  *
3041  *	Like device_compatible_lookup(), but takes the device
3042  *	"compatible" strings as an OpenFirmware-style string
3043  *	list.
3044  */
3045 const struct device_compatible_entry *
3046 device_compatible_lookup_strlist(
3047     const char * const device_compats, size_t const device_compatsize,
3048     const struct device_compatible_entry *driver_compats)
3049 {
3050 	const struct device_compatible_entry *dce;
3051 
3052 	if (device_compatible_match_strlist_internal(device_compats,
3053 	    device_compatsize, driver_compats, &dce, strlist_match)) {
3054 		return dce;
3055 	}
3056 	return NULL;
3057 }
3058 
3059 /*
3060  * device_compatible_plookup_strlist:
3061  *
3062  *	Like device_compatible_plookup(), but takes the device
3063  *	"compatible" strings as an OpenFirmware-style string
3064  *	list.
3065  */
3066 const struct device_compatible_entry *
3067 device_compatible_plookup_strlist(
3068     const char * const device_compats, size_t const device_compatsize,
3069     const struct device_compatible_entry *driver_compats)
3070 {
3071 	const struct device_compatible_entry *dce;
3072 
3073 	if (device_compatible_match_strlist_internal(device_compats,
3074 	    device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3075 		return dce;
3076 	}
3077 	return NULL;
3078 }
3079 
3080 /*
3081  * device_compatible_lookup_id:
3082  *
3083  *	Like device_compatible_lookup(), but takes a single
3084  *	unsigned integer device ID.
3085  */
3086 const struct device_compatible_entry *
3087 device_compatible_lookup_id(
3088     uintptr_t const id, uintptr_t const sentinel_id,
3089     const struct device_compatible_entry *driver_compats)
3090 {
3091 	const struct device_compatible_entry *dce;
3092 
3093 	if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3094 	    sentinel_id, driver_compats, &dce)) {
3095 		return dce;
3096 	}
3097 	return NULL;
3098 }
3099 
3100 /*
3101  * Power management related functions.
3102  */
3103 
3104 bool
3105 device_pmf_is_registered(device_t dev)
3106 {
3107 	return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3108 }
3109 
3110 bool
3111 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
3112 {
3113 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3114 		return true;
3115 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3116 		return false;
3117 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3118 	    dev->dv_driver_suspend != NULL &&
3119 	    !(*dev->dv_driver_suspend)(dev, qual))
3120 		return false;
3121 
3122 	dev->dv_flags |= DVF_DRIVER_SUSPENDED;
3123 	return true;
3124 }
3125 
3126 bool
3127 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3128 {
3129 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3130 		return true;
3131 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3132 		return false;
3133 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3134 	    dev->dv_driver_resume != NULL &&
3135 	    !(*dev->dv_driver_resume)(dev, qual))
3136 		return false;
3137 
3138 	dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3139 	return true;
3140 }
3141 
3142 bool
3143 device_pmf_driver_shutdown(device_t dev, int how)
3144 {
3145 
3146 	if (dev->dv_driver_shutdown != NULL &&
3147 	    !(*dev->dv_driver_shutdown)(dev, how))
3148 		return false;
3149 	return true;
3150 }
3151 
3152 void
3153 device_pmf_driver_register(device_t dev,
3154     bool (*suspend)(device_t, const pmf_qual_t *),
3155     bool (*resume)(device_t, const pmf_qual_t *),
3156     bool (*shutdown)(device_t, int))
3157 {
3158 
3159 	dev->dv_driver_suspend = suspend;
3160 	dev->dv_driver_resume = resume;
3161 	dev->dv_driver_shutdown = shutdown;
3162 	dev->dv_flags |= DVF_POWER_HANDLERS;
3163 }
3164 
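/*
 * Example: the shape of the driver-level suspend/resume handlers that
 * end up registered here.  An illustrative sketch; foo(4) is
 * hypothetical, and drivers normally register through pmf(9)
 * (e.g. pmf_device_register()) rather than calling
 * device_pmf_driver_register() directly.
 *
 *	static bool
 *	foo_suspend(device_t self, const pmf_qual_t *qual)
 *	{
 *		// Quiesce the hardware; returning false vetoes the
 *		// suspend.
 *		return true;
 *	}
 *
 *	static bool
 *	foo_resume(device_t self, const pmf_qual_t *qual)
 *	{
 *		// Restore any hardware state lost across suspend.
 *		return true;
 *	}
 */
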
3165 void
3166 device_pmf_driver_deregister(device_t dev)
3167 {
3168 	device_lock_t dvl = device_getlock(dev);
3169 
3170 	dev->dv_driver_suspend = NULL;
3171 	dev->dv_driver_resume = NULL;
3172 
3173 	mutex_enter(&dvl->dvl_mtx);
3174 	dev->dv_flags &= ~DVF_POWER_HANDLERS;
3175 	while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
3176 		/* Wake a thread that waits for the lock.  That
3177 		 * thread will fail to acquire the lock, and then
3178 		 * it will wake the next thread that waits for the
3179 		 * lock, or else it will wake us.
3180 		 */
3181 		cv_signal(&dvl->dvl_cv);
3182 		pmflock_debug(dev, __func__, __LINE__);
3183 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3184 		pmflock_debug(dev, __func__, __LINE__);
3185 	}
3186 	mutex_exit(&dvl->dvl_mtx);
3187 }
3188 
3189 void
3190 device_pmf_driver_child_register(device_t dev)
3191 {
3192 	device_t parent = device_parent(dev);
3193 
3194 	if (parent == NULL || parent->dv_driver_child_register == NULL)
3195 		return;
3196 	(*parent->dv_driver_child_register)(dev);
3197 }
3198 
3199 void
3200 device_pmf_driver_set_child_register(device_t dev,
3201     void (*child_register)(device_t))
3202 {
3203 	dev->dv_driver_child_register = child_register;
3204 }
3205 
3206 static void
3207 pmflock_debug(device_t dev, const char *func, int line)
3208 {
3209 #ifdef PMFLOCK_DEBUG
3210 	device_lock_t dvl = device_getlock(dev);
3211 	const char *curlwp_name;
3212 
3213 	if (curlwp->l_name != NULL)
3214 		curlwp_name = curlwp->l_name;
3215 	else
3216 		curlwp_name = curlwp->l_proc->p_comm;
3217 
3218 	aprint_debug_dev(dev,
3219 	    "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3220 	    curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3221 #endif	/* PMFLOCK_DEBUG */
3222 }
3223 
3224 static bool
3225 device_pmf_lock1(device_t dev)
3226 {
3227 	device_lock_t dvl = device_getlock(dev);
3228 
3229 	while (device_pmf_is_registered(dev) &&
3230 	    dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3231 		dvl->dvl_nwait++;
3232 		pmflock_debug(dev, __func__, __LINE__);
3233 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3234 		pmflock_debug(dev, __func__, __LINE__);
3235 		dvl->dvl_nwait--;
3236 	}
3237 	if (!device_pmf_is_registered(dev)) {
3238 		pmflock_debug(dev, __func__, __LINE__);
3239 		/* We could not acquire the lock, but some other thread may
3240 		 * also be waiting for it.  Wake that thread.
3241 		 */
3242 		cv_signal(&dvl->dvl_cv);
3243 		return false;
3244 	}
3245 	dvl->dvl_nlock++;
3246 	dvl->dvl_holder = curlwp;
3247 	pmflock_debug(dev, __func__, __LINE__);
3248 	return true;
3249 }
3250 
3251 bool
3252 device_pmf_lock(device_t dev)
3253 {
3254 	bool rc;
3255 	device_lock_t dvl = device_getlock(dev);
3256 
3257 	mutex_enter(&dvl->dvl_mtx);
3258 	rc = device_pmf_lock1(dev);
3259 	mutex_exit(&dvl->dvl_mtx);
3260 
3261 	return rc;
3262 }
3263 
3264 void
3265 device_pmf_unlock(device_t dev)
3266 {
3267 	device_lock_t dvl = device_getlock(dev);
3268 
3269 	KASSERT(dvl->dvl_nlock > 0);
3270 	mutex_enter(&dvl->dvl_mtx);
3271 	if (--dvl->dvl_nlock == 0)
3272 		dvl->dvl_holder = NULL;
3273 	cv_signal(&dvl->dvl_cv);
3274 	pmflock_debug(dev, __func__, __LINE__);
3275 	mutex_exit(&dvl->dvl_mtx);
3276 }
3277 
3278 device_lock_t
3279 device_getlock(device_t dev)
3280 {
3281 	return &dev->dv_lock;
3282 }
3283 
3284 void *
3285 device_pmf_bus_private(device_t dev)
3286 {
3287 	return dev->dv_bus_private;
3288 }
3289 
3290 bool
3291 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3292 {
3293 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3294 		return true;
3295 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3296 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3297 		return false;
3298 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3299 	    dev->dv_bus_suspend != NULL &&
3300 	    !(*dev->dv_bus_suspend)(dev, qual))
3301 		return false;
3302 
3303 	dev->dv_flags |= DVF_BUS_SUSPENDED;
3304 	return true;
3305 }
3306 
3307 bool
3308 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3309 {
3310 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3311 		return true;
3312 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3313 	    dev->dv_bus_resume != NULL &&
3314 	    !(*dev->dv_bus_resume)(dev, qual))
3315 		return false;
3316 
3317 	dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3318 	return true;
3319 }
3320 
3321 bool
3322 device_pmf_bus_shutdown(device_t dev, int how)
3323 {
3324 
3325 	if (dev->dv_bus_shutdown != NULL &&
3326 	    !(*dev->dv_bus_shutdown)(dev, how))
3327 		return false;
3328 	return true;
3329 }
3330 
3331 void
3332 device_pmf_bus_register(device_t dev, void *priv,
3333     bool (*suspend)(device_t, const pmf_qual_t *),
3334     bool (*resume)(device_t, const pmf_qual_t *),
3335     bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3336 {
3337 	dev->dv_bus_private = priv;
3338 	dev->dv_bus_resume = resume;
3339 	dev->dv_bus_suspend = suspend;
3340 	dev->dv_bus_shutdown = shutdown;
3341 	dev->dv_bus_deregister = deregister;
3342 }
3343 
3344 void
3345 device_pmf_bus_deregister(device_t dev)
3346 {
3347 	if (dev->dv_bus_deregister == NULL)
3348 		return;
3349 	(*dev->dv_bus_deregister)(dev);
3350 	dev->dv_bus_private = NULL;
3351 	dev->dv_bus_suspend = NULL;
3352 	dev->dv_bus_resume = NULL;
3353 	dev->dv_bus_deregister = NULL;
3354 }
3355 
3356 void *
3357 device_pmf_class_private(device_t dev)
3358 {
3359 	return dev->dv_class_private;
3360 }
3361 
3362 bool
3363 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3364 {
3365 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3366 		return true;
3367 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3368 	    dev->dv_class_suspend != NULL &&
3369 	    !(*dev->dv_class_suspend)(dev, qual))
3370 		return false;
3371 
3372 	dev->dv_flags |= DVF_CLASS_SUSPENDED;
3373 	return true;
3374 }
3375 
3376 bool
3377 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3378 {
3379 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3380 		return true;
3381 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3382 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3383 		return false;
3384 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3385 	    dev->dv_class_resume != NULL &&
3386 	    !(*dev->dv_class_resume)(dev, qual))
3387 		return false;
3388 
3389 	dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3390 	return true;
3391 }
3392 
3393 void
3394 device_pmf_class_register(device_t dev, void *priv,
3395     bool (*suspend)(device_t, const pmf_qual_t *),
3396     bool (*resume)(device_t, const pmf_qual_t *),
3397     void (*deregister)(device_t))
3398 {
3399 	dev->dv_class_private = priv;
3400 	dev->dv_class_suspend = suspend;
3401 	dev->dv_class_resume = resume;
3402 	dev->dv_class_deregister = deregister;
3403 }
3404 
3405 void
3406 device_pmf_class_deregister(device_t dev)
3407 {
3408 	if (dev->dv_class_deregister == NULL)
3409 		return;
3410 	(*dev->dv_class_deregister)(dev);
3411 	dev->dv_class_private = NULL;
3412 	dev->dv_class_suspend = NULL;
3413 	dev->dv_class_resume = NULL;
3414 	dev->dv_class_deregister = NULL;
3415 }
3416 
3417 bool
3418 device_active(device_t dev, devactive_t type)
3419 {
3420 	size_t i;
3421 
3422 	if (dev->dv_activity_count == 0)
3423 		return false;
3424 
3425 	for (i = 0; i < dev->dv_activity_count; ++i) {
3426 		if (dev->dv_activity_handlers[i] == NULL)
3427 			break;
3428 		(*dev->dv_activity_handlers[i])(dev, type);
3429 	}
3430 
3431 	return true;
3432 }
3433 
3434 bool
3435 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3436 {
3437 	void (**new_handlers)(device_t, devactive_t);
3438 	void (**old_handlers)(device_t, devactive_t);
3439 	size_t i, old_size, new_size;
3440 	int s;
3441 
3442 	old_handlers = dev->dv_activity_handlers;
3443 	old_size = dev->dv_activity_count;
3444 
3445 	KASSERT(old_size == 0 || old_handlers != NULL);
3446 
3447 	for (i = 0; i < old_size; ++i) {
3448 		KASSERT(old_handlers[i] != handler);
3449 		if (old_handlers[i] == NULL) {
3450 			old_handlers[i] = handler;
3451 			return true;
3452 		}
3453 	}
3454 
3455 	new_size = old_size + 4;
3456 	new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3457 
3458 	for (i = 0; i < old_size; ++i)
3459 		new_handlers[i] = old_handlers[i];
3460 	new_handlers[old_size] = handler;
3461 	for (i = old_size+1; i < new_size; ++i)
3462 		new_handlers[i] = NULL;
3463 
3464 	s = splhigh();
3465 	dev->dv_activity_count = new_size;
3466 	dev->dv_activity_handlers = new_handlers;
3467 	splx(s);
3468 
3469 	if (old_size > 0)
3470 		kmem_free(old_handlers, sizeof(void *) * old_size);
3471 
3472 	return true;
3473 }
3474 
3475 void
3476 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3477 {
3478 	void (**old_handlers)(device_t, devactive_t);
3479 	size_t i, old_size;
3480 	int s;
3481 
3482 	old_handlers = dev->dv_activity_handlers;
3483 	old_size = dev->dv_activity_count;
3484 
3485 	for (i = 0; i < old_size; ++i) {
3486 		if (old_handlers[i] == handler)
3487 			break;
3488 		if (old_handlers[i] == NULL)
3489 			return; /* XXX panic? */
3490 	}
3491 
3492 	if (i == old_size)
3493 		return; /* XXX panic? */
3494 
3495 	for (; i < old_size - 1; ++i) {
3496 		if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3497 			continue;
3498 
3499 		if (i == 0) {
3500 			s = splhigh();
3501 			dev->dv_activity_count = 0;
3502 			dev->dv_activity_handlers = NULL;
3503 			splx(s);
3504 			kmem_free(old_handlers, sizeof(void *) * old_size);
3505 		}
3506 		return;
3507 	}
3508 	old_handlers[i] = NULL;
3509 }
3510 
3511 /* Return true iff the device_t `dv' exists at generation `gen'. */
3512 static bool
3513 device_exists_at(device_t dv, devgen_t gen)
3514 {
3515 	return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3516 	    dv->dv_add_gen <= gen;
3517 }
3518 
3519 static bool
3520 deviter_visits(const deviter_t *di, device_t dv)
3521 {
3522 	return device_exists_at(dv, di->di_gen);
3523 }
3524 
3525 /*
3526  * Device Iteration
3527  *
3528  * deviter_t: a device iterator.  Holds state for a "walk" visiting
3529  *     each device_t in the device tree.
3530  *
3531  * deviter_init(di, flags): initialize the device iterator `di'
3532  *     to "walk" the device tree.  deviter_next(di) will return
3533  *     the first device_t in the device tree, or NULL if there are
3534  *     no devices.
3535  *
3536  *     `flags' is one or more of DEVITER_F_RW, indicating that the
3537  *     caller intends to modify the device tree by calling
3538  *     config_detach(9) on devices in the order that the iterator
3539  *     returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3540  *     nearest the "root" of the device tree to be returned, first;
3541  *     DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3542  *     the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3543  *     indicating both that deviter_init() should not respect any
3544  *     locks on the device tree, and that deviter_next(di) may run
3545  *     in more than one LWP before the walk has finished.
3546  *
3547  *     Only one DEVITER_F_RW iterator may be in the device tree at
3548  *     once.
3549  *
3550  *     DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3551  *
3552  *     Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3553  *     DEVITER_F_LEAVES_FIRST are used in combination.
3554  *
3555  * deviter_first(di, flags): initialize the device iterator `di'
3556  *     and return the first device_t in the device tree, or NULL
3557  *     if there are no devices.  The statement
3558  *
3559  *         dv = deviter_first(di, flags);
3560  *
3561  *     is shorthand for
3562  *
3563  *         deviter_init(di, flags);
3564  *         dv = deviter_next(di);
3565  *
3566  * deviter_next(di): return the next device_t in the device tree,
3567  *     or NULL if there are no more devices.  deviter_next(di)
3568  *     is undefined if `di' was not initialized with deviter_init() or
3569  *     deviter_first().
3570  *
3571  * deviter_release(di): stops iteration (subsequent calls to
3572  *     deviter_next() will return NULL), releases any locks and
3573  *     resources held by the device iterator.
3574  *
3575  * Device iteration does not return device_t's in any particular
3576  * order.  An iterator will never return the same device_t twice.
3577  * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3578  * is called repeatedly on the same `di', it will eventually return
3579  * NULL.  It is ok to attach/detach devices during device iteration.
3580  */
3581 void
3582 deviter_init(deviter_t *di, deviter_flags_t flags)
3583 {
3584 	device_t dv;
3585 
3586 	memset(di, 0, sizeof(*di));
3587 
3588 	if ((flags & DEVITER_F_SHUTDOWN) != 0)
3589 		flags |= DEVITER_F_RW;
3590 
3591 	mutex_enter(&alldevs_lock);
3592 	if ((flags & DEVITER_F_RW) != 0)
3593 		alldevs_nwrite++;
3594 	else
3595 		alldevs_nread++;
3596 	di->di_gen = alldevs_gen++;
3597 	di->di_flags = flags;
3598 
3599 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3600 	case DEVITER_F_LEAVES_FIRST:
3601 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
3602 			if (!deviter_visits(di, dv))
3603 				continue;
3604 			di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3605 		}
3606 		break;
3607 	case DEVITER_F_ROOT_FIRST:
3608 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
3609 			if (!deviter_visits(di, dv))
3610 				continue;
3611 			di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3612 		}
3613 		break;
3614 	default:
3615 		break;
3616 	}
3617 
3618 	deviter_reinit(di);
3619 	mutex_exit(&alldevs_lock);
3620 }
3621 
3622 static void
3623 deviter_reinit(deviter_t *di)
3624 {
3625 
3626 	KASSERT(mutex_owned(&alldevs_lock));
3627 	if ((di->di_flags & DEVITER_F_RW) != 0)
3628 		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3629 	else
3630 		di->di_prev = TAILQ_FIRST(&alldevs);
3631 }
3632 
3633 device_t
3634 deviter_first(deviter_t *di, deviter_flags_t flags)
3635 {
3636 
3637 	deviter_init(di, flags);
3638 	return deviter_next(di);
3639 }
3640 
3641 static device_t
3642 deviter_next2(deviter_t *di)
3643 {
3644 	device_t dv;
3645 
3646 	KASSERT(mutex_owned(&alldevs_lock));
3647 
3648 	dv = di->di_prev;
3649 
3650 	if (dv == NULL)
3651 		return NULL;
3652 
3653 	if ((di->di_flags & DEVITER_F_RW) != 0)
3654 		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3655 	else
3656 		di->di_prev = TAILQ_NEXT(dv, dv_list);
3657 
3658 	return dv;
3659 }
3660 
3661 static device_t
3662 deviter_next1(deviter_t *di)
3663 {
3664 	device_t dv;
3665 
3666 	KASSERT(mutex_owned(&alldevs_lock));
3667 
3668 	do {
3669 		dv = deviter_next2(di);
3670 	} while (dv != NULL && !deviter_visits(di, dv));
3671 
3672 	return dv;
3673 }
3674 
3675 device_t
3676 deviter_next(deviter_t *di)
3677 {
3678 	device_t dv = NULL;
3679 
3680 	mutex_enter(&alldevs_lock);
3681 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3682 	case 0:
3683 		dv = deviter_next1(di);
3684 		break;
3685 	case DEVITER_F_LEAVES_FIRST:
3686 		while (di->di_curdepth >= 0) {
3687 			if ((dv = deviter_next1(di)) == NULL) {
3688 				di->di_curdepth--;
3689 				deviter_reinit(di);
3690 			} else if (dv->dv_depth == di->di_curdepth)
3691 				break;
3692 		}
3693 		break;
3694 	case DEVITER_F_ROOT_FIRST:
3695 		while (di->di_curdepth <= di->di_maxdepth) {
3696 			if ((dv = deviter_next1(di)) == NULL) {
3697 				di->di_curdepth++;
3698 				deviter_reinit(di);
3699 			} else if (dv->dv_depth == di->di_curdepth)
3700 				break;
3701 		}
3702 		break;
3703 	default:
3704 		break;
3705 	}
3706 	mutex_exit(&alldevs_lock);
3707 
3708 	return dv;
3709 }
3710 
3711 void
3712 deviter_release(deviter_t *di)
3713 {
3714 	bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3715 
3716 	mutex_enter(&alldevs_lock);
3717 	if (rw)
3718 		--alldevs_nwrite;
3719 	else
3720 		--alldevs_nread;
3721 	/* XXX wake a garbage-collection thread */
3722 	mutex_exit(&alldevs_lock);
3723 }
3724 
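/*
 * Example: walking every attached device, parents before children.
 * A minimal sketch of typical iterator usage, matching how this file
 * itself uses the API.
 *
 *	deviter_t di;
 *	device_t dv;
 *
 *	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		aprint_debug("%s\n", device_xname(dv));
 *	}
 *	deviter_release(&di);
 */
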
3725 const char *
3726 cfdata_ifattr(const struct cfdata *cf)
3727 {
3728 	return cf->cf_pspec->cfp_iattr;
3729 }
3730 
3731 bool
3732 ifattr_match(const char *snull, const char *t)
3733 {
3734 	return (snull == NULL) || strcmp(snull, t) == 0;
3735 }
3736 
3737 void
3738 null_childdetached(device_t self, device_t child)
3739 {
3740 	/* do nothing */
3741 }
3742 
3743 static void
3744 sysctl_detach_setup(struct sysctllog **clog)
3745 {
3746 
3747 	sysctl_createv(clog, 0, NULL, NULL,
3748 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
3749 		CTLTYPE_BOOL, "detachall",
3750 		SYSCTL_DESCR("Detach all devices at shutdown"),
3751 		NULL, 0, &detachall, 0,
3752 		CTL_KERN, CTL_CREATE, CTL_EOL);
3753 }
3754