1 /* $NetBSD: subr_autoconf.c,v 1.301 2022/03/28 12:38:59 riastradh Exp $ */
2 
3 /*
4  * Copyright (c) 1996, 2000 Christopher G. Demetriou
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *          This product includes software developed for the
18  *          NetBSD Project.  See http://www.NetBSD.org/ for
19  *          information about NetBSD.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35  */
36 
37 /*
38  * Copyright (c) 1992, 1993
39  *	The Regents of the University of California.  All rights reserved.
40  *
41  * This software was developed by the Computer Systems Engineering group
42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43  * contributed to Berkeley.
44  *
45  * All advertising materials mentioning features or use of this software
46  * must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Lawrence Berkeley Laboratories.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp  (LBL)
75  *
76  *	@(#)subr_autoconf.c	8.3 (Berkeley) 5/17/94
77  */
78 
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.301 2022/03/28 12:38:59 riastradh Exp $");
81 
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86 
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113 
114 #include <sys/disk.h>
115 
116 #include <sys/rndsource.h>
117 
118 #include <machine/limits.h>
119 
120 /*
121  * Autoconfiguration subroutines.
122  */
123 
124 /*
125  * Device autoconfiguration timings are mixed into the entropy pool.
126  */
127 static krndsource_t rnd_autoconf_source;
128 
129 /*
130  * ioconf.c exports exactly two names: cfdata and cfroots.  All system
131  * devices and drivers are found via these tables.
132  */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135 
136 /*
137  * List of all cfdriver structures.  We use this to detect duplicates
138  * when other cfdrivers are loaded.
139  */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142 
143 /*
144  * Initial list of cfattach's.
145  */
146 extern const struct cfattachinit cfattachinit[];
147 
148 /*
149  * List of cfdata tables.  We always have one such list -- the one
150  * built statically when the kernel was configured.
151  */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154 
155 #define	ROOT ((device_t)NULL)
156 
157 struct matchinfo {
158 	cfsubmatch_t fn;
159 	device_t parent;
160 	const int *locs;
161 	void	*aux;
162 	struct	cfdata *match;
163 	int	pri;
164 };
165 
166 struct alldevs_foray {
167 	int			af_s;
168 	struct devicelist	af_garbage;
169 };
170 
171 /*
172  * Internal version of the cfargs structure; all versions are
173  * canonicalized to this.
174  */
175 struct cfargs_internal {
176 	union {
177 		cfsubmatch_t	submatch;/* submatch function (direct config) */
178 		cfsearch_t	search;	 /* search function (indirect config) */
179 	};
180 	const char *	iattr;		/* interface attribute */
181 	const int *	locators;	/* locators array */
182 	devhandle_t	devhandle;	/* devhandle_t (by value) */
183 };
184 
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t	config_attach_internal(device_t, cfdata_t, void *,
195 		    cfprint_t, const struct cfargs_internal *);
196 
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199 
200 static void pmflock_debug(device_t, const char *, int);
201 
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204 
205 struct deferred_config {
206 	TAILQ_ENTRY(deferred_config) dc_queue;
207 	device_t dc_dev;
208 	void (*dc_func)(device_t);
209 };
210 
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212 
213 static struct deferred_config_head deferred_config_queue =
214 	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224 
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226 
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 	TAILQ_ENTRY(finalize_hook) f_list;
230 	int (*f_func)(device_t);
231 	device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 	TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236 
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244 
245 static struct devicelist config_pending =
246     TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249 
250 static bool detachall = false;
251 
252 #define	STREQ(s1, s2)			\
253 	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254 
255 static bool config_initialized = false;	/* config_init() has been called. */
256 
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259 
260 static void sysctl_detach_setup(struct sysctllog **);
261 
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264 
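/*
 * A brief summary of the loop below (not in the original source): apply
 * drv_do to each cfdriver in the NULL-terminated vector.  On failure,
 * report the error (panicking if dopanic is set) and undo the entries
 * already processed with drv_undo, in reverse order.
 */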
265 typedef int (*cfdriver_fn)(struct cfdriver *);
266 static int
267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
268 	cfdriver_fn drv_do, cfdriver_fn drv_undo,
269 	const char *style, bool dopanic)
270 {
271 	void (*pr)(const char *, ...) __printflike(1, 2) =
272 	    dopanic ? panic : printf;
273 	int i, error = 0, e2 __diagused;
274 
275 	for (i = 0; cfdriverv[i] != NULL; i++) {
276 		if ((error = drv_do(cfdriverv[i])) != 0) {
277 			pr("configure: `%s' driver %s failed: %d",
278 			    cfdriverv[i]->cd_name, style, error);
279 			goto bad;
280 		}
281 	}
282 
283 	KASSERT(error == 0);
284 	return 0;
285 
286  bad:
287 	printf("\n");
288 	for (i--; i >= 0; i--) {
289 		e2 = drv_undo(cfdriverv[i]);
290 		KASSERT(e2 == 0);
291 	}
292 
293 	return error;
294 }
295 
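/*
 * As above, but for the cfattach table (summary comment, not in the
 * original source): apply att_do to every attachment of every driver
 * entry listed, rolling back with att_undo on failure.
 */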
296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
297 static int
298 frob_cfattachvec(const struct cfattachinit *cfattachv,
299 	cfattach_fn att_do, cfattach_fn att_undo,
300 	const char *style, bool dopanic)
301 {
302 	const struct cfattachinit *cfai = NULL;
303 	void (*pr)(const char *, ...) __printflike(1, 2) =
304 	    dopanic ? panic : printf;
305 	int j = 0, error = 0, e2 __diagused;
306 
307 	for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
308 		for (j = 0; cfai->cfai_list[j] != NULL; j++) {
309 			if ((error = att_do(cfai->cfai_name,
310 			    cfai->cfai_list[j])) != 0) {
311 				pr("configure: attachment `%s' "
312 				    "of `%s' driver %s failed: %d",
313 				    cfai->cfai_list[j]->ca_name,
314 				    cfai->cfai_name, style, error);
315 				goto bad;
316 			}
317 		}
318 	}
319 
320 	KASSERT(error == 0);
321 	return 0;
322 
323  bad:
324 	/*
325 	 * Roll back in reverse order.  Strict reverse ordering is
326 	 * probably not essential, but do it anyway: walk the
327 	 * attachment table backwards from the point of failure.
328 	 */
329 	printf("\n");
330 	if (cfai) {
331 		bool last;
332 
333 		for (last = false; last == false; ) {
334 			if (cfai == &cfattachv[0])
335 				last = true;
336 			for (j--; j >= 0; j--) {
337 				e2 = att_undo(cfai->cfai_name,
338 				    cfai->cfai_list[j]);
339 				KASSERT(e2 == 0);
340 			}
341 			if (!last) {
342 				cfai--;
343 				for (j = 0; cfai->cfai_list[j] != NULL; j++)
344 					;
345 			}
346 		}
347 	}
348 
349 	return error;
350 }
351 
352 /*
353  * Initialize the autoconfiguration data structures.  Normally this
354  * is done by configure(), but some platforms need to do this very
355  * early (to e.g. initialize the console).
356  */
357 void
358 config_init(void)
359 {
360 
361 	KASSERT(config_initialized == false);
362 
363 	mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
364 
365 	mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
366 	cv_init(&config_misc_cv, "cfgmisc");
367 
368 	callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
369 
370 	frob_cfdrivervec(cfdriver_list_initial,
371 	    config_cfdriver_attach, NULL, "bootstrap", true);
372 	frob_cfattachvec(cfattachinit,
373 	    config_cfattach_attach, NULL, "bootstrap", true);
374 
375 	initcftable.ct_cfdata = cfdata;
376 	TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
377 
378 	rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
379 	    RND_FLAG_COLLECT_TIME);
380 
381 	config_initialized = true;
382 }
383 
384 /*
385  * Init or fini drivers and attachments.  Either all or none
386  * are processed (via rollback).  It would be nice if this were
387  * atomic to outside consumers, but with the current state of
388  * locking ...
389  */
390 int
391 config_init_component(struct cfdriver * const *cfdriverv,
392 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
393 {
394 	int error;
395 
396 	KERNEL_LOCK(1, NULL);
397 
398 	if ((error = frob_cfdrivervec(cfdriverv,
399 	    config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
400 		goto out;
401 	if ((error = frob_cfattachvec(cfattachv,
402 	    config_cfattach_attach, config_cfattach_detach,
403 	    "init", false)) != 0) {
404 		frob_cfdrivervec(cfdriverv,
405 	            config_cfdriver_detach, NULL, "init rollback", true);
406 		goto out;
407 	}
408 	if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
409 		frob_cfattachvec(cfattachv,
410 		    config_cfattach_detach, NULL, "init rollback", true);
411 		frob_cfdrivervec(cfdriverv,
412 	            config_cfdriver_detach, NULL, "init rollback", true);
413 		goto out;
414 	}
415 
416 	/* Success!  */
417 	error = 0;
418 
419 out:	KERNEL_UNLOCK_ONE(NULL);
420 	return error;
421 }
422 
423 int
424 config_fini_component(struct cfdriver * const *cfdriverv,
425 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
426 {
427 	int error;
428 
429 	KERNEL_LOCK(1, NULL);
430 
431 	if ((error = config_cfdata_detach(cfdatav)) != 0)
432 		goto out;
433 	if ((error = frob_cfattachvec(cfattachv,
434 	    config_cfattach_detach, config_cfattach_attach,
435 	    "fini", false)) != 0) {
436 		if (config_cfdata_attach(cfdatav, 0) != 0)
437 			panic("config_cfdata fini rollback failed");
438 		goto out;
439 	}
440 	if ((error = frob_cfdrivervec(cfdriverv,
441 	    config_cfdriver_detach, config_cfdriver_attach,
442 	    "fini", false)) != 0) {
443 		frob_cfattachvec(cfattachv,
444 	            config_cfattach_attach, NULL, "fini rollback", true);
445 		if (config_cfdata_attach(cfdatav, 0) != 0)
446 			panic("config_cfdata fini rollback failed");
447 		goto out;
448 	}
449 
450 	/* Success!  */
451 	error = 0;
452 
453 out:	KERNEL_UNLOCK_ONE(NULL);
454 	return error;
455 }
456 
457 void
458 config_init_mi(void)
459 {
460 
461 	if (!config_initialized)
462 		config_init();
463 
464 	sysctl_detach_setup(NULL);
465 }
466 
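/*
 * Run the deferred configuration actions queued for dev on each of the
 * three deferral queues.  (Summary comment, not in the original source.)
 */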
467 void
468 config_deferred(device_t dev)
469 {
470 
471 	KASSERT(KERNEL_LOCKED_P());
472 
473 	config_process_deferred(&deferred_config_queue, dev);
474 	config_process_deferred(&interrupt_config_queue, dev);
475 	config_process_deferred(&mountroot_config_queue, dev);
476 }
477 
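/*
 * Worker for the "configintr" kthreads (summary comment, not in the
 * original source): drain interrupt_config_queue, calling each deferred
 * function and dropping the corresponding config_pending reference.
 */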
478 static void
479 config_interrupts_thread(void *cookie)
480 {
481 	struct deferred_config *dc;
482 	device_t dev;
483 
484 	mutex_enter(&config_misc_lock);
485 	while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
486 		TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
487 		mutex_exit(&config_misc_lock);
488 
489 		dev = dc->dc_dev;
490 		(*dc->dc_func)(dev);
491 		if (!device_pmf_is_registered(dev))
492 			aprint_debug_dev(dev,
493 			    "WARNING: power management not supported\n");
494 		config_pending_decr(dev);
495 		kmem_free(dc, sizeof(*dc));
496 
497 		mutex_enter(&config_misc_lock);
498 	}
499 	mutex_exit(&config_misc_lock);
500 
501 	kthread_exit(0);
502 }
503 
504 void
505 config_create_interruptthreads(void)
506 {
507 	int i;
508 
509 	for (i = 0; i < interrupt_config_threads; i++) {
510 		(void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 		    config_interrupts_thread, NULL, NULL, "configintr");
512 	}
513 }
514 
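/*
 * Worker for the "configroot" kthreads (summary comment, not in the
 * original source): drain mountroot_config_queue, calling each deferred
 * function.
 */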
515 static void
516 config_mountroot_thread(void *cookie)
517 {
518 	struct deferred_config *dc;
519 
520 	mutex_enter(&config_misc_lock);
521 	while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
522 		TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
523 		mutex_exit(&config_misc_lock);
524 
525 		(*dc->dc_func)(dc->dc_dev);
526 		kmem_free(dc, sizeof(*dc));
527 
528 		mutex_enter(&config_misc_lock);
529 	}
530 	mutex_exit(&config_misc_lock);
531 
532 	kthread_exit(0);
533 }
534 
535 void
536 config_create_mountrootthreads(void)
537 {
538 	int i;
539 
540 	if (!root_is_mounted)
541 		root_is_mounted = true;
542 
543 	mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 				       mountroot_config_threads;
545 	mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 					     KM_NOSLEEP);
547 	KASSERT(mountroot_config_lwpids);
548 	for (i = 0; i < mountroot_config_threads; i++) {
549 		mountroot_config_lwpids[i] = 0;
550 		(void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 				     NULL, config_mountroot_thread, NULL,
552 				     &mountroot_config_lwpids[i],
553 				     "configroot");
554 	}
555 }
556 
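/*
 * Join the "configroot" kthreads created by
 * config_create_mountrootthreads() and free the LWP id array.
 * (Summary comment, not in the original source.)
 */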
557 void
558 config_finalize_mountroot(void)
559 {
560 	int i, error;
561 
562 	for (i = 0; i < mountroot_config_threads; i++) {
563 		if (mountroot_config_lwpids[i] == 0)
564 			continue;
565 
566 		error = kthread_join(mountroot_config_lwpids[i]);
567 		if (error)
568 			printf("%s: thread %d joined with error %d\n",
569 			       __func__, i, error);
570 	}
571 	kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
572 }
573 
574 /*
575  * Announce device attach/detach to userland listeners.
576  */
577 
578 int
579 no_devmon_insert(const char *name, prop_dictionary_t p)
580 {
581 
582 	return ENODEV;
583 }
584 
585 static void
586 devmon_report_device(device_t dev, bool isattach)
587 {
588 	prop_dictionary_t ev, dict = device_properties(dev);
589 	const char *parent;
590 	const char *what;
591 	const char *where;
592 	device_t pdev = device_parent(dev);
593 
594 	/* If currently no drvctl device, just return */
595 	if (devmon_insert_vec == no_devmon_insert)
596 		return;
597 
598 	ev = prop_dictionary_create();
599 	if (ev == NULL)
600 		return;
601 
602 	what = (isattach ? "device-attach" : "device-detach");
603 	parent = (pdev == NULL ? "root" : device_xname(pdev));
604 	if (prop_dictionary_get_string(dict, "location", &where)) {
605 		prop_dictionary_set_string(ev, "location", where);
606 		aprint_debug("ev: %s %s at %s in [%s]\n",
607 		    what, device_xname(dev), parent, where);
608 	}
609 	if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
610 	    !prop_dictionary_set_string(ev, "parent", parent)) {
611 		prop_object_release(ev);
612 		return;
613 	}
614 
615 	if ((*devmon_insert_vec)(what, ev) != 0)
616 		prop_object_release(ev);
617 }
618 
619 /*
620  * Add a cfdriver to the system.
621  */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 	struct cfdriver *lcd;
626 
627 	/* Make sure this driver isn't already in the system. */
628 	LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 		if (STREQ(lcd->cd_name, cd->cd_name))
630 			return EEXIST;
631 	}
632 
633 	LIST_INIT(&cd->cd_attach);
634 	LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635 
636 	return 0;
637 }
638 
639 /*
640  * Remove a cfdriver from the system.
641  */
642 int
643 config_cfdriver_detach(struct cfdriver *cd)
644 {
645 	struct alldevs_foray af;
646 	int i, rc = 0;
647 
648 	config_alldevs_enter(&af);
649 	/* Make sure there are no active instances. */
650 	for (i = 0; i < cd->cd_ndevs; i++) {
651 		if (cd->cd_devs[i] != NULL) {
652 			rc = EBUSY;
653 			break;
654 		}
655 	}
656 	config_alldevs_exit(&af);
657 
658 	if (rc != 0)
659 		return rc;
660 
661 	/* ...and no attachments loaded. */
662 	if (LIST_EMPTY(&cd->cd_attach) == 0)
663 	if (!LIST_EMPTY(&cd->cd_attach))
664 
665 	LIST_REMOVE(cd, cd_list);
666 
667 	KASSERT(cd->cd_devs == NULL);
668 
669 	return 0;
670 }
671 
672 /*
673  * Look up a cfdriver by name.
674  */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 	struct cfdriver *cd;
679 
680 	LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 		if (STREQ(cd->cd_name, name))
682 			return cd;
683 	}
684 
685 	return NULL;
686 }
687 
688 /*
689  * Add a cfattach to the specified driver.
690  */
691 int
692 config_cfattach_attach(const char *driver, struct cfattach *ca)
693 {
694 	struct cfattach *lca;
695 	struct cfdriver *cd;
696 
697 	cd = config_cfdriver_lookup(driver);
698 	if (cd == NULL)
699 		return ESRCH;
700 
701 	/* Make sure this attachment isn't already on this driver. */
702 	LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
703 		if (STREQ(lca->ca_name, ca->ca_name))
704 			return EEXIST;
705 	}
706 
707 	LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
708 
709 	return 0;
710 }
711 
712 /*
713  * Remove a cfattach from the specified driver.
714  */
715 int
716 config_cfattach_detach(const char *driver, struct cfattach *ca)
717 {
718 	struct alldevs_foray af;
719 	struct cfdriver *cd;
720 	device_t dev;
721 	int i, rc = 0;
722 
723 	cd = config_cfdriver_lookup(driver);
724 	if (cd == NULL)
725 		return ESRCH;
726 
727 	config_alldevs_enter(&af);
728 	/* Make sure there are no active instances. */
729 	for (i = 0; i < cd->cd_ndevs; i++) {
730 		if ((dev = cd->cd_devs[i]) == NULL)
731 			continue;
732 		if (dev->dv_cfattach == ca) {
733 			rc = EBUSY;
734 			break;
735 		}
736 	}
737 	config_alldevs_exit(&af);
738 
739 	if (rc != 0)
740 		return rc;
741 
742 	LIST_REMOVE(ca, ca_list);
743 
744 	return 0;
745 }
746 
747 /*
748  * Look up a cfattach by name.
749  */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 	struct cfattach *ca;
754 
755 	LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 		if (STREQ(ca->ca_name, atname))
757 			return ca;
758 	}
759 
760 	return NULL;
761 }
762 
763 /*
764  * Look up a cfattach by driver/attachment name.
765  */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 	struct cfdriver *cd;
770 
771 	cd = config_cfdriver_lookup(name);
772 	if (cd == NULL)
773 		return NULL;
774 
775 	return config_cfattach_lookup_cd(cd, atname);
776 }
777 
778 /*
779  * Apply the matching function and choose the best.  This is used
780  * a few times and we want to keep the code small.
781  */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 	int pri;
786 
787 	if (m->fn != NULL) {
788 		pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 	} else {
790 		pri = config_match(m->parent, cf, m->aux);
791 	}
792 	if (pri > m->pri) {
793 		m->match = cf;
794 		m->pri = pri;
795 	}
796 }
797 
798 int
799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
800 {
801 	const struct cfiattrdata *ci;
802 	const struct cflocdesc *cl;
803 	int nlocs, i;
804 
805 	ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
806 	KASSERT(ci);
807 	nlocs = ci->ci_loclen;
808 	KASSERT(!nlocs || locs);
809 	for (i = 0; i < nlocs; i++) {
810 		cl = &ci->ci_locdesc[i];
811 		if (cl->cld_defaultstr != NULL &&
812 		    cf->cf_loc[i] == cl->cld_default)
813 			continue;
814 		if (cf->cf_loc[i] == locs[i])
815 			continue;
816 		return 0;
817 	}
818 
819 	return config_match(parent, cf, aux);
820 }
821 
822 /*
823  * Helper function: check whether the driver supports the interface attribute
824  * and return its descriptor structure.
825  */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 	const struct cfiattrdata * const *cpp;
830 
831 	if (cd->cd_attrs == NULL)
832 		return 0;
833 
834 	for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 		if (STREQ((*cpp)->ci_name, ia)) {
836 			/* Match. */
837 			return *cpp;
838 		}
839 	}
840 	return 0;
841 }
842 
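/*
 * Count the interface attributes supported by a driver; used only in
 * diagnostic assertions.  (Summary comment, not in the original source.)
 */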
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 	const struct cfiattrdata * const *cpp;
847 	int i;
848 
849 	if (cd->cd_attrs == NULL)
850 		return 0;
851 
852 	for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 		i++;
854 	}
855 	return i;
856 }
857 
858 /*
859  * Lookup an interface attribute description by name.
860  * If the driver is given, consider only its supported attributes.
861  */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 	const struct cfdriver *d;
866 	const struct cfiattrdata *ia;
867 
868 	if (cd)
869 		return cfdriver_get_iattr(cd, name);
870 
871 	LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 		ia = cfdriver_get_iattr(d, name);
873 		if (ia)
874 			return ia;
875 	}
876 	return 0;
877 }
878 
879 /*
880  * Determine if `parent' is a potential parent for a device spec based
881  * on `cfp'.
882  */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 	struct cfdriver *pcd;
887 
888 	/* We don't match root nodes here. */
889 	if (cfp == NULL)
890 		return 0;
891 
892 	pcd = parent->dv_cfdriver;
893 	KASSERT(pcd != NULL);
894 
895 	/*
896 	 * First, ensure this parent has the correct interface
897 	 * attribute.
898 	 */
899 	if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 		return 0;
901 
902 	/*
903 	 * If no specific parent device instance was specified (i.e.
904 	 * we're attaching to the attribute only), we're done!
905 	 */
906 	if (cfp->cfp_parent == NULL)
907 		return 1;
908 
909 	/*
910 	 * Check the parent device's name.
911 	 */
912 	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 		return 0;	/* not the same parent */
914 
915 	/*
916 	 * Make sure the unit number matches.
917 	 */
918 	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
919 	    cfp->cfp_unit == parent->dv_unit)
920 		return 1;
921 
922 	/* Unit numbers don't match. */
923 	return 0;
924 }
925 
926 /*
927  * Helper for config_cfdata_attach(): check whether each existing device
928  * could be the parent of any attachment in the given config data table, and rescan.
929  */
930 static void
931 rescan_with_cfdata(const struct cfdata *cf)
932 {
933 	device_t d;
934 	const struct cfdata *cf1;
935 	deviter_t di;
936 
937 	KASSERT(KERNEL_LOCKED_P());
938 
939 	/*
940 	 * "alldevs" is likely longer than a module's cfdata, so make it
941 	 * the outer loop.
942 	 */
943 	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
944 
945 		if (!(d->dv_cfattach->ca_rescan))
946 			continue;
947 
948 		for (cf1 = cf; cf1->cf_name; cf1++) {
949 
950 			if (!cfparent_match(d, cf1->cf_pspec))
951 				continue;
952 
953 			(*d->dv_cfattach->ca_rescan)(d,
954 				cfdata_ifattr(cf1), cf1->cf_loc);
955 
956 			config_deferred(d);
957 		}
958 	}
959 	deviter_release(&di);
960 }
961 
962 /*
963  * Attach a supplemental config data table and rescan potential
964  * parent devices if required.
965  */
966 int
967 config_cfdata_attach(cfdata_t cf, int scannow)
968 {
969 	struct cftable *ct;
970 
971 	KERNEL_LOCK(1, NULL);
972 
973 	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
974 	ct->ct_cfdata = cf;
975 	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
976 
977 	if (scannow)
978 		rescan_with_cfdata(cf);
979 
980 	KERNEL_UNLOCK_ONE(NULL);
981 
982 	return 0;
983 }
984 
985 /*
986  * Helper for config_cfdata_detach: check whether a device is
987  * found through any attachment in the config data table.
988  */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 	const struct cfdata *cf1;
993 
994 	for (cf1 = cf; cf1->cf_name; cf1++)
995 		if (d->dv_cfdata == cf1)
996 			return 1;
997 
998 	return 0;
999 }
1000 
1001 /*
1002  * Detach a supplemental config data table.  First detach all devices
1003  * found through that table (which therefore still hold references to it).
1004  */
1005 int
1006 config_cfdata_detach(cfdata_t cf)
1007 {
1008 	device_t d;
1009 	int error = 0;
1010 	struct cftable *ct;
1011 	deviter_t di;
1012 
1013 	KERNEL_LOCK(1, NULL);
1014 
1015 	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1016 	     d = deviter_next(&di)) {
1017 		if (!dev_in_cfdata(d, cf))
1018 			continue;
1019 		if ((error = config_detach(d, 0)) != 0)
1020 			break;
1021 	}
1022 	deviter_release(&di);
1023 	if (error) {
1024 		aprint_error_dev(d, "unable to detach instance\n");
1025 		goto out;
1026 	}
1027 
1028 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
1029 		if (ct->ct_cfdata == cf) {
1030 			TAILQ_REMOVE(&allcftables, ct, ct_list);
1031 			kmem_free(ct, sizeof(*ct));
1032 			error = 0;
1033 			goto out;
1034 		}
1035 	}
1036 
1037 	/* not found -- shouldn't happen */
1038 	error = EINVAL;
1039 
1040 out:	KERNEL_UNLOCK_ONE(NULL);
1041 	return error;
1042 }
1043 
1044 /*
1045  * Invoke the "match" routine for a cfdata entry on behalf of
1046  * an external caller, usually a direct config "submatch" routine.
1047  */
1048 int
1049 config_match(device_t parent, cfdata_t cf, void *aux)
1050 {
1051 	struct cfattach *ca;
1052 
1053 	KASSERT(KERNEL_LOCKED_P());
1054 
1055 	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1056 	if (ca == NULL) {
1057 		/* No attachment for this entry, oh well. */
1058 		return 0;
1059 	}
1060 
1061 	return (*ca->ca_match)(parent, cf, aux);
1062 }
1063 
1064 /*
1065  * Invoke the "probe" routine for a cfdata entry on behalf of
1066  * an external caller, usually an indirect config "search" routine.
1067  */
1068 int
1069 config_probe(device_t parent, cfdata_t cf, void *aux)
1070 {
1071 	/*
1072 	 * This is currently a synonym for config_match(), but this
1073 	 * is an implementation detail; "match" and "probe" routines
1074 	 * have different behaviors.
1075 	 *
1076 	 * XXX config_probe() should return a bool, because there is
1077 	 * XXX no match score for probe -- it's either there or it's
1078 	 * XXX not, but some ports abuse the return value as a way
1079 	 * XXX to attach "critical" devices before "non-critical"
1080 	 * XXX devices.
1081 	 */
1082 	return config_match(parent, cf, aux);
1083 }
1084 
1085 static struct cfargs_internal *
1086 cfargs_canonicalize(const struct cfargs * const cfargs,
1087     struct cfargs_internal * const store)
1088 {
1089 	struct cfargs_internal *args = store;
1090 
1091 	memset(args, 0, sizeof(*args));
1092 
1093 	/* If none specified, all-NULL pointers are good. */
1094 	if (cfargs == NULL) {
1095 		return args;
1096 	}
1097 
1098 	/*
1099 	 * Only one version of the cfargs structure is recognized at this time.
1100 	 */
1101 	if (cfargs->cfargs_version != CFARGS_VERSION) {
1102 		panic("cfargs_canonicalize: unknown version %lu\n",
1103 		    (unsigned long)cfargs->cfargs_version);
1104 	}
1105 
1106 	/*
1107 	 * submatch and search are mutually-exclusive.
1108 	 */
1109 	if (cfargs->submatch != NULL && cfargs->search != NULL) {
1110 		panic("cfargs_canonicalize: submatch and search are "
1111 		      "mutually-exclusive");
1112 	}
1113 	if (cfargs->submatch != NULL) {
1114 		args->submatch = cfargs->submatch;
1115 	} else if (cfargs->search != NULL) {
1116 		args->search = cfargs->search;
1117 	}
1118 
1119 	args->iattr = cfargs->iattr;
1120 	args->locators = cfargs->locators;
1121 	args->devhandle = cfargs->devhandle;
1122 
1123 	return args;
1124 }
1125 
1126 /*
1127  * Iterate over all potential children of some device, calling the given
1128  * function (default being the child's match function) for each one.
1129  * Nonzero returns are matches; the highest value returned is considered
1130  * the best match.  Return the `found child' if we got a match, or NULL
1131  * otherwise.  The `aux' pointer is simply passed on through.
1132  *
1133  * Note that this function is designed so that it can be used to apply
1134  * an arbitrary function to all potential children (its return value
1135  * can be ignored).
1136  */
1137 static cfdata_t
1138 config_search_internal(device_t parent, void *aux,
1139     const struct cfargs_internal * const args)
1140 {
1141 	struct cftable *ct;
1142 	cfdata_t cf;
1143 	struct matchinfo m;
1144 
1145 	KASSERT(config_initialized);
1146 	KASSERT(!args->iattr ||
1147 		cfdriver_get_iattr(parent->dv_cfdriver, args->iattr));
1148 	KASSERT(args->iattr ||
1149 		cfdriver_iattr_count(parent->dv_cfdriver) < 2);
1150 
1151 	m.fn = args->submatch;		/* N.B. union */
1152 	m.parent = parent;
1153 	m.locs = args->locators;
1154 	m.aux = aux;
1155 	m.match = NULL;
1156 	m.pri = 0;
1157 
1158 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
1159 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1160 
1161 			/* We don't match root nodes here. */
1162 			if (!cf->cf_pspec)
1163 				continue;
1164 
1165 			/*
1166 			 * Skip cf if no longer eligible, otherwise scan
1167 			 * through parents for one matching `parent', and
1168 			 * try match function.
1169 			 */
1170 			if (cf->cf_fstate == FSTATE_FOUND)
1171 				continue;
1172 			if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1173 			    cf->cf_fstate == FSTATE_DSTAR)
1174 				continue;
1175 
1176 			/*
1177 			 * If an interface attribute was specified,
1178 			 * consider only children which attach to
1179 			 * that attribute.
1180 			 */
1181 			if (args->iattr != NULL &&
1182 			    !STREQ(args->iattr, cfdata_ifattr(cf)))
1183 				continue;
1184 
1185 			if (cfparent_match(parent, cf->cf_pspec))
1186 				mapply(&m, cf);
1187 		}
1188 	}
1189 	rnd_add_uint32(&rnd_autoconf_source, 0);
1190 	return m.match;
1191 }
1192 
1193 cfdata_t
1194 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1195 {
1196 	cfdata_t cf;
1197 	struct cfargs_internal store;
1198 
1199 	cf = config_search_internal(parent, aux,
1200 	    cfargs_canonicalize(cfargs, &store));
1201 
1202 	return cf;
1203 }
1204 
1205 /*
1206  * Find the given root device.
1207  * This is much like config_search, but there is no parent.
1208  * Don't bother with multiple cfdata tables; the root node
1209  * must always be in the initial table.
1210  */
1211 cfdata_t
1212 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1213 {
1214 	cfdata_t cf;
1215 	const short *p;
1216 	struct matchinfo m;
1217 
1218 	m.fn = fn;
1219 	m.parent = ROOT;
1220 	m.aux = aux;
1221 	m.match = NULL;
1222 	m.pri = 0;
1223 	m.locs = 0;
1224 	/*
1225 	 * Look at root entries for matching name.  We do not bother
1226 	 * with found-state here since only one root should ever be
1227 	 * searched (and it must be done first).
1228 	 */
1229 	for (p = cfroots; *p >= 0; p++) {
1230 		cf = &cfdata[*p];
1231 		if (strcmp(cf->cf_name, rootname) == 0)
1232 			mapply(&m, cf);
1233 	}
1234 	return m.match;
1235 }
1236 
1237 static const char * const msgs[] = {
1238 [QUIET]		=	"",
1239 [UNCONF]	=	" not configured\n",
1240 [UNSUPP]	=	" unsupported\n",
1241 };
1242 
1243 /*
1244  * The given `aux' argument describes a device that has been found
1245  * on the given parent, but not necessarily configured.  Locate the
1246  * configuration data for that device (using the submatch function
1247  * provided, or using candidates' cd_match configuration driver
1248  * functions) and attach it, and return its device_t.  If the device was
1249  * not configured, call the given `print' function and return NULL.
1250  */
1251 device_t
1252 config_found(device_t parent, void *aux, cfprint_t print,
1253     const struct cfargs * const cfargs)
1254 {
1255 	cfdata_t cf;
1256 	struct cfargs_internal store;
1257 	const struct cfargs_internal * const args =
1258 	    cfargs_canonicalize(cfargs, &store);
1259 
1260 	cf = config_search_internal(parent, aux, args);
1261 	if (cf != NULL) {
1262 		return config_attach_internal(parent, cf, aux, print, args);
1263 	}
1264 
1265 	if (print) {
1266 		if (config_do_twiddle && cold)
1267 			twiddle();
1268 
1269 		const int pret = (*print)(aux, device_xname(parent));
1270 		KASSERT(pret >= 0);
1271 		KASSERT(pret < __arraycount(msgs));
1272 		KASSERT(msgs[pret] != NULL);
1273 		aprint_normal("%s", msgs[pret]);
1274 	}
1275 
1276 	return NULL;
1277 }
1278 
1279 /*
1280  * As above, but for root devices.
1281  */
1282 device_t
1283 config_rootfound(const char *rootname, void *aux)
1284 {
1285 	cfdata_t cf;
1286 	device_t dev = NULL;
1287 
1288 	KERNEL_LOCK(1, NULL);
1289 	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1290 		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1291 	else
1292 		aprint_error("root device %s not configured\n", rootname);
1293 	KERNEL_UNLOCK_ONE(NULL);
1294 	return dev;
1295 }
1296 
1297 /* just like sprintf(buf, "%d") except that it works from the end */
1298 static char *
1299 number(char *ep, int n)
1300 {
1301 
1302 	*--ep = 0;
1303 	while (n >= 10) {
1304 		*--ep = (n % 10) + '0';
1305 		n /= 10;
1306 	}
1307 	*--ep = n + '0';
1308 	return ep;
1309 }
1310 
1311 /*
1312  * Expand the size of the cd_devs array if necessary.
1313  *
1314  * The caller must hold alldevs_lock. config_makeroom() may release and
1315  * re-acquire alldevs_lock, so callers should re-check conditions such
1316  * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1317  * returns.
1318  */
1319 static void
1320 config_makeroom(int n, struct cfdriver *cd)
1321 {
1322 	int ondevs, nndevs;
1323 	device_t *osp, *nsp;
1324 
1325 	KASSERT(mutex_owned(&alldevs_lock));
1326 	alldevs_nwrite++;
1327 
1328 	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1329 		;
1330 
1331 	while (n >= cd->cd_ndevs) {
1332 		/*
1333 		 * Need to expand the array.
1334 		 */
1335 		ondevs = cd->cd_ndevs;
1336 		osp = cd->cd_devs;
1337 
1338 		/*
1339 		 * Release alldevs_lock around allocation, which may
1340 		 * sleep.
1341 		 */
1342 		mutex_exit(&alldevs_lock);
1343 		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1344 		mutex_enter(&alldevs_lock);
1345 
1346 		/*
1347 		 * If another thread moved the array while we did
1348 		 * not hold alldevs_lock, try again.
1349 		 */
1350 		if (cd->cd_devs != osp) {
1351 			mutex_exit(&alldevs_lock);
1352 			kmem_free(nsp, sizeof(device_t) * nndevs);
1353 			mutex_enter(&alldevs_lock);
1354 			continue;
1355 		}
1356 
1357 		memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1358 		if (ondevs != 0)
1359 			memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1360 
1361 		cd->cd_ndevs = nndevs;
1362 		cd->cd_devs = nsp;
1363 		if (ondevs != 0) {
1364 			mutex_exit(&alldevs_lock);
1365 			kmem_free(osp, sizeof(device_t) * ondevs);
1366 			mutex_enter(&alldevs_lock);
1367 		}
1368 	}
1369 	KASSERT(mutex_owned(&alldevs_lock));
1370 	alldevs_nwrite--;
1371 }
1372 
1373 /*
1374  * Put dev into the devices list.
1375  */
1376 static void
1377 config_devlink(device_t dev)
1378 {
1379 
1380 	mutex_enter(&alldevs_lock);
1381 
1382 	KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1383 
1384 	dev->dv_add_gen = alldevs_gen;
1385 	/* It is safe to add a device to the tail of the list while
1386 	 * readers and writers are in the list.
1387 	 */
1388 	TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1389 	mutex_exit(&alldevs_lock);
1390 }
1391 
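/*
 * Free a device_t and its private softc storage.  The device must have
 * no outstanding pending-config references.  (Summary comment, not in
 * the original source.)
 */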
1392 static void
1393 config_devfree(device_t dev)
1394 {
1395 
1396 	KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1397 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1398 
1399 	if (dev->dv_cfattach->ca_devsize > 0)
1400 		kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1401 	kmem_free(dev, sizeof(*dev));
1402 }
1403 
1404 /*
1405  * Caller must hold alldevs_lock.
1406  */
1407 static void
1408 config_devunlink(device_t dev, struct devicelist *garbage)
1409 {
1410 	struct device_garbage *dg = &dev->dv_garbage;
1411 	cfdriver_t cd = device_cfdriver(dev);
1412 	int i;
1413 
1414 	KASSERT(mutex_owned(&alldevs_lock));
1415 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1416 
1417  	/* Unlink from device list.  Link to garbage list. */
1418 	TAILQ_REMOVE(&alldevs, dev, dv_list);
1419 	TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1420 
1421 	/* Remove from cfdriver's array. */
1422 	cd->cd_devs[dev->dv_unit] = NULL;
1423 
1424 	/*
1425 	 * If the driver now has no units in use, unlink its cd_devs array.
1426 	 */
1427 	for (i = 0; i < cd->cd_ndevs; i++) {
1428 		if (cd->cd_devs[i] != NULL)
1429 			break;
1430 	}
1431 	/* Nothing found.  Unlink, now.  Deallocate, later. */
1432 	if (i == cd->cd_ndevs) {
1433 		dg->dg_ndevs = cd->cd_ndevs;
1434 		dg->dg_devs = cd->cd_devs;
1435 		cd->cd_devs = NULL;
1436 		cd->cd_ndevs = 0;
1437 	}
1438 }
1439 
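/*
 * Release everything still held by an unlinked device (summary comment,
 * not in the original source): any orphaned cd_devs array, its
 * localcount, lock and condvar, properties dictionary, and locators,
 * then the device itself.
 */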
1440 static void
1441 config_devdelete(device_t dev)
1442 {
1443 	struct device_garbage *dg = &dev->dv_garbage;
1444 	device_lock_t dvl = device_getlock(dev);
1445 
1446 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1447 
1448 	if (dg->dg_devs != NULL)
1449 		kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1450 
1451 	localcount_fini(dev->dv_localcount);
1452 	kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
1453 
1454 	cv_destroy(&dvl->dvl_cv);
1455 	mutex_destroy(&dvl->dvl_mtx);
1456 
1457 	KASSERT(dev->dv_properties != NULL);
1458 	prop_object_release(dev->dv_properties);
1459 
1460 	if (dev->dv_activity_handlers)
1461 		panic("%s with registered handlers", __func__);
1462 
1463 	if (dev->dv_locators) {
1464 		size_t amount = *--dev->dv_locators;
1465 		kmem_free(dev->dv_locators, amount);
1466 	}
1467 
1468 	config_devfree(dev);
1469 }
1470 
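/*
 * Find a free unit number for cf (summary comment, not in the original
 * source): for an FSTATE_STAR (wildcard) entry, the first unused unit
 * at or above cf_unit; otherwise cf_unit itself, provided it is not
 * already in use.  Returns -1 if no unit is available.
 */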
1471 static int
1472 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1473 {
1474 	int unit = cf->cf_unit;
1475 
1476 	if (unit < 0)
1477 		return -1;
1478 	if (cf->cf_fstate == FSTATE_STAR) {
1479 		for (; unit < cd->cd_ndevs; unit++)
1480 			if (cd->cd_devs[unit] == NULL)
1481 				break;
1482 		/*
1483 		 * unit is now the unit of the first NULL device pointer,
1484 		 * or max(cd->cd_ndevs,cf->cf_unit).
1485 		 */
1486 	} else {
1487 		if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1488 			unit = -1;
1489 	}
1490 	return unit;
1491 }
1492 
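/*
 * Reserve a unit number for dev, growing the driver's device array with
 * config_makeroom() as needed.  Returns the unit number, or -1 if none
 * is available.  (Summary comment, not in the original source.)
 */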
1493 static int
1494 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1495 {
1496 	struct alldevs_foray af;
1497 	int unit;
1498 
1499 	config_alldevs_enter(&af);
1500 	for (;;) {
1501 		unit = config_unit_nextfree(cd, cf);
1502 		if (unit == -1)
1503 			break;
1504 		if (unit < cd->cd_ndevs) {
1505 			cd->cd_devs[unit] = dev;
1506 			dev->dv_unit = unit;
1507 			break;
1508 		}
1509 		config_makeroom(unit, cd);
1510 	}
1511 	config_alldevs_exit(&af);
1512 
1513 	return unit;
1514 }
1515 
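/*
 * Allocate and initialize a device_t (and its private softc) for the
 * given cfdata entry: assign a unit number, construct the dv_xname,
 * and set up the device lock, properties dictionary, locators, and
 * localcount.  Returns NULL if the driver, attachment, or a unit number
 * cannot be found.  (Summary comment, not in the original source.)
 */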
1516 static device_t
1517 config_devalloc(const device_t parent, const cfdata_t cf,
1518     const struct cfargs_internal * const args)
1519 {
1520 	cfdriver_t cd;
1521 	cfattach_t ca;
1522 	size_t lname, lunit;
1523 	const char *xunit;
1524 	int myunit;
1525 	char num[10];
1526 	device_t dev;
1527 	void *dev_private;
1528 	const struct cfiattrdata *ia;
1529 	device_lock_t dvl;
1530 
1531 	cd = config_cfdriver_lookup(cf->cf_name);
1532 	if (cd == NULL)
1533 		return NULL;
1534 
1535 	ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1536 	if (ca == NULL)
1537 		return NULL;
1538 
1539 	/* get memory for all device vars */
1540 	KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1541 	if (ca->ca_devsize > 0) {
1542 		dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1543 	} else {
1544 		dev_private = NULL;
1545 	}
1546 	dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1547 
1548 	dev->dv_handle = args->devhandle;
1549 
1550 	dev->dv_class = cd->cd_class;
1551 	dev->dv_cfdata = cf;
1552 	dev->dv_cfdriver = cd;
1553 	dev->dv_cfattach = ca;
1554 	dev->dv_activity_count = 0;
1555 	dev->dv_activity_handlers = NULL;
1556 	dev->dv_private = dev_private;
1557 	dev->dv_flags = ca->ca_flags;	/* inherit flags from class */
1558 	dev->dv_attaching = curlwp;
1559 
1560 	myunit = config_unit_alloc(dev, cd, cf);
1561 	if (myunit == -1) {
1562 		config_devfree(dev);
1563 		return NULL;
1564 	}
1565 
1566 	/* compute length of name and decimal expansion of unit number */
1567 	lname = strlen(cd->cd_name);
1568 	xunit = number(&num[sizeof(num)], myunit);
1569 	lunit = &num[sizeof(num)] - xunit;
1570 	if (lname + lunit > sizeof(dev->dv_xname))
1571 		panic("config_devalloc: device name too long");
1572 
1573 	dvl = device_getlock(dev);
1574 
1575 	mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1576 	cv_init(&dvl->dvl_cv, "pmfsusp");
1577 
1578 	memcpy(dev->dv_xname, cd->cd_name, lname);
1579 	memcpy(dev->dv_xname + lname, xunit, lunit);
1580 	dev->dv_parent = parent;
1581 	if (parent != NULL)
1582 		dev->dv_depth = parent->dv_depth + 1;
1583 	else
1584 		dev->dv_depth = 0;
1585 	dev->dv_flags |= DVF_ACTIVE;	/* always initially active */
1586 	if (args->locators) {
1587 		KASSERT(parent); /* no locators at root */
1588 		ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1589 		dev->dv_locators =
1590 		    kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1591 		*dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1592 		memcpy(dev->dv_locators, args->locators,
1593 		    sizeof(int) * ia->ci_loclen);
1594 	}
1595 	dev->dv_properties = prop_dictionary_create();
1596 	KASSERT(dev->dv_properties != NULL);
1597 
1598 	prop_dictionary_set_string_nocopy(dev->dv_properties,
1599 	    "device-driver", dev->dv_cfdriver->cd_name);
1600 	prop_dictionary_set_uint16(dev->dv_properties,
1601 	    "device-unit", dev->dv_unit);
1602 	if (parent != NULL) {
1603 		prop_dictionary_set_string(dev->dv_properties,
1604 		    "device-parent", device_xname(parent));
1605 	}
1606 
1607 	dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
1608 	    KM_SLEEP);
1609 	localcount_init(dev->dv_localcount);
1610 
1611 	if (dev->dv_cfdriver->cd_attrs != NULL)
1612 		config_add_attrib_dict(dev);
1613 
1614 	return dev;
1615 }
1616 
1617 /*
1618  * Create an array of device attach attributes and add it
1619  * to the device's dv_properties dictionary.
1620  *
1621  * <key>interface-attributes</key>
1622  * <array>
1623  *    <dict>
1624  *       <key>attribute-name</key>
1625  *       <string>foo</string>
1626  *       <key>locators</key>
1627  *       <array>
1628  *          <dict>
1629  *             <key>loc-name</key>
1630  *             <string>foo-loc1</string>
1631  *          </dict>
1632  *          <dict>
1633  *             <key>loc-name</key>
1634  *             <string>foo-loc2</string>
1635  *             <key>default</key>
1636  *             <string>foo-loc2-default</string>
1637  *          </dict>
1638  *          ...
1639  *       </array>
1640  *    </dict>
1641  *    ...
1642  * </array>
1643  */
1644 
1645 static void
1646 config_add_attrib_dict(device_t dev)
1647 {
1648 	int i, j;
1649 	const struct cfiattrdata *ci;
1650 	prop_dictionary_t attr_dict, loc_dict;
1651 	prop_array_t attr_array, loc_array;
1652 
1653 	if ((attr_array = prop_array_create()) == NULL)
1654 		return;
1655 
1656 	for (i = 0; ; i++) {
1657 		if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1658 			break;
1659 		if ((attr_dict = prop_dictionary_create()) == NULL)
1660 			break;
1661 		prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1662 		    ci->ci_name);
1663 
1664 		/* Create an array of the locator names and defaults */
1665 
1666 		if (ci->ci_loclen != 0 &&
1667 		    (loc_array = prop_array_create()) != NULL) {
1668 			for (j = 0; j < ci->ci_loclen; j++) {
1669 				loc_dict = prop_dictionary_create();
1670 				if (loc_dict == NULL)
1671 					continue;
1672 				prop_dictionary_set_string_nocopy(loc_dict,
1673 				    "loc-name", ci->ci_locdesc[j].cld_name);
1674 				if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1675 					prop_dictionary_set_string_nocopy(
1676 					    loc_dict, "default",
1677 					    ci->ci_locdesc[j].cld_defaultstr);
1678 				prop_array_set(loc_array, j, loc_dict);
1679 				prop_object_release(loc_dict);
1680 			}
1681 			prop_dictionary_set_and_rel(attr_dict, "locators",
1682 			    loc_array);
1683 		}
1684 		prop_array_add(attr_array, attr_dict);
1685 		prop_object_release(attr_dict);
1686 	}
1687 	if (i == 0)
1688 		prop_object_release(attr_array);
1689 	else
1690 		prop_dictionary_set_and_rel(dev->dv_properties,
1691 		    "interface-attributes", attr_array);
1692 
1693 	return;
1694 }
1695 
1696 /*
1697  * Attach a found device.
1698  */
1699 static device_t
1700 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1701     const struct cfargs_internal * const args)
1702 {
1703 	device_t dev;
1704 	struct cftable *ct;
1705 	const char *drvname;
1706 	bool deferred;
1707 
1708 	KASSERT(KERNEL_LOCKED_P());
1709 
1710 	dev = config_devalloc(parent, cf, args);
1711 	if (!dev)
1712 		panic("config_attach: allocation of device softc failed");
1713 
1714 	/* XXX redundant - see below? */
1715 	if (cf->cf_fstate != FSTATE_STAR) {
1716 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1717 		cf->cf_fstate = FSTATE_FOUND;
1718 	}
1719 
1720 	config_devlink(dev);
1721 
1722 	if (config_do_twiddle && cold)
1723 		twiddle();
1724 	else
1725 		aprint_naive("Found ");
1726 	/*
1727 	 * We want the next two printfs for normal, verbose, and quiet,
1728 	 * but not silent (in which case, we're twiddling, instead).
1729 	 */
1730 	if (parent == ROOT) {
1731 		aprint_naive("%s (root)", device_xname(dev));
1732 		aprint_normal("%s (root)", device_xname(dev));
1733 	} else {
1734 		aprint_naive("%s at %s", device_xname(dev),
1735 		    device_xname(parent));
1736 		aprint_normal("%s at %s", device_xname(dev),
1737 		    device_xname(parent));
1738 		if (print)
1739 			(void) (*print)(aux, NULL);
1740 	}
1741 
1742 	/*
1743 	 * Before attaching, clobber any unfound devices that are
1744 	 * otherwise identical.
1745 	 * XXX code above is redundant?
1746 	 */
1747 	drvname = dev->dv_cfdriver->cd_name;
1748 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
1749 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1750 			if (STREQ(cf->cf_name, drvname) &&
1751 			    cf->cf_unit == dev->dv_unit) {
1752 				if (cf->cf_fstate == FSTATE_NOTFOUND)
1753 					cf->cf_fstate = FSTATE_FOUND;
1754 			}
1755 		}
1756 	}
1757 	device_register(dev, aux);
1758 
1759 	/* Let userland know */
1760 	devmon_report_device(dev, true);
1761 
1762 	/*
1763 	 * Prevent detach until the driver's attach function, and all
1764 	 * deferred actions, have finished.
1765 	 */
1766 	config_pending_incr(dev);
1767 
1768 	/* Call the driver's attach function.  */
1769 	(*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1770 
1771 	/*
1772 	 * Allow other threads to acquire references to the device now
1773 	 * that the driver's attach function is done.
1774 	 */
1775 	mutex_enter(&config_misc_lock);
1776 	KASSERT(dev->dv_attaching == curlwp);
1777 	dev->dv_attaching = NULL;
1778 	cv_broadcast(&config_misc_cv);
1779 	mutex_exit(&config_misc_lock);
1780 
1781 	/*
1782 	 * Synchronous parts of attach are done.  Allow detach, unless
1783 	 * the driver's attach function scheduled deferred actions.
1784 	 */
1785 	config_pending_decr(dev);
1786 
1787 	mutex_enter(&config_misc_lock);
1788 	deferred = (dev->dv_pending != 0);
1789 	mutex_exit(&config_misc_lock);
1790 
1791 	if (!deferred && !device_pmf_is_registered(dev))
1792 		aprint_debug_dev(dev,
1793 		    "WARNING: power management not supported\n");
1794 
1795 	config_process_deferred(&deferred_config_queue, dev);
1796 
1797 	device_register_post_config(dev, aux);
1798 	rnd_add_uint32(&rnd_autoconf_source, 0);
1799 	return dev;
1800 }
1801 
1802 device_t
1803 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1804     const struct cfargs *cfargs)
1805 {
1806 	struct cfargs_internal store;
1807 
1808 	KASSERT(KERNEL_LOCKED_P());
1809 
1810 	return config_attach_internal(parent, cf, aux, print,
1811 	    cfargs_canonicalize(cfargs, &store));
1812 }
1813 
1814 /*
1815  * As above, but for pseudo-devices.  Pseudo-devices attached in this
1816  * way are silently inserted into the device tree, and their children
1817  * attached.
1818  *
1819  * Note that because pseudo-devices are attached silently, any information
1820  * the attach routine wishes to print should be prefixed with the device
1821  * name by the attach routine.
1822  */
1823 device_t
1824 config_attach_pseudo(cfdata_t cf)
1825 {
1826 	device_t dev;
1827 
1828 	KERNEL_LOCK(1, NULL);
1829 
1830 	struct cfargs_internal args = { };
1831 	dev = config_devalloc(ROOT, cf, &args);
1832 	if (!dev)
1833 		goto out;
1834 
1835 	/* XXX mark busy in cfdata */
1836 
1837 	if (cf->cf_fstate != FSTATE_STAR) {
1838 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1839 		cf->cf_fstate = FSTATE_FOUND;
1840 	}
1841 
1842 	config_devlink(dev);
1843 
1844 #if 0	/* XXXJRT not yet */
1845 	device_register(dev, NULL);	/* like a root node */
1846 #endif
1847 
1848 	/* Let userland know */
1849 	devmon_report_device(dev, true);
1850 
1851 	/*
1852 	 * Prevent detach until the driver's attach function, and all
1853 	 * deferred actions, have finished.
1854 	 */
1855 	config_pending_incr(dev);
1856 
1857 	/* Call the driver's attach function.  */
1858 	(*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL);
1859 
1860 	/*
1861 	 * Allow other threads to acquire references to the device now
1862 	 * that the driver's attach function is done.
1863 	 */
1864 	mutex_enter(&config_misc_lock);
1865 	KASSERT(dev->dv_attaching == curlwp);
1866 	dev->dv_attaching = NULL;
1867 	cv_broadcast(&config_misc_cv);
1868 	mutex_exit(&config_misc_lock);
1869 
1870 	/*
1871 	 * Synchronous parts of attach are done.  Allow detach, unless
1872 	 * the driver's attach function scheduled deferred actions.
1873 	 */
1874 	config_pending_decr(dev);
1875 
1876 	config_process_deferred(&deferred_config_queue, dev);
1877 
1878 out:	KERNEL_UNLOCK_ONE(NULL);
1879 	return dev;
1880 }
1881 
1882 /*
1883  * Caller must hold alldevs_lock.
1884  */
1885 static void
1886 config_collect_garbage(struct devicelist *garbage)
1887 {
1888 	device_t dv;
1889 
1890 	KASSERT(!cpu_intr_p());
1891 	KASSERT(!cpu_softintr_p());
1892 	KASSERT(mutex_owned(&alldevs_lock));
1893 
1894 	while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
1895 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
1896 			if (dv->dv_del_gen != 0)
1897 				break;
1898 		}
1899 		if (dv == NULL) {
1900 			alldevs_garbage = false;
1901 			break;
1902 		}
1903 		config_devunlink(dv, garbage);
1904 	}
1905 	KASSERT(mutex_owned(&alldevs_lock));
1906 }
1907 
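/*
 * Free every device on the garbage list accumulated by
 * config_devunlink().  (Summary comment, not in the original source.)
 */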
1908 static void
1909 config_dump_garbage(struct devicelist *garbage)
1910 {
1911 	device_t dv;
1912 
1913 	while ((dv = TAILQ_FIRST(garbage)) != NULL) {
1914 		TAILQ_REMOVE(garbage, dv, dv_list);
1915 		config_devdelete(dv);
1916 	}
1917 }
1918 
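/*
 * Serialize detach against attach and concurrent detaches (summary
 * comment, not in the original source): wait until any attach in
 * progress and any other detach have completed, then claim the device
 * for detaching.  Returns an error if the wait is interrupted by a
 * signal.
 */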
1919 static int
1920 config_detach_enter(device_t dev)
1921 {
1922 	int error = 0;
1923 
1924 	mutex_enter(&config_misc_lock);
1925 
1926 	/*
1927 	 * Wait until attach has fully completed, and until any
1928 	 * concurrent detach (e.g., drvctl racing with USB event
1929 	 * thread) has completed.
1930 	 *
1931 	 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
1932 	 * deviter) to ensure the winner of the race doesn't free the
1933 	 * device leading the loser of the race into use-after-free.
1934 	 *
1935 	 * XXX Not all callers do this!
1936 	 */
1937 	while (dev->dv_pending || dev->dv_detaching) {
1938 		KASSERTMSG(dev->dv_detaching != curlwp,
1939 		    "recursively detaching %s", device_xname(dev));
1940 		error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
1941 		if (error)
1942 			goto out;
1943 	}
1944 
1945 	/*
1946 	 * Attach has completed, and no other concurrent detach is
1947 	 * running.  Claim the device for detaching.  This will cause
1948 	 * all new attempts to acquire references to block.
1949 	 */
1950 	KASSERT(dev->dv_attaching == NULL);
1951 	KASSERT(dev->dv_detaching == NULL);
1952 	dev->dv_detaching = curlwp;
1953 
1954 out:	mutex_exit(&config_misc_lock);
1955 	return error;
1956 }
1957 
1958 static void
1959 config_detach_exit(device_t dev)
1960 {
1961 
1962 	mutex_enter(&config_misc_lock);
1963 	KASSERT(dev->dv_detaching == curlwp);
1964 	dev->dv_detaching = NULL;
1965 	cv_broadcast(&config_misc_cv);
1966 	mutex_exit(&config_misc_lock);
1967 }
1968 
1969 /*
1970  * Detach a device.  Optionally forced (e.g. because of hardware
1971  * removal) and quiet.  Returns zero if successful, non-zero
1972  * (an error code) otherwise.
1973  *
1974  * Note that this code wants to be run from a process context, so
1975  * that the detach can sleep to allow processes which have a device
1976  * open to run and unwind their stacks.
1977  */
1978 int
1979 config_detach(device_t dev, int flags)
1980 {
1981 	struct alldevs_foray af;
1982 	struct cftable *ct;
1983 	cfdata_t cf;
1984 	const struct cfattach *ca;
1985 	struct cfdriver *cd;
1986 	device_t d __diagused;
1987 	int rv = 0;
1988 
1989 	KERNEL_LOCK(1, NULL);
1990 
1991 	cf = dev->dv_cfdata;
1992 	KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
1993 		cf->cf_fstate == FSTATE_STAR),
1994 	    "config_detach: %s: bad device fstate: %d",
1995 	    device_xname(dev), cf ? cf->cf_fstate : -1);
1996 
1997 	cd = dev->dv_cfdriver;
1998 	KASSERT(cd != NULL);
1999 
2000 	ca = dev->dv_cfattach;
2001 	KASSERT(ca != NULL);
2002 
2003 	/*
2004 	 * Only one detach at a time, please -- and not until fully
2005 	 * attached.
2006 	 */
2007 	rv = config_detach_enter(dev);
2008 	if (rv) {
2009 		KERNEL_UNLOCK_ONE(NULL);
2010 		return rv;
2011 	}
2012 
2013 	mutex_enter(&alldevs_lock);
2014 	if (dev->dv_del_gen != 0) {
2015 		mutex_exit(&alldevs_lock);
2016 #ifdef DIAGNOSTIC
2017 		printf("%s: %s is already detached\n", __func__,
2018 		    device_xname(dev));
2019 #endif /* DIAGNOSTIC */
2020 		config_detach_exit(dev);
2021 		KERNEL_UNLOCK_ONE(NULL);
2022 		return ENOENT;
2023 	}
2024 	alldevs_nwrite++;
2025 	mutex_exit(&alldevs_lock);
2026 
2027 	/*
2028 	 * Call the driver's .ca_detach function, unless it has none or
2029 	 * we are skipping it because it's unforced shutdown time and
2030 	 * the driver didn't ask to detach on shutdown.
2031 	 */
2032 	if (!detachall &&
2033 	    (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2034 	    (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2035 		rv = EOPNOTSUPP;
2036 	} else if (ca->ca_detach != NULL) {
2037 		rv = (*ca->ca_detach)(dev, flags);
2038 	} else
2039 		rv = EOPNOTSUPP;
2040 
2041 	/*
2042 	 * If it was not possible to detach the device, then we either
2043 	 * panic() (for the forced but failed case), or return an error.
2044 	 */
2045 	if (rv) {
2046 		/*
2047 		 * Detach failed -- likely EOPNOTSUPP or EBUSY.  Driver
2048 		 * must not have called config_detach_commit.
2049 		 */
2050 		KASSERTMSG(!dev->dv_detached,
2051 		    "%s committed to detaching and then backed out",
2052 		    device_xname(dev));
2053 		if (flags & DETACH_FORCE) {
2054 			panic("config_detach: forced detach of %s failed (%d)",
2055 			    device_xname(dev), rv);
2056 		}
2057 		goto out;
2058 	}
2059 
2060 	/*
2061 	 * The device has now been successfully detached.
2062 	 */
2063 
2064 	/*
2065 	 * If .ca_detach didn't commit to detach, then do that for it.
2066 	 * This wakes any pending device_lookup_acquire calls so they
2067 	 * will fail.
2068 	 */
2069 	config_detach_commit(dev);
2070 
2071 	/*
2072 	 * If it was possible to detach the device, ensure that the
2073 	 * device is deactivated.
2074 	 */
2075 	dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
2076 
2077 	/*
2078 	 * Wait for all device_lookup_acquire references -- mostly, for
2079 	 * all attempts to open the device -- to drain.  It is the
2080 	 * responsibility of .ca_detach to ensure anything with open
2081 	 * references will be interrupted and release them promptly,
2082 	 * not block indefinitely.  All new attempts to acquire
2083 	 * references will fail, as config_detach_commit has arranged
2084 	 * by now.
2085 	 */
2086 	mutex_enter(&config_misc_lock);
2087 	localcount_drain(dev->dv_localcount,
2088 	    &config_misc_cv, &config_misc_lock);
2089 	mutex_exit(&config_misc_lock);
2090 
2091 	/* Let userland know */
2092 	devmon_report_device(dev, false);
2093 
2094 #ifdef DIAGNOSTIC
2095 	/*
2096 	 * Sanity: If you're successfully detached, you should have no
2097 	 * children.  (Note that because children must be attached
2098 	 * after parents, we only need to search the latter part of
2099 	 * the list.)
2100 	 */
2101 	mutex_enter(&alldevs_lock);
2102 	for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2103 	    d = TAILQ_NEXT(d, dv_list)) {
2104 		if (d->dv_parent == dev && d->dv_del_gen == 0) {
2105 			printf("config_detach: detached device %s"
2106 			    " has children %s\n", device_xname(dev),
2107 			    device_xname(d));
2108 			panic("config_detach");
2109 		}
2110 	}
2111 	mutex_exit(&alldevs_lock);
2112 #endif
2113 
2114 	/* notify the parent that the child is gone */
2115 	if (dev->dv_parent) {
2116 		device_t p = dev->dv_parent;
2117 		if (p->dv_cfattach->ca_childdetached)
2118 			(*p->dv_cfattach->ca_childdetached)(p, dev);
2119 	}
2120 
2121 	/*
2122 	 * Mark cfdata to show that the unit can be reused, if possible.
2123 	 */
2124 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
2125 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2126 			if (STREQ(cf->cf_name, cd->cd_name)) {
2127 				if (cf->cf_fstate == FSTATE_FOUND &&
2128 				    cf->cf_unit == dev->dv_unit)
2129 					cf->cf_fstate = FSTATE_NOTFOUND;
2130 			}
2131 		}
2132 	}
2133 
2134 	if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2135 		aprint_normal_dev(dev, "detached\n");
2136 
2137 out:
2138 	config_detach_exit(dev);
2139 
2140 	config_alldevs_enter(&af);
2141 	KASSERT(alldevs_nwrite != 0);
2142 	--alldevs_nwrite;
2143 	if (rv == 0 && dev->dv_del_gen == 0) {
2144 		if (alldevs_nwrite == 0 && alldevs_nread == 0)
2145 			config_devunlink(dev, &af.af_garbage);
2146 		else {
2147 			dev->dv_del_gen = alldevs_gen;
2148 			alldevs_garbage = true;
2149 		}
2150 	}
2151 	config_alldevs_exit(&af);
2152 
2153 	KERNEL_UNLOCK_ONE(NULL);
2154 
2155 	return rv;
2156 }
2157 
2158 /*
2159  * config_detach_commit(dev)
2160  *
2161  *	Issued by a driver's .ca_detach routine to notify anyone
2162  *	waiting in device_lookup_acquire that the driver is committed
2163  *	to detaching the device, which allows device_lookup_acquire to
2164  *	wake up and fail immediately.
2165  *
2166  *	Safe to call multiple times -- idempotent.  Must be called
2167  *	during config_detach_enter/exit.  Safe to use with
2168  *	device_lookup because the device is not actually removed from
2169  *	the table until after config_detach_exit.
2170  */
2171 void
2172 config_detach_commit(device_t dev)
2173 {
2174 
2175 	mutex_enter(&config_misc_lock);
2176 	KASSERT(dev->dv_detaching == curlwp);
2177 	dev->dv_detached = true;
2178 	cv_broadcast(&config_misc_cv);
2179 	mutex_exit(&config_misc_lock);
2180 }
2181 
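/*
 * Example (illustrative sketch, not taken from a real driver): a
 * .ca_detach routine usually performs any checks that may fail first,
 * calls config_detach_commit() once the detach can no longer fail, and
 * only then revokes device nodes and frees resources.  The xx_*
 * names below are hypothetical.
 *
 *	static int
 *	xx_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		if ((error = xx_detach_checks(self, flags)) != 0)
 *			return error;
 *		config_detach_commit(self);
 *		xx_revoke_and_free(self);
 *		return 0;
 *	}
 */
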
2182 int
2183 config_detach_children(device_t parent, int flags)
2184 {
2185 	device_t dv;
2186 	deviter_t di;
2187 	int error = 0;
2188 
2189 	KASSERT(KERNEL_LOCKED_P());
2190 
2191 	for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2192 	     dv = deviter_next(&di)) {
2193 		if (device_parent(dv) != parent)
2194 			continue;
2195 		if ((error = config_detach(dv, flags)) != 0)
2196 			break;
2197 	}
2198 	deviter_release(&di);
2199 	return error;
2200 }
2201 
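
/*
 * Example (sketch): a bus driver's .ca_detach routine detaching its
 * children before tearing itself down; the xxbus_* names and the
 * resource teardown are hypothetical.
 *
 *	static int
 *	xxbus_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		if ((error = config_detach_children(self, flags)) != 0)
 *			return error;
 *		xxbus_free_resources(self);
 *		return 0;
 *	}
 */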
2202 device_t
2203 shutdown_first(struct shutdown_state *s)
2204 {
2205 	if (!s->initialized) {
2206 		deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2207 		s->initialized = true;
2208 	}
2209 	return shutdown_next(s);
2210 }
2211 
2212 device_t
2213 shutdown_next(struct shutdown_state *s)
2214 {
2215 	device_t dv;
2216 
2217 	while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2218 		;
2219 
2220 	if (dv == NULL)
2221 		s->initialized = false;
2222 
2223 	return dv;
2224 }
2225 
2226 bool
2227 config_detach_all(int how)
2228 {
2229 	static struct shutdown_state s;
2230 	device_t curdev;
2231 	bool progress = false;
2232 	int flags;
2233 
2234 	KERNEL_LOCK(1, NULL);
2235 
2236 	if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2237 		goto out;
2238 
2239 	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2240 		flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2241 	else
2242 		flags = DETACH_SHUTDOWN;
2243 
2244 	for (curdev = shutdown_first(&s); curdev != NULL;
2245 	     curdev = shutdown_next(&s)) {
2246 		aprint_debug(" detaching %s, ", device_xname(curdev));
2247 		if (config_detach(curdev, flags) == 0) {
2248 			progress = true;
2249 			aprint_debug("success.");
2250 		} else
2251 			aprint_debug("failed.");
2252 	}
2253 
2254 out:	KERNEL_UNLOCK_ONE(NULL);
2255 	return progress;
2256 }
2257 
2258 static bool
2259 device_is_ancestor_of(device_t ancestor, device_t descendant)
2260 {
2261 	device_t dv;
2262 
2263 	for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2264 		if (device_parent(dv) == ancestor)
2265 			return true;
2266 	}
2267 	return false;
2268 }
2269 
2270 int
2271 config_deactivate(device_t dev)
2272 {
2273 	deviter_t di;
2274 	const struct cfattach *ca;
2275 	device_t descendant;
2276 	int s, rv = 0, oflags;
2277 
2278 	for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2279 	     descendant != NULL;
2280 	     descendant = deviter_next(&di)) {
2281 		if (dev != descendant &&
2282 		    !device_is_ancestor_of(dev, descendant))
2283 			continue;
2284 
2285 		if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2286 			continue;
2287 
2288 		ca = descendant->dv_cfattach;
2289 		oflags = descendant->dv_flags;
2290 
2291 		descendant->dv_flags &= ~DVF_ACTIVE;
2292 		if (ca->ca_activate == NULL)
2293 			continue;
2294 		s = splhigh();
2295 		rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2296 		splx(s);
2297 		if (rv != 0)
2298 			descendant->dv_flags = oflags;
2299 	}
2300 	deviter_release(&di);
2301 	return rv;
2302 }
2303 
2304 /*
2305  * Defer the configuration of the specified device until its
2306  * parent device has finished attaching.
2307  */
2308 void
2309 config_defer(device_t dev, void (*func)(device_t))
2310 {
2311 	struct deferred_config *dc;
2312 
2313 	if (dev->dv_parent == NULL)
2314 		panic("config_defer: can't defer config of a root device");
2315 
2316 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2317 
2318 	config_pending_incr(dev);
2319 
2320 	mutex_enter(&config_misc_lock);
2321 #ifdef DIAGNOSTIC
2322 	struct deferred_config *odc;
2323 	TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2324 		if (odc->dc_dev == dev)
2325 			panic("config_defer: deferred twice");
2326 	}
2327 #endif
2328 	dc->dc_dev = dev;
2329 	dc->dc_func = func;
2330 	TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2331 	mutex_exit(&config_misc_lock);
2332 }
2333 
2334 /*
2335  * Defer some autoconfiguration for a device until after interrupts
2336  * are enabled.
2337  */
2338 void
2339 config_interrupts(device_t dev, void (*func)(device_t))
2340 {
2341 	struct deferred_config *dc;
2342 
2343 	/*
2344 	 * If interrupts are enabled, callback now.
2345 	 */
2346 	if (cold == 0) {
2347 		(*func)(dev);
2348 		return;
2349 	}
2350 
2351 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2352 
2353 	config_pending_incr(dev);
2354 
2355 	mutex_enter(&config_misc_lock);
2356 #ifdef DIAGNOSTIC
2357 	struct deferred_config *odc;
2358 	TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2359 		if (odc->dc_dev == dev)
2360 			panic("config_interrupts: deferred twice");
2361 	}
2362 #endif
2363 	dc->dc_dev = dev;
2364 	dc->dc_func = func;
2365 	TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2366 	mutex_exit(&config_misc_lock);
2367 }
2368 
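/*
 * Example (sketch): deferring interrupt-dependent initialization from
 * an attach routine; the xx_* names are hypothetical.
 *
 *	static void
 *	xx_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_interrupts(self, xx_attach_deferred);
 *	}
 *
 *	static void
 *	xx_attach_deferred(device_t self)
 *	{
 *		... work that needs interrupts enabled ...
 *	}
 */
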
2369 /*
2370  * Defer some autoconfiguration for a device until after the root
2371  * file system is mounted (e.g., to load firmware).
2372  */
2373 void
2374 config_mountroot(device_t dev, void (*func)(device_t))
2375 {
2376 	struct deferred_config *dc;
2377 
2378 	/*
2379 	 * If root file system is mounted, callback now.
2380 	 */
2381 	if (root_is_mounted) {
2382 		(*func)(dev);
2383 		return;
2384 	}
2385 
2386 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2387 
2388 	mutex_enter(&config_misc_lock);
2389 #ifdef DIAGNOSTIC
2390 	struct deferred_config *odc;
2391 	TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2392 		if (odc->dc_dev == dev)
2393 			panic("%s: deferred twice", __func__);
2394 	}
2395 #endif
2396 
2397 	dc->dc_dev = dev;
2398 	dc->dc_func = func;
2399 	TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2400 	mutex_exit(&config_misc_lock);
2401 }
2402 
2403 /*
2404  * Process a deferred configuration queue.
2405  */
2406 static void
2407 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2408 {
2409 	struct deferred_config *dc;
2410 
2411 	KASSERT(KERNEL_LOCKED_P());
2412 
2413 	mutex_enter(&config_misc_lock);
2414 	dc = TAILQ_FIRST(queue);
2415 	while (dc) {
2416 		if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2417 			TAILQ_REMOVE(queue, dc, dc_queue);
2418 			mutex_exit(&config_misc_lock);
2419 
2420 			(*dc->dc_func)(dc->dc_dev);
2421 			config_pending_decr(dc->dc_dev);
2422 			kmem_free(dc, sizeof(*dc));
2423 
2424 			mutex_enter(&config_misc_lock);
2425 			/* Restart, queue might have changed */
2426 			dc = TAILQ_FIRST(queue);
2427 		} else {
2428 			dc = TAILQ_NEXT(dc, dc_queue);
2429 		}
2430 	}
2431 	mutex_exit(&config_misc_lock);
2432 }
2433 
2434 /*
2435  * Manipulate the config_pending semaphore.
2436  */
2437 void
2438 config_pending_incr(device_t dev)
2439 {
2440 
2441 	mutex_enter(&config_misc_lock);
2442 	KASSERTMSG(dev->dv_pending < INT_MAX,
2443 	    "%s: excess config_pending_incr", device_xname(dev));
2444 	if (dev->dv_pending++ == 0)
2445 		TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2446 #ifdef DEBUG_AUTOCONF
2447 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2448 #endif
2449 	mutex_exit(&config_misc_lock);
2450 }
2451 
2452 void
2453 config_pending_decr(device_t dev)
2454 {
2455 
2456 	mutex_enter(&config_misc_lock);
2457 	KASSERTMSG(dev->dv_pending > 0,
2458 	    "%s: excess config_pending_decr", device_xname(dev));
2459 	if (--dev->dv_pending == 0) {
2460 		TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2461 		cv_broadcast(&config_misc_cv);
2462 	}
2463 #ifdef DEBUG_AUTOCONF
2464 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2465 #endif
2466 	mutex_exit(&config_misc_lock);
2467 }
2468 
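/*
 * Example (sketch): holding off detach and config_finalize() while a
 * hypothetical worker completes asynchronous initialization started
 * from attach.
 *
 *	config_pending_incr(self);
 *	... start asynchronous initialization ...
 *
 *	(and later, from the worker, once that initialization is done:)
 *
 *	config_pending_decr(self);
 */
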
2469 /*
2470  * Register a "finalization" routine.  Finalization routines are
2471  * called iteratively once all real devices have been found during
2472  * autoconfiguration, for as long as any one finalizer has done
2473  * any work.
2474  */
2475 int
2476 config_finalize_register(device_t dev, int (*fn)(device_t))
2477 {
2478 	struct finalize_hook *f;
2479 	int error = 0;
2480 
2481 	KERNEL_LOCK(1, NULL);
2482 
2483 	/*
2484 	 * If finalization has already been done, invoke the
2485 	 * callback function now.
2486 	 */
2487 	if (config_finalize_done) {
2488 		while ((*fn)(dev) != 0)
2489 			/* loop */ ;
2490 		goto out;
2491 	}
2492 
2493 	/* Ensure this isn't already on the list. */
2494 	TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2495 		if (f->f_func == fn && f->f_dev == dev) {
2496 			error = EEXIST;
2497 			goto out;
2498 		}
2499 	}
2500 
2501 	f = kmem_alloc(sizeof(*f), KM_SLEEP);
2502 	f->f_func = fn;
2503 	f->f_dev = dev;
2504 	TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2505 
2506 	/* Success!  */
2507 	error = 0;
2508 
2509 out:	KERNEL_UNLOCK_ONE(NULL);
2510 	return error;
2511 }
2512 
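/*
 * Example (sketch): a finalizer that retries late device discovery; it
 * must return nonzero only when it actually did some work, or the loop
 * in config_finalize() below would never terminate.  The xx_* names
 * are hypothetical.
 *
 *	static int
 *	xx_finalize(device_t self)
 *	{
 *		if (xx_nothing_left_to_do(self))
 *			return 0;
 *		xx_attach_late_children(self);
 *		return 1;
 *	}
 *
 *	config_finalize_register(self, xx_finalize);
 */
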
2513 void
2514 config_finalize(void)
2515 {
2516 	struct finalize_hook *f;
2517 	struct pdevinit *pdev;
2518 	extern struct pdevinit pdevinit[];
2519 	int errcnt, rv;
2520 
2521 	/*
2522 	 * Now that device driver threads have been created, wait for
2523 	 * them to finish any deferred autoconfiguration.
2524 	 */
2525 	mutex_enter(&config_misc_lock);
2526 	while (!TAILQ_EMPTY(&config_pending)) {
2527 		device_t dev;
2528 		int error;
2529 
2530 		error = cv_timedwait(&config_misc_cv, &config_misc_lock,
2531 		    mstohz(1000));
2532 		if (error == EWOULDBLOCK) {
2533 			aprint_debug("waiting for devices:");
2534 			TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2535 				aprint_debug(" %s", device_xname(dev));
2536 			aprint_debug("\n");
2537 		}
2538 	}
2539 	mutex_exit(&config_misc_lock);
2540 
2541 	KERNEL_LOCK(1, NULL);
2542 
2543 	/* Attach pseudo-devices. */
2544 	for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2545 		(*pdev->pdev_attach)(pdev->pdev_count);
2546 
2547 	/* Run the hooks until none of them does any work. */
2548 	do {
2549 		rv = 0;
2550 		TAILQ_FOREACH(f, &config_finalize_list, f_list)
2551 			rv |= (*f->f_func)(f->f_dev);
2552 	} while (rv != 0);
2553 
2554 	config_finalize_done = 1;
2555 
2556 	/* Now free all the hooks. */
2557 	while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2558 		TAILQ_REMOVE(&config_finalize_list, f, f_list);
2559 		kmem_free(f, sizeof(*f));
2560 	}
2561 
2562 	KERNEL_UNLOCK_ONE(NULL);
2563 
2564 	errcnt = aprint_get_error_count();
2565 	if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2566 	    (boothowto & AB_VERBOSE) == 0) {
2567 		mutex_enter(&config_misc_lock);
2568 		if (config_do_twiddle) {
2569 			config_do_twiddle = 0;
2570 			printf_nolog(" done.\n");
2571 		}
2572 		mutex_exit(&config_misc_lock);
2573 	}
2574 	if (errcnt != 0) {
2575 		printf("WARNING: %d error%s while detecting hardware; "
2576 		    "check system log.\n", errcnt,
2577 		    errcnt == 1 ? "" : "s");
2578 	}
2579 }
2580 
2581 void
2582 config_twiddle_init(void)
2583 {
2584 
2585 	if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2586 		config_do_twiddle = 1;
2587 	}
2588 	callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2589 }
2590 
2591 void
2592 config_twiddle_fn(void *cookie)
2593 {
2594 
2595 	mutex_enter(&config_misc_lock);
2596 	if (config_do_twiddle) {
2597 		twiddle();
2598 		callout_schedule(&config_twiddle_ch, mstohz(100));
2599 	}
2600 	mutex_exit(&config_misc_lock);
2601 }
2602 
2603 static void
2604 config_alldevs_enter(struct alldevs_foray *af)
2605 {
2606 	TAILQ_INIT(&af->af_garbage);
2607 	mutex_enter(&alldevs_lock);
2608 	config_collect_garbage(&af->af_garbage);
2609 }
2610 
2611 static void
2612 config_alldevs_exit(struct alldevs_foray *af)
2613 {
2614 	mutex_exit(&alldevs_lock);
2615 	config_dump_garbage(&af->af_garbage);
2616 }
2617 
2618 /*
2619  * device_lookup:
2620  *
2621  *	Look up a device instance for a given driver.
2622  *
2623  *	Caller is responsible for ensuring the device's state is
2624  *	stable, either by holding a reference already obtained with
2625  *	device_lookup_acquire or by otherwise ensuring the device is
2626  *	attached and can't be detached (e.g., holding an open device
2627  *	node and ensuring *_detach calls vdevgone).
2628  *
2629  *	XXX Find a way to assert this.
2630  *
2631  *	Safe for use up to and including interrupt context at IPL_VM.
2632  *	Never sleeps.
2633  */
2634 device_t
2635 device_lookup(cfdriver_t cd, int unit)
2636 {
2637 	device_t dv;
2638 
2639 	mutex_enter(&alldevs_lock);
2640 	if (unit < 0 || unit >= cd->cd_ndevs)
2641 		dv = NULL;
2642 	else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2643 		dv = NULL;
2644 	mutex_exit(&alldevs_lock);
2645 
2646 	return dv;
2647 }
2648 
2649 /*
2650  * device_lookup_private:
2651  *
2652  *	Look up a softc instance for a given driver.
2653  */
2654 void *
2655 device_lookup_private(cfdriver_t cd, int unit)
2656 {
2657 
2658 	return device_private(device_lookup(cd, unit));
2659 }
2660 
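/*
 * Example (sketch): the usual character-device open pattern, mapping a
 * minor number to a softc; "xx_cd" and "struct xx_softc" are
 * hypothetical.
 *
 *	struct xx_softc *sc;
 *
 *	sc = device_lookup_private(&xx_cd, minor(dev));
 *	if (sc == NULL)
 *		return ENXIO;
 */
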
2661 /*
2662  * device_lookup_acquire:
2663  *
2664  *	Look up a device instance for a given driver, and return a
2665  *	reference to it that must be released by device_release.
2666  *
2667  *	=> If the device is still attaching, blocks until *_attach has
2668  *	   returned.
2669  *
2670  *	=> If the device is detaching, blocks until *_detach has
2671  *	   returned.  May succeed or fail in that case, depending on
2672  *	   whether *_detach has backed out (EBUSY) or committed to
2673  *	   detaching.
2674  *
2675  *	May sleep.
2676  */
2677 device_t
2678 device_lookup_acquire(cfdriver_t cd, int unit)
2679 {
2680 	device_t dv;
2681 
2682 	ASSERT_SLEEPABLE();
2683 
2684 	/* XXX This should have a pserialized fast path -- TBD.  */
2685 	mutex_enter(&config_misc_lock);
2686 	mutex_enter(&alldevs_lock);
2687 retry:	if (unit < 0 || unit >= cd->cd_ndevs ||
2688 	    (dv = cd->cd_devs[unit]) == NULL ||
2689 	    dv->dv_del_gen != 0 ||
2690 	    dv->dv_detached) {
2691 		dv = NULL;
2692 	} else {
2693 		/*
2694 		 * Wait for the device to stabilize, if attaching or
2695 		 * detaching.  Either way we must wait for *_attach or
2696 		 * *_detach to complete, and either way we must retry:
2697 		 * even if detaching, *_detach might fail (EBUSY) so
2698 		 * the device may still be there.
2699 		 */
2700 		if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
2701 		    dv->dv_detaching != NULL) {
2702 			mutex_exit(&alldevs_lock);
2703 			cv_wait(&config_misc_cv, &config_misc_lock);
2704 			mutex_enter(&alldevs_lock);
2705 			goto retry;
2706 		}
2707 		localcount_acquire(dv->dv_localcount);
2708 	}
2709 	mutex_exit(&alldevs_lock);
2710 	mutex_exit(&config_misc_lock);
2711 
2712 	return dv;
2713 }
2714 
2715 /*
2716  * device_release:
2717  *
2718  *	Release a reference to a device acquired with
2719  *	device_lookup_acquire.
2720  */
2721 void
2722 device_release(device_t dv)
2723 {
2724 
2725 	localcount_release(dv->dv_localcount,
2726 	    &config_misc_cv, &config_misc_lock);
2727 }
2728 
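/*
 * Example (sketch): taking a reference across a device operation so a
 * concurrent detach cannot free the device underneath us; "xx_cd" is a
 * hypothetical cfdriver.
 *
 *	device_t dv;
 *
 *	dv = device_lookup_acquire(&xx_cd, unit);
 *	if (dv == NULL)
 *		return ENXIO;
 *	... use dv, e.g. device_private(dv) ...
 *	device_release(dv);
 */
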
2729 /*
2730  * device_find_by_xname:
2731  *
2732  *	Returns the device of the given name or NULL if it doesn't exist.
2733  */
2734 device_t
2735 device_find_by_xname(const char *name)
2736 {
2737 	device_t dv;
2738 	deviter_t di;
2739 
2740 	for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2741 		if (strcmp(device_xname(dv), name) == 0)
2742 			break;
2743 	}
2744 	deviter_release(&di);
2745 
2746 	return dv;
2747 }
2748 
2749 /*
2750  * device_find_by_driver_unit:
2751  *
2752  *	Returns the device of the given driver name and unit or
2753  *	NULL if it doesn't exist.
2754  */
2755 device_t
2756 device_find_by_driver_unit(const char *name, int unit)
2757 {
2758 	struct cfdriver *cd;
2759 
2760 	if ((cd = config_cfdriver_lookup(name)) == NULL)
2761 		return NULL;
2762 	return device_lookup(cd, unit);
2763 }
2764 
2765 static bool
2766 match_strcmp(const char * const s1, const char * const s2)
2767 {
2768 	return strcmp(s1, s2) == 0;
2769 }
2770 
2771 static bool
2772 match_pmatch(const char * const s1, const char * const s2)
2773 {
2774 	return pmatch(s1, s2, NULL) == 2;
2775 }
2776 
2777 static bool
2778 strarray_match_internal(const char ** const strings,
2779     unsigned int const nstrings, const char * const str,
2780     unsigned int * const indexp,
2781     bool (*match_fn)(const char *, const char *))
2782 {
2783 	unsigned int i;
2784 
2785 	if (strings == NULL || nstrings == 0) {
2786 		return false;
2787 	}
2788 
2789 	for (i = 0; i < nstrings; i++) {
2790 		if ((*match_fn)(strings[i], str)) {
2791 			*indexp = i;
2792 			return true;
2793 		}
2794 	}
2795 
2796 	return false;
2797 }
2798 
2799 static int
2800 strarray_match(const char ** const strings, unsigned int const nstrings,
2801     const char * const str)
2802 {
2803 	unsigned int idx;
2804 
2805 	if (strarray_match_internal(strings, nstrings, str, &idx,
2806 				    match_strcmp)) {
2807 		return (int)(nstrings - idx);
2808 	}
2809 	return 0;
2810 }
2811 
2812 static int
2813 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2814     const char * const pattern)
2815 {
2816 	unsigned int idx;
2817 
2818 	if (strarray_match_internal(strings, nstrings, pattern, &idx,
2819 				    match_pmatch)) {
2820 		return (int)(nstrings - idx);
2821 	}
2822 	return 0;
2823 }
2824 
2825 static int
2826 device_compatible_match_strarray_internal(
2827     const char **device_compats, int ndevice_compats,
2828     const struct device_compatible_entry *driver_compats,
2829     const struct device_compatible_entry **matching_entryp,
2830     int (*match_fn)(const char **, unsigned int, const char *))
2831 {
2832 	const struct device_compatible_entry *dce = NULL;
2833 	int rv;
2834 
2835 	if (ndevice_compats == 0 || device_compats == NULL ||
2836 	    driver_compats == NULL)
2837 		return 0;
2838 
2839 	for (dce = driver_compats; dce->compat != NULL; dce++) {
2840 		rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2841 		if (rv != 0) {
2842 			if (matching_entryp != NULL) {
2843 				*matching_entryp = dce;
2844 			}
2845 			return rv;
2846 		}
2847 	}
2848 	return 0;
2849 }
2850 
2851 /*
2852  * device_compatible_match:
2853  *
2854  *	Match a driver's "compatible" data against a device's
2855  *	"compatible" strings.  Returns a result weighted by
2856  *	which device "compatible" string was matched.
2857  */
2858 int
2859 device_compatible_match(const char **device_compats, int ndevice_compats,
2860     const struct device_compatible_entry *driver_compats)
2861 {
2862 	return device_compatible_match_strarray_internal(device_compats,
2863 	    ndevice_compats, driver_compats, NULL, strarray_match);
2864 }
2865 
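/*
 * Example (sketch): a driver match table and its use.  The compat
 * string and the way the device's "compatible" strings arrive from the
 * attach arguments are hypothetical.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,widget" },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	return device_compatible_match(device_compats, ndevice_compats,
 *	    compat_data);
 */
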
2866 /*
2867  * device_compatible_pmatch:
2868  *
2869  *	Like device_compatible_match(), but uses pmatch(9) to compare
2870  *	the device "compatible" strings against patterns in the
2871  *	driver's "compatible" data.
2872  */
2873 int
2874 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
2875     const struct device_compatible_entry *driver_compats)
2876 {
2877 	return device_compatible_match_strarray_internal(device_compats,
2878 	    ndevice_compats, driver_compats, NULL, strarray_pmatch);
2879 }
2880 
2881 static int
2882 device_compatible_match_strlist_internal(
2883     const char * const device_compats, size_t const device_compatsize,
2884     const struct device_compatible_entry *driver_compats,
2885     const struct device_compatible_entry **matching_entryp,
2886     int (*match_fn)(const char *, size_t, const char *))
2887 {
2888 	const struct device_compatible_entry *dce = NULL;
2889 	int rv;
2890 
2891 	if (device_compats == NULL || device_compatsize == 0 ||
2892 	    driver_compats == NULL)
2893 		return 0;
2894 
2895 	for (dce = driver_compats; dce->compat != NULL; dce++) {
2896 		rv = (*match_fn)(device_compats, device_compatsize,
2897 		    dce->compat);
2898 		if (rv != 0) {
2899 			if (matching_entryp != NULL) {
2900 				*matching_entryp = dce;
2901 			}
2902 			return rv;
2903 		}
2904 	}
2905 	return 0;
2906 }
2907 
2908 /*
2909  * device_compatible_match_strlist:
2910  *
2911  *	Like device_compatible_match(), but takes the device
2912  *	"compatible" strings as an OpenFirmware-style string
2913  *	list.
2914  */
2915 int
2916 device_compatible_match_strlist(
2917     const char * const device_compats, size_t const device_compatsize,
2918     const struct device_compatible_entry *driver_compats)
2919 {
2920 	return device_compatible_match_strlist_internal(device_compats,
2921 	    device_compatsize, driver_compats, NULL, strlist_match);
2922 }
2923 
2924 /*
2925  * device_compatible_pmatch_strlist:
2926  *
2927  *	Like device_compatible_pmatch(), but takes the device
2928  *	"compatible" strings as an OpenFirmware-style string
2929  *	list.
2930  */
2931 int
2932 device_compatible_pmatch_strlist(
2933     const char * const device_compats, size_t const device_compatsize,
2934     const struct device_compatible_entry *driver_compats)
2935 {
2936 	return device_compatible_match_strlist_internal(device_compats,
2937 	    device_compatsize, driver_compats, NULL, strlist_pmatch);
2938 }
2939 
2940 static int
2941 device_compatible_match_id_internal(
2942     uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
2943     const struct device_compatible_entry *driver_compats,
2944     const struct device_compatible_entry **matching_entryp)
2945 {
2946 	const struct device_compatible_entry *dce = NULL;
2947 
2948 	if (mask == 0)
2949 		return 0;
2950 
2951 	for (dce = driver_compats; dce->id != sentinel_id; dce++) {
2952 		if ((id & mask) == dce->id) {
2953 			if (matching_entryp != NULL) {
2954 				*matching_entryp = dce;
2955 			}
2956 			return 1;
2957 		}
2958 	}
2959 	return 0;
2960 }
2961 
2962 /*
2963  * device_compatible_match_id:
2964  *
2965  *	Like device_compatible_match(), but takes a single
2966  *	unsigned integer device ID.
2967  */
2968 int
2969 device_compatible_match_id(
2970     uintptr_t const id, uintptr_t const sentinel_id,
2971     const struct device_compatible_entry *driver_compats)
2972 {
2973 	return device_compatible_match_id_internal(id, (uintptr_t)-1,
2974 	    sentinel_id, driver_compats, NULL);
2975 }
2976 
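/*
 * Example (sketch): matching a numeric product ID against a driver
 * table; here 0 is used as the (hypothetical) sentinel terminating the
 * table.
 *
 *	static const struct device_compatible_entry ids[] = {
 *		{ .id = 0x1234 },
 *		{ .id = 0x5678 },
 *		{ .id = 0 },
 *	};
 *
 *	if (device_compatible_match_id(product_id, 0, ids) != 0)
 *		... matched ...
 */
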
2977 /*
2978  * device_compatible_lookup:
2979  *
2980  *	Look up and return the device_compatible_entry, using the
2981  *	same matching criteria used by device_compatible_match().
2982  */
2983 const struct device_compatible_entry *
2984 device_compatible_lookup(const char **device_compats, int ndevice_compats,
2985 			 const struct device_compatible_entry *driver_compats)
2986 {
2987 	const struct device_compatible_entry *dce;
2988 
2989 	if (device_compatible_match_strarray_internal(device_compats,
2990 	    ndevice_compats, driver_compats, &dce, strarray_match)) {
2991 		return dce;
2992 	}
2993 	return NULL;
2994 }
2995 
2996 /*
2997  * device_compatible_plookup:
2998  *
2999  *	Look up and return the device_compatible_entry, using the
3000  *	same matching criteria used by device_compatible_pmatch().
3001  */
3002 const struct device_compatible_entry *
3003 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3004 			  const struct device_compatible_entry *driver_compats)
3005 {
3006 	const struct device_compatible_entry *dce;
3007 
3008 	if (device_compatible_match_strarray_internal(device_compats,
3009 	    ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3010 		return dce;
3011 	}
3012 	return NULL;
3013 }
3014 
3015 /*
3016  * device_compatible_lookup_strlist:
3017  *
3018  *	Like device_compatible_lookup(), but takes the device
3019  *	"compatible" strings as an OpenFirmware-style string
3020  *	list.
3021  */
3022 const struct device_compatible_entry *
3023 device_compatible_lookup_strlist(
3024     const char * const device_compats, size_t const device_compatsize,
3025     const struct device_compatible_entry *driver_compats)
3026 {
3027 	const struct device_compatible_entry *dce;
3028 
3029 	if (device_compatible_match_strlist_internal(device_compats,
3030 	    device_compatsize, driver_compats, &dce, strlist_match)) {
3031 		return dce;
3032 	}
3033 	return NULL;
3034 }
3035 
3036 /*
3037  * device_compatible_plookup_strlist:
3038  *
3039  *	Like device_compatible_plookup(), but takes the device
3040  *	"compatible" strings as an OpenFirmware-style string
3041  *	list.
3042  */
3043 const struct device_compatible_entry *
3044 device_compatible_plookup_strlist(
3045     const char * const device_compats, size_t const device_compatsize,
3046     const struct device_compatible_entry *driver_compats)
3047 {
3048 	const struct device_compatible_entry *dce;
3049 
3050 	if (device_compatible_match_strlist_internal(device_compats,
3051 	    device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3052 		return dce;
3053 	}
3054 	return NULL;
3055 }
3056 
3057 /*
3058  * device_compatible_lookup_id:
3059  *
3060  *	Like device_compatible_lookup(), but takes a single
3061  *	unsigned integer device ID.
3062  */
3063 const struct device_compatible_entry *
3064 device_compatible_lookup_id(
3065     uintptr_t const id, uintptr_t const sentinel_id,
3066     const struct device_compatible_entry *driver_compats)
3067 {
3068 	const struct device_compatible_entry *dce;
3069 
3070 	if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3071 	    sentinel_id, driver_compats, &dce)) {
3072 		return dce;
3073 	}
3074 	return NULL;
3075 }
3076 
3077 /*
3078  * Power management related functions.
3079  */
3080 
3081 bool
3082 device_pmf_is_registered(device_t dev)
3083 {
3084 	return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3085 }
3086 
3087 bool
3088 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
3089 {
3090 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3091 		return true;
3092 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3093 		return false;
3094 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3095 	    dev->dv_driver_suspend != NULL &&
3096 	    !(*dev->dv_driver_suspend)(dev, qual))
3097 		return false;
3098 
3099 	dev->dv_flags |= DVF_DRIVER_SUSPENDED;
3100 	return true;
3101 }
3102 
3103 bool
3104 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3105 {
3106 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3107 		return true;
3108 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3109 		return false;
3110 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3111 	    dev->dv_driver_resume != NULL &&
3112 	    !(*dev->dv_driver_resume)(dev, qual))
3113 		return false;
3114 
3115 	dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3116 	return true;
3117 }
3118 
3119 bool
3120 device_pmf_driver_shutdown(device_t dev, int how)
3121 {
3122 
3123 	if (dev->dv_driver_shutdown != NULL &&
3124 	    !(*dev->dv_driver_shutdown)(dev, how))
3125 		return false;
3126 	return true;
3127 }
3128 
3129 bool
3130 device_pmf_driver_register(device_t dev,
3131     bool (*suspend)(device_t, const pmf_qual_t *),
3132     bool (*resume)(device_t, const pmf_qual_t *),
3133     bool (*shutdown)(device_t, int))
3134 {
3135 	dev->dv_driver_suspend = suspend;
3136 	dev->dv_driver_resume = resume;
3137 	dev->dv_driver_shutdown = shutdown;
3138 	dev->dv_flags |= DVF_POWER_HANDLERS;
3139 	return true;
3140 }
3141 
3142 void
3143 device_pmf_driver_deregister(device_t dev)
3144 {
3145 	device_lock_t dvl = device_getlock(dev);
3146 
3147 	dev->dv_driver_suspend = NULL;
3148 	dev->dv_driver_resume = NULL;
3149 
3150 	mutex_enter(&dvl->dvl_mtx);
3151 	dev->dv_flags &= ~DVF_POWER_HANDLERS;
3152 	while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
3153 		/* Wake a thread that waits for the lock.  That
3154 		 * thread will fail to acquire the lock, and then
3155 		 * it will wake the next thread that waits for the
3156 		 * lock, or else it will wake us.
3157 		 */
3158 		cv_signal(&dvl->dvl_cv);
3159 		pmflock_debug(dev, __func__, __LINE__);
3160 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3161 		pmflock_debug(dev, __func__, __LINE__);
3162 	}
3163 	mutex_exit(&dvl->dvl_mtx);
3164 }
3165 
3166 bool
3167 device_pmf_driver_child_register(device_t dev)
3168 {
3169 	device_t parent = device_parent(dev);
3170 
3171 	if (parent == NULL || parent->dv_driver_child_register == NULL)
3172 		return true;
3173 	return (*parent->dv_driver_child_register)(dev);
3174 }
3175 
3176 void
3177 device_pmf_driver_set_child_register(device_t dev,
3178     bool (*child_register)(device_t))
3179 {
3180 	dev->dv_driver_child_register = child_register;
3181 }
3182 
3183 static void
3184 pmflock_debug(device_t dev, const char *func, int line)
3185 {
3186 #ifdef PMFLOCK_DEBUG
3187 	device_lock_t dvl = device_getlock(dev);
3188 	const char *curlwp_name;
3189 
3190 	if (curlwp->l_name != NULL)
3191 		curlwp_name = curlwp->l_name;
3192 	else
3193 		curlwp_name = curlwp->l_proc->p_comm;
3194 
3195 	aprint_debug_dev(dev,
3196 	    "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3197 	    curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3198 #endif	/* PMFLOCK_DEBUG */
3199 }
3200 
3201 static bool
3202 device_pmf_lock1(device_t dev)
3203 {
3204 	device_lock_t dvl = device_getlock(dev);
3205 
3206 	while (device_pmf_is_registered(dev) &&
3207 	    dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3208 		dvl->dvl_nwait++;
3209 		pmflock_debug(dev, __func__, __LINE__);
3210 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3211 		pmflock_debug(dev, __func__, __LINE__);
3212 		dvl->dvl_nwait--;
3213 	}
3214 	if (!device_pmf_is_registered(dev)) {
3215 		pmflock_debug(dev, __func__, __LINE__);
3216 		/* We could not acquire the lock, but some other thread
3217 		 * may also be waiting for it.  Wake that thread.
3218 		 */
3219 		cv_signal(&dvl->dvl_cv);
3220 		return false;
3221 	}
3222 	dvl->dvl_nlock++;
3223 	dvl->dvl_holder = curlwp;
3224 	pmflock_debug(dev, __func__, __LINE__);
3225 	return true;
3226 }
3227 
3228 bool
3229 device_pmf_lock(device_t dev)
3230 {
3231 	bool rc;
3232 	device_lock_t dvl = device_getlock(dev);
3233 
3234 	mutex_enter(&dvl->dvl_mtx);
3235 	rc = device_pmf_lock1(dev);
3236 	mutex_exit(&dvl->dvl_mtx);
3237 
3238 	return rc;
3239 }
3240 
3241 void
3242 device_pmf_unlock(device_t dev)
3243 {
3244 	device_lock_t dvl = device_getlock(dev);
3245 
3246 	KASSERT(dvl->dvl_nlock > 0);
3247 	mutex_enter(&dvl->dvl_mtx);
3248 	if (--dvl->dvl_nlock == 0)
3249 		dvl->dvl_holder = NULL;
3250 	cv_signal(&dvl->dvl_cv);
3251 	pmflock_debug(dev, __func__, __LINE__);
3252 	mutex_exit(&dvl->dvl_mtx);
3253 }
3254 
3255 device_lock_t
3256 device_getlock(device_t dev)
3257 {
3258 	return &dev->dv_lock;
3259 }
3260 
3261 void *
3262 device_pmf_bus_private(device_t dev)
3263 {
3264 	return dev->dv_bus_private;
3265 }
3266 
3267 bool
3268 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3269 {
3270 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3271 		return true;
3272 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3273 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3274 		return false;
3275 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3276 	    dev->dv_bus_suspend != NULL &&
3277 	    !(*dev->dv_bus_suspend)(dev, qual))
3278 		return false;
3279 
3280 	dev->dv_flags |= DVF_BUS_SUSPENDED;
3281 	return true;
3282 }
3283 
3284 bool
3285 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3286 {
3287 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3288 		return true;
3289 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3290 	    dev->dv_bus_resume != NULL &&
3291 	    !(*dev->dv_bus_resume)(dev, qual))
3292 		return false;
3293 
3294 	dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3295 	return true;
3296 }
3297 
3298 bool
3299 device_pmf_bus_shutdown(device_t dev, int how)
3300 {
3301 
3302 	if (dev->dv_bus_shutdown != NULL &&
3303 	    !(*dev->dv_bus_shutdown)(dev, how))
3304 		return false;
3305 	return true;
3306 }
3307 
3308 void
3309 device_pmf_bus_register(device_t dev, void *priv,
3310     bool (*suspend)(device_t, const pmf_qual_t *),
3311     bool (*resume)(device_t, const pmf_qual_t *),
3312     bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3313 {
3314 	dev->dv_bus_private = priv;
3315 	dev->dv_bus_resume = resume;
3316 	dev->dv_bus_suspend = suspend;
3317 	dev->dv_bus_shutdown = shutdown;
3318 	dev->dv_bus_deregister = deregister;
3319 }
3320 
3321 void
3322 device_pmf_bus_deregister(device_t dev)
3323 {
3324 	if (dev->dv_bus_deregister == NULL)
3325 		return;
3326 	(*dev->dv_bus_deregister)(dev);
3327 	dev->dv_bus_private = NULL;
3328 	dev->dv_bus_suspend = NULL;
3329 	dev->dv_bus_resume = NULL;
3330 	dev->dv_bus_deregister = NULL;
3331 }
3332 
3333 void *
3334 device_pmf_class_private(device_t dev)
3335 {
3336 	return dev->dv_class_private;
3337 }
3338 
3339 bool
3340 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3341 {
3342 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3343 		return true;
3344 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3345 	    dev->dv_class_suspend != NULL &&
3346 	    !(*dev->dv_class_suspend)(dev, qual))
3347 		return false;
3348 
3349 	dev->dv_flags |= DVF_CLASS_SUSPENDED;
3350 	return true;
3351 }
3352 
3353 bool
3354 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3355 {
3356 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3357 		return true;
3358 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3359 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3360 		return false;
3361 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3362 	    dev->dv_class_resume != NULL &&
3363 	    !(*dev->dv_class_resume)(dev, qual))
3364 		return false;
3365 
3366 	dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3367 	return true;
3368 }
3369 
3370 void
3371 device_pmf_class_register(device_t dev, void *priv,
3372     bool (*suspend)(device_t, const pmf_qual_t *),
3373     bool (*resume)(device_t, const pmf_qual_t *),
3374     void (*deregister)(device_t))
3375 {
3376 	dev->dv_class_private = priv;
3377 	dev->dv_class_suspend = suspend;
3378 	dev->dv_class_resume = resume;
3379 	dev->dv_class_deregister = deregister;
3380 }
3381 
3382 void
3383 device_pmf_class_deregister(device_t dev)
3384 {
3385 	if (dev->dv_class_deregister == NULL)
3386 		return;
3387 	(*dev->dv_class_deregister)(dev);
3388 	dev->dv_class_private = NULL;
3389 	dev->dv_class_suspend = NULL;
3390 	dev->dv_class_resume = NULL;
3391 	dev->dv_class_deregister = NULL;
3392 }
3393 
3394 bool
3395 device_active(device_t dev, devactive_t type)
3396 {
3397 	size_t i;
3398 
3399 	if (dev->dv_activity_count == 0)
3400 		return false;
3401 
3402 	for (i = 0; i < dev->dv_activity_count; ++i) {
3403 		if (dev->dv_activity_handlers[i] == NULL)
3404 			break;
3405 		(*dev->dv_activity_handlers[i])(dev, type);
3406 	}
3407 
3408 	return true;
3409 }
3410 
3411 bool
3412 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3413 {
3414 	void (**new_handlers)(device_t, devactive_t);
3415 	void (**old_handlers)(device_t, devactive_t);
3416 	size_t i, old_size, new_size;
3417 	int s;
3418 
3419 	old_handlers = dev->dv_activity_handlers;
3420 	old_size = dev->dv_activity_count;
3421 
3422 	KASSERT(old_size == 0 || old_handlers != NULL);
3423 
3424 	for (i = 0; i < old_size; ++i) {
3425 		KASSERT(old_handlers[i] != handler);
3426 		if (old_handlers[i] == NULL) {
3427 			old_handlers[i] = handler;
3428 			return true;
3429 		}
3430 	}
3431 
3432 	new_size = old_size + 4;
3433 	new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3434 
3435 	for (i = 0; i < old_size; ++i)
3436 		new_handlers[i] = old_handlers[i];
3437 	new_handlers[old_size] = handler;
3438 	for (i = old_size+1; i < new_size; ++i)
3439 		new_handlers[i] = NULL;
3440 
3441 	s = splhigh();
3442 	dev->dv_activity_count = new_size;
3443 	dev->dv_activity_handlers = new_handlers;
3444 	splx(s);
3445 
3446 	if (old_size > 0)
3447 		kmem_free(old_handlers, sizeof(void *) * old_size);
3448 
3449 	return true;
3450 }
3451 
3452 void
3453 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3454 {
3455 	void (**old_handlers)(device_t, devactive_t);
3456 	size_t i, old_size;
3457 	int s;
3458 
3459 	old_handlers = dev->dv_activity_handlers;
3460 	old_size = dev->dv_activity_count;
3461 
3462 	for (i = 0; i < old_size; ++i) {
3463 		if (old_handlers[i] == handler)
3464 			break;
3465 		if (old_handlers[i] == NULL)
3466 			return; /* XXX panic? */
3467 	}
3468 
3469 	if (i == old_size)
3470 		return; /* XXX panic? */
3471 
3472 	for (; i < old_size - 1; ++i) {
3473 		if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3474 			continue;
3475 
3476 		if (i == 0) {
3477 			s = splhigh();
3478 			dev->dv_activity_count = 0;
3479 			dev->dv_activity_handlers = NULL;
3480 			splx(s);
3481 			kmem_free(old_handlers, sizeof(void *) * old_size);
3482 		}
3483 		return;
3484 	}
3485 	old_handlers[i] = NULL;
3486 }
3487 
3488 /* Return true iff the device_t `dv' exists at generation `gen'. */
3489 static bool
3490 device_exists_at(device_t dv, devgen_t gen)
3491 {
3492 	return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3493 	    dv->dv_add_gen <= gen;
3494 }
3495 
3496 static bool
3497 deviter_visits(const deviter_t *di, device_t dv)
3498 {
3499 	return device_exists_at(dv, di->di_gen);
3500 }
3501 
3502 /*
3503  * Device Iteration
3504  *
3505  * deviter_t: a device iterator.  Holds state for a "walk" visiting
3506  *     each device_t's in the device tree.
3507  *     each device_t in the device tree.
3508  * deviter_init(di, flags): initialize the device iterator `di'
3509  *     to "walk" the device tree.  deviter_next(di) will return
3510  *     the first device_t in the device tree, or NULL if there are
3511  *     no devices.
3512  *
3513  *     `flags' is one or more of DEVITER_F_RW, indicating that the
3514  *     caller intends to modify the device tree by calling
3515  *     config_detach(9) on devices in the order that the iterator
3516  *     returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3517  *     nearest the "root" of the device tree to be returned, first;
3518  *     DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3519  *     the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3520  *     indicating both that deviter_init() should not respect any
3521  *     locks on the device tree, and that deviter_next(di) may run
3522  *     in more than one LWP before the walk has finished.
3523  *
3524  *     Only one DEVITER_F_RW iterator may be in the device tree at
3525  *     once.
3526  *
3527  *     DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3528  *
3529  *     Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3530  *     DEVITER_F_LEAVES_FIRST are used in combination.
3531  *
3532  * deviter_first(di, flags): initialize the device iterator `di'
3533  *     and return the first device_t in the device tree, or NULL
3534  *     if there are no devices.  The statement
3535  *
3536  *         dv = deviter_first(di);
3537  *
3538  *     is shorthand for
3539  *
3540  *         deviter_init(di);
3541  *         dv = deviter_next(di);
3542  *
3543  * deviter_next(di): return the next device_t in the device tree,
3544  *     or NULL if there are no more devices.  deviter_next(di)
3545  *     is undefined if `di' was not initialized with deviter_init() or
3546  *     deviter_first().
3547  *
3548  * deviter_release(di): stops iteration (subsequent calls to
3549  *     deviter_next() will return NULL), releases any locks and
3550  *     resources held by the device iterator.
3551  *
3552  * Device iteration does not return device_t's in any particular
3553  * order.  An iterator will never return the same device_t twice.
3554  * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3555  * is called repeatedly on the same `di', it will eventually return
3556  * NULL.  It is ok to attach/detach devices during device iteration.
3557  */
3558 void
3559 deviter_init(deviter_t *di, deviter_flags_t flags)
3560 {
3561 	device_t dv;
3562 
3563 	memset(di, 0, sizeof(*di));
3564 
3565 	if ((flags & DEVITER_F_SHUTDOWN) != 0)
3566 		flags |= DEVITER_F_RW;
3567 
3568 	mutex_enter(&alldevs_lock);
3569 	if ((flags & DEVITER_F_RW) != 0)
3570 		alldevs_nwrite++;
3571 	else
3572 		alldevs_nread++;
3573 	di->di_gen = alldevs_gen++;
3574 	di->di_flags = flags;
3575 
3576 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3577 	case DEVITER_F_LEAVES_FIRST:
3578 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
3579 			if (!deviter_visits(di, dv))
3580 				continue;
3581 			di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3582 		}
3583 		break;
3584 	case DEVITER_F_ROOT_FIRST:
3585 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
3586 			if (!deviter_visits(di, dv))
3587 				continue;
3588 			di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3589 		}
3590 		break;
3591 	default:
3592 		break;
3593 	}
3594 
3595 	deviter_reinit(di);
3596 	mutex_exit(&alldevs_lock);
3597 }
3598 
3599 static void
3600 deviter_reinit(deviter_t *di)
3601 {
3602 
3603 	KASSERT(mutex_owned(&alldevs_lock));
3604 	if ((di->di_flags & DEVITER_F_RW) != 0)
3605 		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3606 	else
3607 		di->di_prev = TAILQ_FIRST(&alldevs);
3608 }
3609 
3610 device_t
3611 deviter_first(deviter_t *di, deviter_flags_t flags)
3612 {
3613 
3614 	deviter_init(di, flags);
3615 	return deviter_next(di);
3616 }
3617 
3618 static device_t
3619 deviter_next2(deviter_t *di)
3620 {
3621 	device_t dv;
3622 
3623 	KASSERT(mutex_owned(&alldevs_lock));
3624 
3625 	dv = di->di_prev;
3626 
3627 	if (dv == NULL)
3628 		return NULL;
3629 
3630 	if ((di->di_flags & DEVITER_F_RW) != 0)
3631 		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3632 	else
3633 		di->di_prev = TAILQ_NEXT(dv, dv_list);
3634 
3635 	return dv;
3636 }
3637 
3638 static device_t
3639 deviter_next1(deviter_t *di)
3640 {
3641 	device_t dv;
3642 
3643 	KASSERT(mutex_owned(&alldevs_lock));
3644 
3645 	do {
3646 		dv = deviter_next2(di);
3647 	} while (dv != NULL && !deviter_visits(di, dv));
3648 
3649 	return dv;
3650 }
3651 
3652 device_t
3653 deviter_next(deviter_t *di)
3654 {
3655 	device_t dv = NULL;
3656 
3657 	mutex_enter(&alldevs_lock);
3658 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3659 	case 0:
3660 		dv = deviter_next1(di);
3661 		break;
3662 	case DEVITER_F_LEAVES_FIRST:
3663 		while (di->di_curdepth >= 0) {
3664 			if ((dv = deviter_next1(di)) == NULL) {
3665 				di->di_curdepth--;
3666 				deviter_reinit(di);
3667 			} else if (dv->dv_depth == di->di_curdepth)
3668 				break;
3669 		}
3670 		break;
3671 	case DEVITER_F_ROOT_FIRST:
3672 		while (di->di_curdepth <= di->di_maxdepth) {
3673 			if ((dv = deviter_next1(di)) == NULL) {
3674 				di->di_curdepth++;
3675 				deviter_reinit(di);
3676 			} else if (dv->dv_depth == di->di_curdepth)
3677 				break;
3678 		}
3679 		break;
3680 	default:
3681 		break;
3682 	}
3683 	mutex_exit(&alldevs_lock);
3684 
3685 	return dv;
3686 }
3687 
3688 void
3689 deviter_release(deviter_t *di)
3690 {
3691 	bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3692 
3693 	mutex_enter(&alldevs_lock);
3694 	if (rw)
3695 		--alldevs_nwrite;
3696 	else
3697 		--alldevs_nread;
3698 	/* XXX wake a garbage-collection thread */
3699 	mutex_exit(&alldevs_lock);
3700 }
3701 
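/*
 * Example (sketch): a read-only walk over all devices, counting
 * attached instances of a hypothetical driver "xx" with
 * device_is_a(9).
 *
 *	deviter_t di;
 *	device_t dv;
 *	int n = 0;
 *
 *	for (dv = deviter_first(&di, 0); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		if (device_is_a(dv, "xx"))
 *			n++;
 *	}
 *	deviter_release(&di);
 */
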
3702 const char *
3703 cfdata_ifattr(const struct cfdata *cf)
3704 {
3705 	return cf->cf_pspec->cfp_iattr;
3706 }
3707 
3708 bool
3709 ifattr_match(const char *snull, const char *t)
3710 {
3711 	return (snull == NULL) || strcmp(snull, t) == 0;
3712 }
3713 
3714 void
3715 null_childdetached(device_t self, device_t child)
3716 {
3717 	/* do nothing */
3718 }
3719 
3720 static void
3721 sysctl_detach_setup(struct sysctllog **clog)
3722 {
3723 
3724 	sysctl_createv(clog, 0, NULL, NULL,
3725 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
3726 		CTLTYPE_BOOL, "detachall",
3727 		SYSCTL_DESCR("Detach all devices at shutdown"),
3728 		NULL, 0, &detachall, 0,
3729 		CTL_KERN, CTL_CREATE, CTL_EOL);
3730 }
3731