xref: /netbsd-src/sys/kern/subr_autoconf.c (revision f68d28ccb5df2dfe20ddb2c038cc8840e47295bd)
1 /* $NetBSD: subr_autoconf.c,v 1.313 2023/05/23 08:16:43 riastradh Exp $ */
2 
3 /*
4  * Copyright (c) 1996, 2000 Christopher G. Demetriou
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *          This product includes software developed for the
18  *          NetBSD Project.  See http://www.NetBSD.org/ for
19  *          information about NetBSD.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35  */
36 
37 /*
38  * Copyright (c) 1992, 1993
39  *	The Regents of the University of California.  All rights reserved.
40  *
41  * This software was developed by the Computer Systems Engineering group
42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43  * contributed to Berkeley.
44  *
45  * All advertising materials mentioning features or use of this software
46  * must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Lawrence Berkeley Laboratories.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp  (LBL)
75  *
76  *	@(#)subr_autoconf.c	8.3 (Berkeley) 5/17/94
77  */
78 
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.313 2023/05/23 08:16:43 riastradh Exp $");
81 
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86 
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113 
114 #include <sys/disk.h>
115 
116 #include <sys/rndsource.h>
117 
118 #include <machine/limits.h>
119 
120 /*
121  * Autoconfiguration subroutines.
122  */
123 
124 /*
125  * Device autoconfiguration timings are mixed into the entropy pool.
126  */
127 static krndsource_t rnd_autoconf_source;
128 
129 /*
130  * ioconf.c exports exactly two names: cfdata and cfroots.  All system
131  * devices and drivers are found via these tables.
132  */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135 
136 /*
137  * List of all cfdriver structures.  We use this to detect duplicates
138  * when other cfdrivers are loaded.
139  */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142 
143 /*
144  * Initial list of cfattach's.
145  */
146 extern const struct cfattachinit cfattachinit[];
147 
148 /*
149  * List of cfdata tables.  We always have one such list -- the one
150  * built statically when the kernel was configured.
151  */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154 
155 #define	ROOT ((device_t)NULL)
156 
/*
 * State carried through a match scan over cfdata entries; mapply()
 * records the best (highest-priority) candidate seen so far here.
 */
struct matchinfo {
	cfsubmatch_t fn;	/* submatch fn, or NULL to use config_match */
	device_t parent;	/* parent device doing the search */
	const int *locs;	/* locators handed to the submatch fn */
	void	*aux;		/* bus-specific attach arguments */
	struct	cfdata *match;	/* best candidate found so far */
	int	pri;		/* priority of that candidate */
};
165 
166 struct alldevs_foray {
167 	int			af_s;
168 	struct devicelist	af_garbage;
169 };
170 
171 /*
172  * Internal version of the cfargs structure; all versions are
173  * canonicalized to this.
174  */
175 struct cfargs_internal {
176 	union {
177 		cfsubmatch_t	submatch;/* submatch function (direct config) */
178 		cfsearch_t	search;	 /* search function (indirect config) */
179 	};
180 	const char *	iattr;		/* interface attribute */
181 	const int *	locators;	/* locators array */
182 	devhandle_t	devhandle;	/* devhandle_t (by value) */
183 };
184 
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t	config_attach_internal(device_t, cfdata_t, void *,
195 		    cfprint_t, const struct cfargs_internal *);
196 
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199 
200 static void pmflock_debug(device_t, const char *, int);
201 
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204 
205 struct deferred_config {
206 	TAILQ_ENTRY(deferred_config) dc_queue;
207 	device_t dc_dev;
208 	void (*dc_func)(device_t);
209 };
210 
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212 
213 static struct deferred_config_head deferred_config_queue =
214 	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224 
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226 
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 	TAILQ_ENTRY(finalize_hook) f_list;
230 	int (*f_func)(device_t);
231 	device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 	TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236 
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244 
245 static struct devicelist config_pending =
246     TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249 
250 static bool detachall = false;
251 
252 #define	STREQ(s1, s2)			\
253 	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254 
255 static bool config_initialized = false;	/* config_init() has been called. */
256 
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259 
260 static void sysctl_detach_setup(struct sysctllog **);
261 
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264 
typedef int (*cfdriver_fn)(struct cfdriver *);
/*
 * Apply drv_do to each cfdriver in the NULL-terminated vector.  On
 * the first failure, report (or panic, per dopanic) and undo the
 * already-processed entries in reverse order with drv_undo.  Callers
 * that pass dopanic == true may pass drv_undo == NULL, since panic()
 * never returns and the rollback loop is not reached.
 *
 * Returns 0 on success, or the error from the failing drv_do.
 */
static int
frob_cfdrivervec(struct cfdriver * const *cfdriverv,
	cfdriver_fn drv_do, cfdriver_fn drv_undo,
	const char *style, bool dopanic)
{
	void (*pr)(const char *, ...) __printflike(1, 2) =
	    dopanic ? panic : printf;
	int i, error = 0, e2 __diagused;

	for (i = 0; cfdriverv[i] != NULL; i++) {
		if ((error = drv_do(cfdriverv[i])) != 0) {
			pr("configure: `%s' driver %s failed: %d",
			    cfdriverv[i]->cd_name, style, error);
			goto bad;
		}
	}

	KASSERT(error == 0);
	return 0;

 bad:
	/* Terminate the message printed above, then roll back. */
	printf("\n");
	for (i--; i >= 0; i--) {
		e2 = drv_undo(cfdriverv[i]);
		KASSERT(e2 == 0);
	}

	return error;
}
295 
typedef int (*cfattach_fn)(const char *, struct cfattach *);
/*
 * Apply att_do to every attachment in the NULL-terminated cfattachinit
 * vector; each entry names a driver and carries its own NULL-terminated
 * list of cfattach pointers.  On failure, undo everything already done
 * with att_undo, in reverse order.  As with frob_cfdrivervec(), dopanic
 * callers may pass att_undo == NULL since panic() does not return.
 */
static int
frob_cfattachvec(const struct cfattachinit *cfattachv,
	cfattach_fn att_do, cfattach_fn att_undo,
	const char *style, bool dopanic)
{
	const struct cfattachinit *cfai = NULL;
	void (*pr)(const char *, ...) __printflike(1, 2) =
	    dopanic ? panic : printf;
	int j = 0, error = 0, e2 __diagused;

	for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
		for (j = 0; cfai->cfai_list[j] != NULL; j++) {
			if ((error = att_do(cfai->cfai_name,
			    cfai->cfai_list[j])) != 0) {
				pr("configure: attachment `%s' "
				    "of `%s' driver %s failed: %d",
				    cfai->cfai_list[j]->ca_name,
				    cfai->cfai_name, style, error);
				goto bad;
			}
		}
	}

	KASSERT(error == 0);
	return 0;

 bad:
	/*
	 * Rollback in reverse order.  dunno if super-important, but
	 * do that anyway.  Although the code looks a little like
	 * someone did a little integration (in the math sense).
	 *
	 * On entry, cfai points at the entry that failed and j at the
	 * failing attachment within it; undo attachments j-1..0 of each
	 * entry while walking cfai back to &cfattachv[0].
	 */
	printf("\n");
	if (cfai) {
		bool last;

		for (last = false; last == false; ) {
			if (cfai == &cfattachv[0])
				last = true;
			for (j--; j >= 0; j--) {
				e2 = att_undo(cfai->cfai_name,
				    cfai->cfai_list[j]);
				KASSERT(e2 == 0);
			}
			if (!last) {
				cfai--;
				/* Reset j to the length of this entry's
				 * list so the inner loop undoes all of
				 * it. */
				for (j = 0; cfai->cfai_list[j] != NULL; j++)
					;
			}
		}
	}

	return error;
}
351 
352 /*
353  * Initialize the autoconfiguration data structures.  Normally this
354  * is done by configure(), but some platforms need to do this very
355  * early (to e.g. initialize the console).
356  */
void
config_init(void)
{

	KASSERT(config_initialized == false);

	mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);

	mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&config_misc_cv, "cfgmisc");

	callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);

	/*
	 * Register the statically-configured drivers and attachments;
	 * any failure during bootstrap is fatal (dopanic == true).
	 */
	frob_cfdrivervec(cfdriver_list_initial,
	    config_cfdriver_attach, NULL, "bootstrap", true);
	frob_cfattachvec(cfattachinit,
	    config_cfattach_attach, NULL, "bootstrap", true);

	/* The statically-built cfdata table is always on allcftables. */
	initcftable.ct_cfdata = cfdata;
	TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);

	/* Mix autoconfiguration timings into the entropy pool. */
	rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_TIME);

	config_initialized = true;
}
383 
384 /*
385  * Init or fini drivers and attachments.  Either all or none
386  * are processed (via rollback).  It would be nice if this were
387  * atomic to outside consumers, but with the current state of
388  * locking ...
389  */
int
config_init_component(struct cfdriver * const *cfdriverv,
	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
{
	int error;

	KERNEL_LOCK(1, NULL);

	/*
	 * Attach drivers, then attachments, then cfdata.  Each later
	 * failure unwinds the earlier successful steps, so the
	 * component ends up either fully registered or not at all.
	 * Rollback failures panic (dopanic == true).
	 */
	if ((error = frob_cfdrivervec(cfdriverv,
	    config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
		goto out;
	if ((error = frob_cfattachvec(cfattachv,
	    config_cfattach_attach, config_cfattach_detach,
	    "init", false)) != 0) {
		frob_cfdrivervec(cfdriverv,
	            config_cfdriver_detach, NULL, "init rollback", true);
		goto out;
	}
	if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
		frob_cfattachvec(cfattachv,
		    config_cfattach_detach, NULL, "init rollback", true);
		frob_cfdrivervec(cfdriverv,
	            config_cfdriver_detach, NULL, "init rollback", true);
		goto out;
	}

	/* Success!  */
	error = 0;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
422 
int
config_fini_component(struct cfdriver * const *cfdriverv,
	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
{
	int error;

	KERNEL_LOCK(1, NULL);

	/*
	 * Exact reverse of config_init_component(): detach cfdata,
	 * then attachments, then drivers, undoing earlier steps if a
	 * later one fails.  If the cfdata cannot be re-attached during
	 * rollback we cannot restore a consistent state, so panic.
	 */
	if ((error = config_cfdata_detach(cfdatav)) != 0)
		goto out;
	if ((error = frob_cfattachvec(cfattachv,
	    config_cfattach_detach, config_cfattach_attach,
	    "fini", false)) != 0) {
		if (config_cfdata_attach(cfdatav, 0) != 0)
			panic("config_cfdata fini rollback failed");
		goto out;
	}
	if ((error = frob_cfdrivervec(cfdriverv,
	    config_cfdriver_detach, config_cfdriver_attach,
	    "fini", false)) != 0) {
		frob_cfattachvec(cfattachv,
	            config_cfattach_attach, NULL, "fini rollback", true);
		if (config_cfdata_attach(cfdatav, 0) != 0)
			panic("config_cfdata fini rollback failed");
		goto out;
	}

	/* Success!  */
	error = 0;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
456 
/*
 * Machine-independent autoconfiguration setup: make sure the core
 * data structures exist (some ports call config_init() much earlier,
 * e.g. for console attachment) and register the detach sysctls.
 */
void
config_init_mi(void)
{

	if (!config_initialized)
		config_init();

	sysctl_detach_setup(NULL);
}
466 
/*
 * Run deferred configuration callbacks queued for `dev' on each of
 * the deferred-, interrupt-, and mountroot-configuration queues.
 */
void
config_deferred(device_t dev)
{

	KASSERT(KERNEL_LOCKED_P());

	config_process_deferred(&deferred_config_queue, dev);
	config_process_deferred(&interrupt_config_queue, dev);
	config_process_deferred(&mountroot_config_queue, dev);
}
477 
/*
 * Worker thread: repeatedly pop an entry off interrupt_config_queue
 * and run its deferred callback, then exit once the queue drains.
 * config_misc_lock protects the queue only and is dropped while the
 * callback runs.
 */
static void
config_interrupts_thread(void *cookie)
{
	struct deferred_config *dc;
	device_t dev;

	mutex_enter(&config_misc_lock);
	while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
		TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
		mutex_exit(&config_misc_lock);

		dev = dc->dc_dev;
		(*dc->dc_func)(dev);
		if (!device_pmf_is_registered(dev))
			aprint_debug_dev(dev,
			    "WARNING: power management not supported\n");
		/* Presumably balances a config_pending_incr() made when
		 * the callback was deferred -- confirm at the enqueue
		 * site. */
		config_pending_decr(dev);
		kmem_free(dc, sizeof(*dc));

		mutex_enter(&config_misc_lock);
	}
	mutex_exit(&config_misc_lock);

	kthread_exit(0);
}
503 
504 void
505 config_create_interruptthreads(void)
506 {
507 	int i;
508 
509 	for (i = 0; i < interrupt_config_threads; i++) {
510 		(void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 		    config_interrupts_thread, NULL, NULL, "configintr");
512 	}
513 }
514 
/*
 * Worker thread: drain mountroot_config_queue, running each deferred
 * config_mountroot() callback with config_misc_lock dropped, and exit
 * when the queue is empty.
 */
static void
config_mountroot_thread(void *cookie)
{
	struct deferred_config *dc;

	mutex_enter(&config_misc_lock);
	while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
		TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
		mutex_exit(&config_misc_lock);

		(*dc->dc_func)(dc->dc_dev);
		kmem_free(dc, sizeof(*dc));

		mutex_enter(&config_misc_lock);
	}
	mutex_exit(&config_misc_lock);

	kthread_exit(0);
}
534 
535 void
536 config_create_mountrootthreads(void)
537 {
538 	int i;
539 
540 	if (!root_is_mounted)
541 		root_is_mounted = true;
542 
543 	mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 				       mountroot_config_threads;
545 	mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 					     KM_NOSLEEP);
547 	KASSERT(mountroot_config_lwpids);
548 	for (i = 0; i < mountroot_config_threads; i++) {
549 		mountroot_config_lwpids[i] = 0;
550 		(void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 				     NULL, config_mountroot_thread, NULL,
552 				     &mountroot_config_lwpids[i],
553 				     "configroot");
554 	}
555 }
556 
/*
 * Join the "configroot" threads created by
 * config_create_mountrootthreads() and free the LWP id array.
 */
void
config_finalize_mountroot(void)
{
	int i, error;

	for (i = 0; i < mountroot_config_threads; i++) {
		/* Slot never filled in: no thread to join. */
		if (mountroot_config_lwpids[i] == 0)
			continue;

		error = kthread_join(mountroot_config_lwpids[i]);
		if (error)
			/* NOTE(review): %x prints the thread index in
			 * hex; %d looks intended -- confirm. */
			printf("%s: thread %x joined with error %d\n",
			       __func__, i, error);
	}
	kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
}
573 
574 /*
575  * Announce device attach/detach to userland listeners.
576  */
577 
/*
 * Default devmon_insert_vec: no drvctl(4) driver is present, so
 * device attach/detach events cannot be delivered to userland.
 */
int
no_devmon_insert(const char *name, prop_dictionary_t p)
{

	return ENODEV;
}
584 
/*
 * Build a proplib event describing the attach or detach of `dev' and
 * hand it to the devmon consumer (drvctl) for delivery to userland.
 */
static void
devmon_report_device(device_t dev, bool isattach)
{
	prop_dictionary_t ev, dict = device_properties(dev);
	const char *parent;
	const char *what;
	const char *where;
	device_t pdev = device_parent(dev);

	/* If currently no drvctl device, just return */
	if (devmon_insert_vec == no_devmon_insert)
		return;

	ev = prop_dictionary_create();
	if (ev == NULL)
		return;

	what = (isattach ? "device-attach" : "device-detach");
	parent = (pdev == NULL ? "root" : device_xname(pdev));
	/* Pass the device's recorded location along, if any. */
	if (prop_dictionary_get_string(dict, "location", &where)) {
		prop_dictionary_set_string(ev, "location", where);
		aprint_debug("ev: %s %s at %s in [%s]\n",
		    what, device_xname(dev), parent, where);
	}
	if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
	    !prop_dictionary_set_string(ev, "parent", parent)) {
		prop_object_release(ev);
		return;
	}

	/* On success, ownership of `ev' passes to the consumer; on
	 * failure we must release it ourselves. */
	if ((*devmon_insert_vec)(what, ev) != 0)
		prop_object_release(ev);
}
618 
619 /*
620  * Add a cfdriver to the system.
621  */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 	struct cfdriver *lcd;
626 
627 	/* Make sure this driver isn't already in the system. */
628 	LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 		if (STREQ(lcd->cd_name, cd->cd_name))
630 			return EEXIST;
631 	}
632 
633 	LIST_INIT(&cd->cd_attach);
634 	LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635 
636 	return 0;
637 }
638 
/*
 * Remove a cfdriver from the system.  Fails with EBUSY if any device
 * instance of the driver still exists or any cfattach is still
 * registered on it.
 */
int
config_cfdriver_detach(struct cfdriver *cd)
{
	struct alldevs_foray af;
	int i, rc = 0;

	config_alldevs_enter(&af);
	/* Make sure there are no active instances. */
	for (i = 0; i < cd->cd_ndevs; i++) {
		if (cd->cd_devs[i] != NULL) {
			rc = EBUSY;
			break;
		}
	}
	config_alldevs_exit(&af);

	if (rc != 0)
		return rc;

	/* ...and no attachments loaded. */
	if (LIST_EMPTY(&cd->cd_attach) == 0)
		return EBUSY;

	LIST_REMOVE(cd, cd_list);

	/* With no instances left, the unit array must be gone too. */
	KASSERT(cd->cd_devs == NULL);

	return 0;
}
671 
672 /*
673  * Look up a cfdriver by name.
674  */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 	struct cfdriver *cd;
679 
680 	LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 		if (STREQ(cd->cd_name, name))
682 			return cd;
683 	}
684 
685 	return NULL;
686 }
687 
/*
 * Add a cfattach to the specified driver.  Returns ESRCH if no such
 * driver is registered, EEXIST if the driver already carries an
 * attachment of the same name, 0 on success.
 */
int
config_cfattach_attach(const char *driver, struct cfattach *ca)
{
	struct cfattach *lca;
	struct cfdriver *cd;

	cd = config_cfdriver_lookup(driver);
	if (cd == NULL)
		return ESRCH;

	/* Make sure this attachment isn't already on this driver. */
	LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
		if (STREQ(lca->ca_name, ca->ca_name))
			return EEXIST;
	}

	LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);

	return 0;
}
711 
/*
 * Remove a cfattach from the specified driver.  Returns ESRCH if no
 * such driver is registered, EBUSY if any device instance is still
 * attached through this cfattach, 0 on success.
 */
int
config_cfattach_detach(const char *driver, struct cfattach *ca)
{
	struct alldevs_foray af;
	struct cfdriver *cd;
	device_t dev;
	int i, rc = 0;

	cd = config_cfdriver_lookup(driver);
	if (cd == NULL)
		return ESRCH;

	config_alldevs_enter(&af);
	/* Make sure there are no active instances. */
	for (i = 0; i < cd->cd_ndevs; i++) {
		if ((dev = cd->cd_devs[i]) == NULL)
			continue;
		if (dev->dv_cfattach == ca) {
			rc = EBUSY;
			break;
		}
	}
	config_alldevs_exit(&af);

	if (rc != 0)
		return rc;

	LIST_REMOVE(ca, ca_list);

	return 0;
}
746 
747 /*
748  * Look up a cfattach by name.
749  */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 	struct cfattach *ca;
754 
755 	LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 		if (STREQ(ca->ca_name, atname))
757 			return ca;
758 	}
759 
760 	return NULL;
761 }
762 
763 /*
764  * Look up a cfattach by driver/attachment name.
765  */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 	struct cfdriver *cd;
770 
771 	cd = config_cfdriver_lookup(name);
772 	if (cd == NULL)
773 		return NULL;
774 
775 	return config_cfattach_lookup_cd(cd, atname);
776 }
777 
778 /*
779  * Apply the matching function and choose the best.  This is used
780  * a few times and we want to keep the code small.
781  */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 	int pri;
786 
787 	if (m->fn != NULL) {
788 		pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 	} else {
790 		pri = config_match(m->parent, cf, m->aux);
791 	}
792 	if (pri > m->pri) {
793 		m->match = cf;
794 		m->pri = pri;
795 	}
796 }
797 
/*
 * Standard submatch function for direct configuration: check the
 * candidate cfdata entry's locators against those supplied by the
 * parent bus, then defer to the driver's match routine.  A cfdata
 * locator still holding its default value (when the locator has one)
 * matches anything; otherwise it must equal the bus-supplied value.
 */
int
config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
{
	const struct cfiattrdata *ci;
	const struct cflocdesc *cl;
	int nlocs, i;

	ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
	KASSERT(ci);
	nlocs = ci->ci_loclen;
	KASSERT(!nlocs || locs);
	for (i = 0; i < nlocs; i++) {
		cl = &ci->ci_locdesc[i];
		/* Wildcard: entry left at this locator's default. */
		if (cl->cld_defaultstr != NULL &&
		    cf->cf_loc[i] == cl->cld_default)
			continue;
		if (cf->cf_loc[i] == locs[i])
			continue;
		return 0;
	}

	return config_match(parent, cf, aux);
}
821 
822 /*
823  * Helper function: check whether the driver supports the interface attribute
824  * and return its descriptor structure.
825  */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 	const struct cfiattrdata * const *cpp;
830 
831 	if (cd->cd_attrs == NULL)
832 		return 0;
833 
834 	for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 		if (STREQ((*cpp)->ci_name, ia)) {
836 			/* Match. */
837 			return *cpp;
838 		}
839 	}
840 	return 0;
841 }
842 
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 	const struct cfiattrdata * const *cpp;
847 	int i;
848 
849 	if (cd->cd_attrs == NULL)
850 		return 0;
851 
852 	for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 		i++;
854 	}
855 	return i;
856 }
857 
858 /*
859  * Lookup an interface attribute description by name.
860  * If the driver is given, consider only its supported attributes.
861  */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 	const struct cfdriver *d;
866 	const struct cfiattrdata *ia;
867 
868 	if (cd)
869 		return cfdriver_get_iattr(cd, name);
870 
871 	LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 		ia = cfdriver_get_iattr(d, name);
873 		if (ia)
874 			return ia;
875 	}
876 	return 0;
877 }
878 
879 /*
880  * Determine if `parent' is a potential parent for a device spec based
881  * on `cfp'.
882  */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 	struct cfdriver *pcd;
887 
888 	/* We don't match root nodes here. */
889 	if (cfp == NULL)
890 		return 0;
891 
892 	pcd = parent->dv_cfdriver;
893 	KASSERT(pcd != NULL);
894 
895 	/*
896 	 * First, ensure this parent has the correct interface
897 	 * attribute.
898 	 */
899 	if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 		return 0;
901 
902 	/*
903 	 * If no specific parent device instance was specified (i.e.
904 	 * we're attaching to the attribute only), we're done!
905 	 */
906 	if (cfp->cfp_parent == NULL)
907 		return 1;
908 
909 	/*
910 	 * Check the parent device's name.
911 	 */
912 	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 		return 0;	/* not the same parent */
914 
915 	/*
916 	 * Make sure the unit number matches.
917 	 */
918 	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
919 	    cfp->cfp_unit == parent->dv_unit)
920 		return 1;
921 
922 	/* Unit numbers don't match. */
923 	return 0;
924 }
925 
926 /*
927  * Helper for config_cfdata_attach(): check all devices whether it could be
928  * parent any attachment in the config data table passed, and rescan.
929  */
static void
rescan_with_cfdata(const struct cfdata *cf)
{
	device_t d;
	const struct cfdata *cf1;
	deviter_t di;

	KASSERT(KERNEL_LOCKED_P());

	/*
	 * "alldevs" is likely longer than a modules's cfdata, so make it
	 * the outer loop.
	 */
	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {

		/* Only devices whose driver can rescan are candidates. */
		if (!(d->dv_cfattach->ca_rescan))
			continue;

		/* cf is a NULL-name-terminated array of entries. */
		for (cf1 = cf; cf1->cf_name; cf1++) {

			if (!cfparent_match(d, cf1->cf_pspec))
				continue;

			(*d->dv_cfattach->ca_rescan)(d,
				cfdata_ifattr(cf1), cf1->cf_loc);

			/* Run anything the rescan deferred for `d'. */
			config_deferred(d);
		}
	}
	deviter_release(&di);
}
961 
962 /*
963  * Attach a supplemental config data table and rescan potential
964  * parent devices if required.
965  */
int
config_cfdata_attach(cfdata_t cf, int scannow)
{
	struct cftable *ct;

	KERNEL_LOCK(1, NULL);

	/* Record the new table on the global list of cfdata tables. */
	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
	ct->ct_cfdata = cf;
	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);

	/*
	 * If requested, immediately rescan existing devices that could
	 * parent attachments described by the new table.
	 */
	if (scannow)
		rescan_with_cfdata(cf);

	KERNEL_UNLOCK_ONE(NULL);

	return 0;
}
984 
985 /*
986  * Helper for config_cfdata_detach: check whether a device is
987  * found through any attachment in the config data table.
988  */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 	const struct cfdata *cf1;
993 
994 	for (cf1 = cf; cf1->cf_name; cf1++)
995 		if (d->dv_cfdata == cf1)
996 			return 1;
997 
998 	return 0;
999 }
1000 
1001 /*
1002  * Detach a supplemental config data table. Detach all devices found
1003  * through that table (and thus keeping references to it) before.
1004  */
int
config_cfdata_detach(cfdata_t cf)
{
	device_t d;
	int error = 0;
	struct cftable *ct;
	deviter_t di;

	KERNEL_LOCK(1, NULL);

	/*
	 * First detach every device instance configured through this
	 * table; they hold references to its cfdata entries.
	 */
	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
	     d = deviter_next(&di)) {
		if (!dev_in_cfdata(d, cf))
			continue;
		if ((error = config_detach(d, 0)) != 0)
			break;
	}
	deviter_release(&di);
	if (error) {
		/* `d' is the device whose detach failed. */
		aprint_error_dev(d, "unable to detach instance\n");
		goto out;
	}

	/* Then unlink and free the table itself. */
	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		if (ct->ct_cfdata == cf) {
			TAILQ_REMOVE(&allcftables, ct, ct_list);
			kmem_free(ct, sizeof(*ct));
			error = 0;
			goto out;
		}
	}

	/* not found -- shouldn't happen */
	error = EINVAL;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
1043 
/*
 * Invoke the "match" routine for a cfdata entry on behalf of
 * an external caller, usually a direct config "submatch" routine.
 * Returns the driver's match priority, or 0 if the entry has no
 * registered attachment (or the driver declines).
 */
int
config_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cfattach *ca;

	KASSERT(KERNEL_LOCKED_P());

	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
	if (ca == NULL) {
		/* No attachment for this entry, oh well. */
		return 0;
	}

	return (*ca->ca_match)(parent, cf, aux);
}
1063 
/*
 * Invoke the "probe" routine for a cfdata entry on behalf of
 * an external caller, usually an indirect config "search" routine.
 * Return value semantics are the same as config_match() today.
 */
int
config_probe(device_t parent, cfdata_t cf, void *aux)
{
	/*
	 * This is currently a synonym for config_match(), but this
	 * is an implementation detail; "match" and "probe" routines
	 * have different behaviors.
	 *
	 * XXX config_probe() should return a bool, because there is
	 * XXX no match score for probe -- it's either there or it's
	 * XXX not, but some ports abuse the return value as a way
	 * XXX to attach "critical" devices before "non-critical"
	 * XXX devices.
	 */
	return config_match(parent, cf, aux);
}
1084 
/*
 * Canonicalize a caller-supplied (possibly NULL, versioned) struct
 * cfargs into the internal representation in *store; returns store.
 */
static struct cfargs_internal *
cfargs_canonicalize(const struct cfargs * const cfargs,
    struct cfargs_internal * const store)
{
	struct cfargs_internal *args = store;

	memset(args, 0, sizeof(*args));

	/* If none specified, all-NULL pointers are good. */
	if (cfargs == NULL) {
		return args;
	}

	/*
	 * Only one arguments version is recognized at this time.
	 */
	if (cfargs->cfargs_version != CFARGS_VERSION) {
		panic("cfargs_canonicalize: unknown version %lu\n",
		    (unsigned long)cfargs->cfargs_version);
	}

	/*
	 * submatch and search are mutually-exclusive; they share a
	 * union in struct cfargs_internal.
	 */
	if (cfargs->submatch != NULL && cfargs->search != NULL) {
		panic("cfargs_canonicalize: submatch and search are "
		      "mutually-exclusive");
	}
	if (cfargs->submatch != NULL) {
		args->submatch = cfargs->submatch;
	} else if (cfargs->search != NULL) {
		args->search = cfargs->search;
	}

	args->iattr = cfargs->iattr;
	args->locators = cfargs->locators;
	args->devhandle = cfargs->devhandle;

	return args;
}
1125 
1126 /*
1127  * Iterate over all potential children of some device, calling the given
1128  * function (default being the child's match function) for each one.
1129  * Nonzero returns are matches; the highest value returned is considered
1130  * the best match.  Return the `found child' if we got a match, or NULL
1131  * otherwise.  The `aux' pointer is simply passed on through.
1132  *
1133  * Note that this function is designed so that it can be used to apply
1134  * an arbitrary function to all potential children (its return value
1135  * can be ignored).
1136  */
static cfdata_t
config_search_internal(device_t parent, void *aux,
    const struct cfargs_internal * const args)
{
	struct cftable *ct;
	cfdata_t cf;
	struct matchinfo m;

	KASSERT(config_initialized);
	/* A requested interface attribute must exist on the parent. */
	KASSERTMSG((!args->iattr ||
		cfdriver_get_iattr(parent->dv_cfdriver, args->iattr)),
	    "%s searched for child at interface attribute %s,"
	    " but device %s(4) has no such interface attribute in config(5)",
	    device_xname(parent), args->iattr,
	    parent->dv_cfdriver->cd_name);
	/*
	 * Without an explicit attribute, the parent may declare at
	 * most one, or the search would be ambiguous.
	 */
	KASSERTMSG((args->iattr ||
		cfdriver_iattr_count(parent->dv_cfdriver) < 2),
	    "%s searched for child without interface attribute,"
	    " needed to disambiguate among the %d declared for in %s(4)"
	    " in config(5)",
	    device_xname(parent),
	    cfdriver_iattr_count(parent->dv_cfdriver),
	    parent->dv_cfdriver->cd_name);

	m.fn = args->submatch;		/* N.B. union */
	m.parent = parent;
	m.locs = args->locators;
	m.aux = aux;
	m.match = NULL;			/* best match found so far */
	m.pri = 0;

	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {

			/* We don't match root nodes here. */
			if (!cf->cf_pspec)
				continue;

			/*
			 * Skip cf if no longer eligible, otherwise scan
			 * through parents for one matching `parent', and
			 * try match function.
			 */
			if (cf->cf_fstate == FSTATE_FOUND)
				continue;
			if (cf->cf_fstate == FSTATE_DNOTFOUND ||
			    cf->cf_fstate == FSTATE_DSTAR)
				continue;

			/*
			 * If an interface attribute was specified,
			 * consider only children which attach to
			 * that attribute.
			 */
			if (args->iattr != NULL &&
			    !STREQ(args->iattr, cfdata_ifattr(cf)))
				continue;

			/* mapply() records the best-scoring match in m. */
			if (cfparent_match(parent, cf->cf_pspec))
				mapply(&m, cf);
		}
	}
	/* Contribute an autoconfiguration event to the entropy source. */
	rnd_add_uint32(&rnd_autoconf_source, 0);
	return m.match;
}
1202 
1203 cfdata_t
1204 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1205 {
1206 	cfdata_t cf;
1207 	struct cfargs_internal store;
1208 
1209 	cf = config_search_internal(parent, aux,
1210 	    cfargs_canonicalize(cfargs, &store));
1211 
1212 	return cf;
1213 }
1214 
1215 /*
1216  * Find the given root device.
1217  * This is much like config_search, but there is no parent.
1218  * Don't bother with multiple cfdata tables; the root node
1219  * must always be in the initial table.
1220  */
1221 cfdata_t
1222 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1223 {
1224 	cfdata_t cf;
1225 	const short *p;
1226 	struct matchinfo m;
1227 
1228 	m.fn = fn;
1229 	m.parent = ROOT;
1230 	m.aux = aux;
1231 	m.match = NULL;
1232 	m.pri = 0;
1233 	m.locs = 0;
1234 	/*
1235 	 * Look at root entries for matching name.  We do not bother
1236 	 * with found-state here since only one root should ever be
1237 	 * searched (and it must be done first).
1238 	 */
1239 	for (p = cfroots; *p >= 0; p++) {
1240 		cf = &cfdata[*p];
1241 		if (strcmp(cf->cf_name, rootname) == 0)
1242 			mapply(&m, cf);
1243 	}
1244 	return m.match;
1245 }
1246 
/*
 * Messages printed for an unconfigured device, indexed by the return
 * value of the caller's cfprint_t routine (QUIET/UNCONF/UNSUPP).
 */
static const char * const msgs[] = {
[QUIET]		=	"",
[UNCONF]	=	" not configured\n",
[UNSUPP]	=	" unsupported\n",
};
1252 
1253 /*
1254  * The given `aux' argument describes a device that has been found
1255  * on the given parent, but not necessarily configured.  Locate the
1256  * configuration data for that device (using the submatch function
1257  * provided, or using candidates' cd_match configuration driver
1258  * functions) and attach it, and return its device_t.  If the device was
1259  * not configured, call the given `print' function and return NULL.
1260  */
device_t
config_found_acquire(device_t parent, void *aux, cfprint_t print,
    const struct cfargs * const cfargs)
{
	cfdata_t cf;
	struct cfargs_internal store;
	const struct cfargs_internal * const args =
	    cfargs_canonicalize(cfargs, &store);
	device_t dev;

	KERNEL_LOCK(1, NULL);

	/* Find a matching cfdata entry and attach it if there is one. */
	cf = config_search_internal(parent, aux, args);
	if (cf != NULL) {
		dev = config_attach_internal(parent, cf, aux, print, args);
		goto out;
	}

	/*
	 * Nothing matched: announce the unconfigured device.  The
	 * print routine's return value indexes msgs[] (QUIET, UNCONF,
	 * or UNSUPP).
	 */
	if (print) {
		if (config_do_twiddle && cold)
			twiddle();

		const int pret = (*print)(aux, device_xname(parent));
		KASSERT(pret >= 0);
		KASSERT(pret < __arraycount(msgs));
		KASSERT(msgs[pret] != NULL);
		aprint_normal("%s", msgs[pret]);
	}

	dev = NULL;

out:	KERNEL_UNLOCK_ONE(NULL);
	return dev;
}
1295 
1296 /*
1297  * config_found(parent, aux, print, cfargs)
1298  *
1299  *	Legacy entry point for callers whose use of the returned
1300  *	device_t is not delimited by device_release.
1301  *
1302  *	The caller is required to hold the kernel lock as a fragile
1303  *	defence against races.
1304  *
1305  *	Callers should ignore the return value or be converted to
1306  *	config_found_acquire with a matching device_release once they
1307  *	have finished with the returned device_t.
1308  */
1309 device_t
1310 config_found(device_t parent, void *aux, cfprint_t print,
1311     const struct cfargs * const cfargs)
1312 {
1313 	device_t dev;
1314 
1315 	KASSERT(KERNEL_LOCKED_P());
1316 
1317 	dev = config_found_acquire(parent, aux, print, cfargs);
1318 	if (dev == NULL)
1319 		return NULL;
1320 	device_release(dev);
1321 
1322 	return dev;
1323 }
1324 
1325 /*
1326  * As above, but for root devices.
1327  */
1328 device_t
1329 config_rootfound(const char *rootname, void *aux)
1330 {
1331 	cfdata_t cf;
1332 	device_t dev = NULL;
1333 
1334 	KERNEL_LOCK(1, NULL);
1335 	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1336 		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1337 	else
1338 		aprint_error("root device %s not configured\n", rootname);
1339 	KERNEL_UNLOCK_ONE(NULL);
1340 	return dev;
1341 }
1342 
/*
 * Format n in decimal, working backwards from the end of the buffer
 * (just like sprintf(buf, "%d"), but right-aligned).  `ep' points one
 * past the last usable byte; the returned pointer is the start of the
 * NUL-terminated string.  n is expected to be non-negative.
 */
static char *
number(char *ep, int n)
{

	*--ep = '\0';
	for (; n >= 10; n /= 10)
		*--ep = '0' + (n % 10);
	*--ep = '0' + n;
	return ep;
}
1356 
1357 /*
1358  * Expand the size of the cd_devs array if necessary.
1359  *
1360  * The caller must hold alldevs_lock. config_makeroom() may release and
1361  * re-acquire alldevs_lock, so callers should re-check conditions such
1362  * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1363  * returns.
1364  */
static void
config_makeroom(int n, struct cfdriver *cd)
{
	int ondevs, nndevs;
	device_t *osp, *nsp;

	KASSERT(mutex_owned(&alldevs_lock));
	alldevs_nwrite++;

	/*
	 * Choose the new size: double, starting from max(4, cd_ndevs),
	 * until index n fits.
	 */
	/* XXX arithmetic overflow */
	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
		;

	while (n >= cd->cd_ndevs) {
		/*
		 * Need to expand the array.
		 */
		ondevs = cd->cd_ndevs;
		osp = cd->cd_devs;

		/*
		 * Release alldevs_lock around allocation, which may
		 * sleep.
		 */
		mutex_exit(&alldevs_lock);
		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
		mutex_enter(&alldevs_lock);

		/*
		 * If another thread moved the array while we did
		 * not hold alldevs_lock, try again.
		 */
		if (cd->cd_devs != osp || cd->cd_ndevs != ondevs) {
			mutex_exit(&alldevs_lock);
			kmem_free(nsp, sizeof(device_t) * nndevs);
			mutex_enter(&alldevs_lock);
			continue;
		}

		/* Zero the new tail, copy over the existing entries. */
		memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
		if (ondevs != 0)
			memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);

		cd->cd_ndevs = nndevs;
		cd->cd_devs = nsp;
		/* Free the old array; again, kmem_free may sleep. */
		if (ondevs != 0) {
			mutex_exit(&alldevs_lock);
			kmem_free(osp, sizeof(device_t) * ondevs);
			mutex_enter(&alldevs_lock);
		}
	}
	KASSERT(mutex_owned(&alldevs_lock));
	alldevs_nwrite--;
}
1419 
1420 /*
1421  * Put dev into the devices list.
1422  */
static void
config_devlink(device_t dev)
{

	mutex_enter(&alldevs_lock);

	/* The unit slot must already point at dev (config_unit_alloc). */
	KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);

	/* Record the alldevs generation at which dev was added. */
	dev->dv_add_gen = alldevs_gen;
	/* It is safe to add a device to the tail of the list while
	 * readers and writers are in the list.
	 */
	TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
	mutex_exit(&alldevs_lock);
}
1438 
1439 static void
1440 config_devfree(device_t dev)
1441 {
1442 
1443 	KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1444 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1445 
1446 	if (dev->dv_cfattach->ca_devsize > 0)
1447 		kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1448 	kmem_free(dev, sizeof(*dev));
1449 }
1450 
1451 /*
1452  * Caller must hold alldevs_lock.
1453  */
/*
 * Move dev from the global device list to the caller's garbage list;
 * actual deallocation happens later via config_devdelete().
 */
static void
config_devunlink(device_t dev, struct devicelist *garbage)
{
	struct device_garbage *dg = &dev->dv_garbage;
	cfdriver_t cd = device_cfdriver(dev);
	int i;

	KASSERT(mutex_owned(&alldevs_lock));
	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);

	/* Unlink from device list.  Link to garbage list. */
	TAILQ_REMOVE(&alldevs, dev, dv_list);
	TAILQ_INSERT_TAIL(garbage, dev, dv_list);

	/* Remove from cfdriver's array. */
	cd->cd_devs[dev->dv_unit] = NULL;

	/*
	 * If the device now has no units in use, unlink its softc array.
	 */
	for (i = 0; i < cd->cd_ndevs; i++) {
		if (cd->cd_devs[i] != NULL)
			break;
	}
	/* Nothing found.  Unlink, now.  Deallocate, later. */
	if (i == cd->cd_ndevs) {
		/* Stash the array in dv_garbage for config_devdelete(). */
		dg->dg_ndevs = cd->cd_ndevs;
		dg->dg_devs = cd->cd_devs;
		cd->cd_devs = NULL;
		cd->cd_ndevs = 0;
	}
}
1486 
/*
 * Final deallocation of an unlinked device: tear down its locks,
 * localcount, properties, locators, and (if stashed by
 * config_devunlink) the cfdriver's old cd_devs array.
 */
static void
config_devdelete(device_t dev)
{
	struct device_garbage *dg = &dev->dv_garbage;
	device_lock_t dvl = device_getlock(dev);

	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);

	if (dg->dg_devs != NULL)
		kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);

	localcount_fini(dev->dv_localcount);
	kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));

	cv_destroy(&dvl->dvl_cv);
	mutex_destroy(&dvl->dvl_mtx);

	KASSERT(dev->dv_properties != NULL);
	prop_object_release(dev->dv_properties);

	if (dev->dv_activity_handlers)
		panic("%s with registered handlers", __func__);

	/*
	 * The allocation size was stored in the slot just before the
	 * locator values (see config_devalloc).
	 */
	if (dev->dv_locators) {
		size_t amount = *--dev->dv_locators;
		kmem_free(dev->dv_locators, amount);
	}

	config_devfree(dev);
}
1517 
1518 static int
1519 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1520 {
1521 	int unit = cf->cf_unit;
1522 
1523 	KASSERT(mutex_owned(&alldevs_lock));
1524 
1525 	if (unit < 0)
1526 		return -1;
1527 	if (cf->cf_fstate == FSTATE_STAR) {
1528 		for (; unit < cd->cd_ndevs; unit++)
1529 			if (cd->cd_devs[unit] == NULL)
1530 				break;
1531 		/*
1532 		 * unit is now the unit of the first NULL device pointer,
1533 		 * or max(cd->cd_ndevs,cf->cf_unit).
1534 		 */
1535 	} else {
1536 		if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1537 			unit = -1;
1538 	}
1539 	return unit;
1540 }
1541 
/*
 * Reserve a unit number for dev in cd's cd_devs array, growing the
 * array as needed.  Returns the unit number, or -1 if the cfdata
 * entry's unit is unavailable.
 */
static int
config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
{
	struct alldevs_foray af;
	int unit;

	config_alldevs_enter(&af);
	for (;;) {
		unit = config_unit_nextfree(cd, cf);
		if (unit == -1)
			break;
		if (unit < cd->cd_ndevs) {
			cd->cd_devs[unit] = dev;
			dev->dv_unit = unit;
			break;
		}
		/*
		 * The free unit lies beyond the current array; grow it
		 * and retry.  config_makeroom() may drop alldevs_lock,
		 * so the free unit must be recomputed afterwards.
		 */
		config_makeroom(unit, cd);
	}
	config_alldevs_exit(&af);

	return unit;
}
1564 
/*
 * Allocate and initialize a device_t (plus its private softc) for the
 * given cfdata entry.  Returns NULL if the driver or attachment cannot
 * be looked up, or if no unit number is available.  The device is not
 * yet linked into the device tree (see config_devlink).
 */
static device_t
config_devalloc(const device_t parent, const cfdata_t cf,
    const struct cfargs_internal * const args)
{
	cfdriver_t cd;
	cfattach_t ca;
	size_t lname, lunit;
	const char *xunit;
	int myunit;
	char num[10];
	device_t dev;
	void *dev_private;
	const struct cfiattrdata *ia;
	device_lock_t dvl;

	/* Resolve the driver and attachment named by the cfdata entry. */
	cd = config_cfdriver_lookup(cf->cf_name);
	if (cd == NULL)
		return NULL;

	ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
	if (ca == NULL)
		return NULL;

	/* get memory for all device vars */
	KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
	if (ca->ca_devsize > 0) {
		dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
	} else {
		dev_private = NULL;
	}
	dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);

	dev->dv_handle = args->devhandle;

	dev->dv_class = cd->cd_class;
	dev->dv_cfdata = cf;
	dev->dv_cfdriver = cd;
	dev->dv_cfattach = ca;
	dev->dv_activity_count = 0;
	dev->dv_activity_handlers = NULL;
	dev->dv_private = dev_private;
	dev->dv_flags = ca->ca_flags;	/* inherit flags from class */
	dev->dv_attaching = curlwp;

	/* Claim a unit number; fails if the entry's unit is taken. */
	myunit = config_unit_alloc(dev, cd, cf);
	if (myunit == -1) {
		config_devfree(dev);
		return NULL;
	}

	/* compute length of name and decimal expansion of unit number */
	lname = strlen(cd->cd_name);
	xunit = number(&num[sizeof(num)], myunit);
	lunit = &num[sizeof(num)] - xunit;
	if (lname + lunit > sizeof(dev->dv_xname))
		panic("config_devalloc: device name too long");

	dvl = device_getlock(dev);

	mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&dvl->dvl_cv, "pmfsusp");

	/* Build dv_xname as "<driver><unit>", e.g. "sd0". */
	memcpy(dev->dv_xname, cd->cd_name, lname);
	memcpy(dev->dv_xname + lname, xunit, lunit);
	dev->dv_parent = parent;
	if (parent != NULL)
		dev->dv_depth = parent->dv_depth + 1;
	else
		dev->dv_depth = 0;
	dev->dv_flags |= DVF_ACTIVE;	/* always initially active */
	if (args->locators) {
		KASSERT(parent); /* no locators at root */
		ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
		/*
		 * The allocation size is stored in the slot just before
		 * the locator values; config_devdelete() reads it back
		 * via *--dev->dv_locators when freeing.
		 */
		dev->dv_locators =
		    kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
		*dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
		memcpy(dev->dv_locators, args->locators,
		    sizeof(int) * ia->ci_loclen);
	}
	dev->dv_properties = prop_dictionary_create();
	KASSERT(dev->dv_properties != NULL);

	prop_dictionary_set_string_nocopy(dev->dv_properties,
	    "device-driver", dev->dv_cfdriver->cd_name);
	prop_dictionary_set_uint16(dev->dv_properties,
	    "device-unit", dev->dv_unit);
	if (parent != NULL) {
		prop_dictionary_set_string(dev->dv_properties,
		    "device-parent", device_xname(parent));
	}

	/*
	 * Per-device localcount; drained in config_detach_release()
	 * before the device_t is destroyed.
	 */
	dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
	    KM_SLEEP);
	localcount_init(dev->dv_localcount);

	if (dev->dv_cfdriver->cd_attrs != NULL)
		config_add_attrib_dict(dev);

	return dev;
}
1665 
1666 /*
1667  * Create an array of device attach attributes and add it
1668  * to the device's dv_properties dictionary.
1669  *
1670  * <key>interface-attributes</key>
1671  * <array>
1672  *    <dict>
1673  *       <key>attribute-name</key>
1674  *       <string>foo</string>
1675  *       <key>locators</key>
1676  *       <array>
1677  *          <dict>
1678  *             <key>loc-name</key>
1679  *             <string>foo-loc1</string>
1680  *          </dict>
1681  *          <dict>
1682  *             <key>loc-name</key>
1683  *             <string>foo-loc2</string>
1684  *             <key>default</key>
1685  *             <string>foo-loc2-default</string>
1686  *          </dict>
1687  *          ...
1688  *       </array>
1689  *    </dict>
1690  *    ...
1691  * </array>
1692  */
1693 
static void
config_add_attrib_dict(device_t dev)
{
	int i, j;
	const struct cfiattrdata *ci;
	prop_dictionary_t attr_dict, loc_dict;
	prop_array_t attr_array, loc_array;

	/* Best-effort: silently give up on any allocation failure. */
	if ((attr_array = prop_array_create()) == NULL)
		return;

	/* One dictionary per interface attribute in cd_attrs. */
	for (i = 0; ; i++) {
		if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
			break;
		if ((attr_dict = prop_dictionary_create()) == NULL)
			break;
		prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
		    ci->ci_name);

		/* Create an array of the locator names and defaults */

		if (ci->ci_loclen != 0 &&
		    (loc_array = prop_array_create()) != NULL) {
			for (j = 0; j < ci->ci_loclen; j++) {
				loc_dict = prop_dictionary_create();
				if (loc_dict == NULL)
					continue;
				prop_dictionary_set_string_nocopy(loc_dict,
				    "loc-name", ci->ci_locdesc[j].cld_name);
				if (ci->ci_locdesc[j].cld_defaultstr != NULL)
					prop_dictionary_set_string_nocopy(
					    loc_dict, "default",
					    ci->ci_locdesc[j].cld_defaultstr);
				prop_array_set(loc_array, j, loc_dict);
				prop_object_release(loc_dict);
			}
			/* set_and_rel consumes the loc_array reference. */
			prop_dictionary_set_and_rel(attr_dict, "locators",
			    loc_array);
		}
		prop_array_add(attr_array, attr_dict);
		prop_object_release(attr_dict);
	}
	/* Drop the array if no attributes were added at all. */
	if (i == 0)
		prop_object_release(attr_array);
	else
		prop_dictionary_set_and_rel(dev->dv_properties,
		    "interface-attributes", attr_array);

	return;
}
1744 
1745 /*
1746  * Attach a found device.
1747  *
1748  * Returns the device referenced, to be released with device_release.
1749  */
static device_t
config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
    const struct cfargs_internal * const args)
{
	device_t dev;
	struct cftable *ct;
	const char *drvname;
	bool deferred;

	KASSERT(KERNEL_LOCKED_P());

	dev = config_devalloc(parent, cf, args);
	if (!dev)
		panic("config_attach: allocation of device softc failed");

	/* XXX redundant - see below? */
	if (cf->cf_fstate != FSTATE_STAR) {
		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
		cf->cf_fstate = FSTATE_FOUND;
	}

	config_devlink(dev);

	if (config_do_twiddle && cold)
		twiddle();
	else
		aprint_naive("Found ");
	/*
	 * We want the next two printfs for normal, verbose, and quiet,
	 * but not silent (in which case, we're twiddling, instead).
	 */
	if (parent == ROOT) {
		aprint_naive("%s (root)", device_xname(dev));
		aprint_normal("%s (root)", device_xname(dev));
	} else {
		aprint_naive("%s at %s", device_xname(dev),
		    device_xname(parent));
		aprint_normal("%s at %s", device_xname(dev),
		    device_xname(parent));
		if (print)
			(void) (*print)(aux, NULL);
	}

	/*
	 * Before attaching, clobber any unfound devices that are
	 * otherwise identical.
	 * XXX code above is redundant?
	 */
	drvname = dev->dv_cfdriver->cd_name;
	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
			if (STREQ(cf->cf_name, drvname) &&
			    cf->cf_unit == dev->dv_unit) {
				if (cf->cf_fstate == FSTATE_NOTFOUND)
					cf->cf_fstate = FSTATE_FOUND;
			}
		}
	}
	/* Machine-dependent hook, pre-attach. */
	device_register(dev, aux);

	/* Let userland know */
	devmon_report_device(dev, true);

	/*
	 * Prevent detach until the driver's attach function, and all
	 * deferred actions, have finished.
	 */
	config_pending_incr(dev);

	/*
	 * Prevent concurrent detach from destroying the device_t until
	 * the caller has released the device.
	 */
	device_acquire(dev);

	/* Call the driver's attach function.  */
	(*dev->dv_cfattach->ca_attach)(parent, dev, aux);

	/*
	 * Allow other threads to acquire references to the device now
	 * that the driver's attach function is done.
	 */
	mutex_enter(&config_misc_lock);
	KASSERT(dev->dv_attaching == curlwp);
	dev->dv_attaching = NULL;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);

	/*
	 * Synchronous parts of attach are done.  Allow detach, unless
	 * the driver's attach function scheduled deferred actions.
	 */
	config_pending_decr(dev);

	mutex_enter(&config_misc_lock);
	deferred = (dev->dv_pending != 0);
	mutex_exit(&config_misc_lock);

	if (!deferred && !device_pmf_is_registered(dev))
		aprint_debug_dev(dev,
		    "WARNING: power management not supported\n");

	/* Run any configuration actions deferred against this device. */
	config_process_deferred(&deferred_config_queue, dev);

	/* Machine-dependent hook, post-attach. */
	device_register_post_config(dev, aux);
	/* Contribute an autoconfiguration event to the entropy source. */
	rnd_add_uint32(&rnd_autoconf_source, 0);
	return dev;
}
1858 
1859 device_t
1860 config_attach_acquire(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1861     const struct cfargs *cfargs)
1862 {
1863 	struct cfargs_internal store;
1864 	device_t dev;
1865 
1866 	KERNEL_LOCK(1, NULL);
1867 	dev = config_attach_internal(parent, cf, aux, print,
1868 	    cfargs_canonicalize(cfargs, &store));
1869 	KERNEL_UNLOCK_ONE(NULL);
1870 
1871 	return dev;
1872 }
1873 
1874 /*
1875  * config_attach(parent, cf, aux, print, cfargs)
1876  *
1877  *	Legacy entry point for callers whose use of the returned
1878  *	device_t is not delimited by device_release.
1879  *
1880  *	The caller is required to hold the kernel lock as a fragile
1881  *	defence against races.
1882  *
1883  *	Callers should ignore the return value or be converted to
1884  *	config_attach_acquire with a matching device_release once they
1885  *	have finished with the returned device_t.
1886  */
1887 device_t
1888 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1889     const struct cfargs *cfargs)
1890 {
1891 	device_t dev;
1892 
1893 	KASSERT(KERNEL_LOCKED_P());
1894 
1895 	dev = config_attach_acquire(parent, cf, aux, print, cfargs);
1896 	if (dev == NULL)
1897 		return NULL;
1898 	device_release(dev);
1899 
1900 	return dev;
1901 }
1902 
1903 /*
1904  * As above, but for pseudo-devices.  Pseudo-devices attached in this
1905  * way are silently inserted into the device tree, and their children
1906  * attached.
1907  *
1908  * Note that because pseudo-devices are attached silently, any information
1909  * the attach routine wishes to print should be prefixed with the device
1910  * name by the attach routine.
1911  */
device_t
config_attach_pseudo_acquire(cfdata_t cf, void *aux)
{
	device_t dev;

	KERNEL_LOCK(1, NULL);

	/* All-zero cfargs: no locators, submatch/search, or devhandle. */
	struct cfargs_internal args = { };
	dev = config_devalloc(ROOT, cf, &args);
	if (!dev)
		goto out;

	/* XXX mark busy in cfdata */

	if (cf->cf_fstate != FSTATE_STAR) {
		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
		cf->cf_fstate = FSTATE_FOUND;
	}

	config_devlink(dev);

#if 0	/* XXXJRT not yet */
	device_register(dev, NULL);	/* like a root node */
#endif

	/* Let userland know */
	devmon_report_device(dev, true);

	/*
	 * Prevent detach until the driver's attach function, and all
	 * deferred actions, have finished.
	 */
	config_pending_incr(dev);

	/*
	 * Prevent concurrent detach from destroying the device_t until
	 * the caller has released the device.
	 */
	device_acquire(dev);

	/* Call the driver's attach function.  */
	(*dev->dv_cfattach->ca_attach)(ROOT, dev, aux);

	/*
	 * Allow other threads to acquire references to the device now
	 * that the driver's attach function is done.
	 */
	mutex_enter(&config_misc_lock);
	KASSERT(dev->dv_attaching == curlwp);
	dev->dv_attaching = NULL;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);

	/*
	 * Synchronous parts of attach are done.  Allow detach, unless
	 * the driver's attach function scheduled deferred actions.
	 */
	config_pending_decr(dev);

	/* Run any configuration actions deferred against this device. */
	config_process_deferred(&deferred_config_queue, dev);

out:	KERNEL_UNLOCK_ONE(NULL);
	return dev;
}
1976 
1977 /*
1978  * config_attach_pseudo(cf)
1979  *
1980  *	Legacy entry point for callers whose use of the returned
1981  *	device_t is not delimited by device_release.
1982  *
1983  *	The caller is required to hold the kernel lock as a fragile
1984  *	defence against races.
1985  *
1986  *	Callers should ignore the return value or be converted to
1987  *	config_attach_pseudo_acquire with a matching device_release
1988  *	once they have finished with the returned device_t.  As a
1989  *	bonus, config_attach_pseudo_acquire can pass a non-null aux
1990  *	argument into the driver's attach routine.
1991  */
1992 device_t
1993 config_attach_pseudo(cfdata_t cf)
1994 {
1995 	device_t dev;
1996 
1997 	dev = config_attach_pseudo_acquire(cf, NULL);
1998 	if (dev == NULL)
1999 		return dev;
2000 	device_release(dev);
2001 
2002 	return dev;
2003 }
2004 
2005 /*
2006  * Caller must hold alldevs_lock.
2007  */
/*
 * Move all deleted (dv_del_gen != 0) devices from alldevs onto the
 * caller's garbage list, but only while no readers or writers are
 * traversing alldevs.  Caller must hold alldevs_lock.
 */
static void
config_collect_garbage(struct devicelist *garbage)
{
	device_t dv;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(mutex_owned(&alldevs_lock));

	while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
		/* Find the next device marked deleted, if any. */
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (dv->dv_del_gen != 0)
				break;
		}
		if (dv == NULL) {
			/* No deleted devices remain; clear the flag. */
			alldevs_garbage = false;
			break;
		}
		config_devunlink(dv, garbage);
	}
	KASSERT(mutex_owned(&alldevs_lock));
}
2030 
2031 static void
2032 config_dump_garbage(struct devicelist *garbage)
2033 {
2034 	device_t dv;
2035 
2036 	while ((dv = TAILQ_FIRST(garbage)) != NULL) {
2037 		TAILQ_REMOVE(garbage, dv, dv_list);
2038 		config_devdelete(dv);
2039 	}
2040 }
2041 
/*
 * Wait for any in-progress attach and any concurrent detach of dev to
 * finish, then claim the device for detaching by this lwp.  Returns 0
 * on success, or an error if the interruptible wait was broken by a
 * signal.
 */
static int
config_detach_enter(device_t dev)
{
	struct lwp *l __diagused;
	int error = 0;

	mutex_enter(&config_misc_lock);

	/*
	 * Wait until attach has fully completed, and until any
	 * concurrent detach (e.g., drvctl racing with USB event
	 * thread) has completed.
	 *
	 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
	 * deviter) to ensure the winner of the race doesn't free the
	 * device leading the loser of the race into use-after-free.
	 *
	 * XXX Not all callers do this!
	 */
	while (dev->dv_pending || dev->dv_detaching) {
		KASSERTMSG(dev->dv_detaching != curlwp,
		    "recursively detaching %s", device_xname(dev));
		error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
		if (error)
			goto out;
	}

	/*
	 * Attach has completed, and no other concurrent detach is
	 * running.  Claim the device for detaching.  This will cause
	 * all new attempts to acquire references to block.
	 */
	KASSERTMSG((l = dev->dv_attaching) == NULL,
	    "lwp %ld [%s] @ %p attaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	KASSERTMSG((l = dev->dv_detaching) == NULL,
	    "lwp %ld [%s] @ %p detaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	dev->dv_detaching = curlwp;

out:	mutex_exit(&config_misc_lock);
	return error;
}
2087 
/*
 * Release the detach claim taken in config_detach_enter and wake any
 * waiters.  Must be called by the same lwp that claimed the device.
 */
static void
config_detach_exit(device_t dev)
{
	struct lwp *l __diagused;

	mutex_enter(&config_misc_lock);
	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
	    device_xname(dev));
	KASSERTMSG((l = dev->dv_detaching) == curlwp,
	    "lwp %ld [%s] @ %p detaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	dev->dv_detaching = NULL;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);
}
2104 
2105 /*
2106  * Detach a device.  Optionally forced (e.g. because of hardware
2107  * removal) and quiet.  Returns zero if successful, non-zero
2108  * (an error code) otherwise.
2109  *
2110  * Note that this code wants to be run from a process context, so
2111  * that the detach can sleep to allow processes which have a device
2112  * open to run and unwind their stacks.
2113  *
2114  * Caller must hold a reference with device_acquire or
2115  * device_lookup_acquire.
2116  */
int
config_detach_release(device_t dev, int flags)
{
	struct alldevs_foray af;
	struct cftable *ct;
	cfdata_t cf;
	const struct cfattach *ca;
	struct cfdriver *cd;
	device_t d __diagused;
	int rv = 0;

	KERNEL_LOCK(1, NULL);

	cf = dev->dv_cfdata;
	KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
		cf->cf_fstate == FSTATE_STAR),
	    "config_detach: %s: bad device fstate: %d",
	    device_xname(dev), cf ? cf->cf_fstate : -1);

	cd = dev->dv_cfdriver;
	KASSERT(cd != NULL);

	ca = dev->dv_cfattach;
	KASSERT(ca != NULL);

	/*
	 * Only one detach at a time, please -- and not until fully
	 * attached.
	 */
	rv = config_detach_enter(dev);
	/*
	 * The caller's reference is consumed here whether or not we
	 * managed to enter the detach; either way the caller must not
	 * use dev's reference again.
	 */
	device_release(dev);
	if (rv) {
		KERNEL_UNLOCK_ONE(NULL);
		return rv;
	}

	mutex_enter(&alldevs_lock);
	if (dev->dv_del_gen != 0) {
		/* Nonzero dv_del_gen: someone else already detached it. */
		mutex_exit(&alldevs_lock);
#ifdef DIAGNOSTIC
		printf("%s: %s is already detached\n", __func__,
		    device_xname(dev));
#endif /* DIAGNOSTIC */
		config_detach_exit(dev);
		KERNEL_UNLOCK_ONE(NULL);
		return ENOENT;
	}
	alldevs_nwrite++;
	mutex_exit(&alldevs_lock);

	/*
	 * Call the driver's .ca_detach function, unless it has none or
	 * we are skipping it because it's unforced shutdown time and
	 * the driver didn't ask to detach on shutdown.
	 */
	if (!detachall &&
	    (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
	    (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
		rv = EOPNOTSUPP;
	} else if (ca->ca_detach != NULL) {
		rv = (*ca->ca_detach)(dev, flags);
	} else
		rv = EOPNOTSUPP;

	KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
	    device_xname(dev), rv);

	/*
	 * If it was not possible to detach the device, then we either
	 * panic() (for the forced but failed case), or return an error.
	 */
	if (rv) {
		/*
		 * Detach failed -- likely EOPNOTSUPP or EBUSY.  Driver
		 * must not have called config_detach_commit.
		 */
		KASSERTMSG(!dev->dv_detach_committed,
		    "%s committed to detaching and then backed out, error=%d",
		    device_xname(dev), rv);
		if (flags & DETACH_FORCE) {
			panic("config_detach: forced detach of %s failed (%d)",
			    device_xname(dev), rv);
		}
		goto out;
	}

	/*
	 * The device has now been successfully detached.
	 */
	dev->dv_detach_done = true;

	/*
	 * If .ca_detach didn't commit to detach, then do that for it.
	 * This wakes any pending device_lookup_acquire calls so they
	 * will fail.
	 */
	config_detach_commit(dev);

	/*
	 * If it was possible to detach the device, ensure that the
	 * device is deactivated.
	 */
	dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */

	/*
	 * Wait for all device_lookup_acquire references -- mostly, for
	 * all attempts to open the device -- to drain.  It is the
	 * responsibility of .ca_detach to ensure anything with open
	 * references will be interrupted and release them promptly,
	 * not block indefinitely.  All new attempts to acquire
	 * references will fail, as config_detach_commit has arranged
	 * by now.
	 */
	mutex_enter(&config_misc_lock);
	localcount_drain(dev->dv_localcount,
	    &config_misc_cv, &config_misc_lock);
	mutex_exit(&config_misc_lock);

	/* Let userland know */
	devmon_report_device(dev, false);

#ifdef DIAGNOSTIC
	/*
	 * Sanity: If you're successfully detached, you should have no
	 * children.  (Note that because children must be attached
	 * after parents, we only need to search the latter part of
	 * the list.)
	 */
	mutex_enter(&alldevs_lock);
	for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
	    d = TAILQ_NEXT(d, dv_list)) {
		if (d->dv_parent == dev && d->dv_del_gen == 0) {
			printf("config_detach: detached device %s"
			    " has children %s\n", device_xname(dev),
			    device_xname(d));
			panic("config_detach");
		}
	}
	mutex_exit(&alldevs_lock);
#endif

	/* notify the parent that the child is gone */
	if (dev->dv_parent) {
		device_t p = dev->dv_parent;
		if (p->dv_cfattach->ca_childdetached)
			(*p->dv_cfattach->ca_childdetached)(p, dev);
	}

	/*
	 * Mark cfdata to show that the unit can be reused, if possible.
	 */
	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
			if (STREQ(cf->cf_name, cd->cd_name)) {
				if (cf->cf_fstate == FSTATE_FOUND &&
				    cf->cf_unit == dev->dv_unit)
					cf->cf_fstate = FSTATE_NOTFOUND;
			}
		}
	}

	if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
		aprint_normal_dev(dev, "detached\n");

out:
	config_detach_exit(dev);

	config_alldevs_enter(&af);
	KASSERT(alldevs_nwrite != 0);
	--alldevs_nwrite;
	/*
	 * On success: unlink dev now if nobody else is iterating the
	 * alldevs list, otherwise mark it for the garbage collector by
	 * recording the deletion generation.
	 */
	if (rv == 0 && dev->dv_del_gen == 0) {
		if (alldevs_nwrite == 0 && alldevs_nread == 0)
			config_devunlink(dev, &af.af_garbage);
		else {
			dev->dv_del_gen = alldevs_gen;
			alldevs_garbage = true;
		}
	}
	config_alldevs_exit(&af);

	KERNEL_UNLOCK_ONE(NULL);

	return rv;
}
2301 
2302 /*
2303  * config_detach(dev, flags)
2304  *
2305  *	Legacy entry point for callers that have not acquired a
2306  *	reference to dev.
2307  *
2308  *	The caller is required to hold the kernel lock as a fragile
2309  *	defence against races.
2310  *
2311  *	Callers should be converted to use device_acquire under a lock
2312  *	taken also by .ca_childdetached to synchronize access to the
 *	device_t, and then config_detach_release outside the lock.
2314  *	Alternatively, most drivers detach children only in their own
2315  *	detach routines, which can be done with config_detach_children
2316  *	instead.
2317  */
int
config_detach(device_t dev, int flags)
{

	/*
	 * Take a reference on behalf of the caller; it is consumed by
	 * config_detach_release whether or not the detach succeeds.
	 */
	device_acquire(dev);
	return config_detach_release(dev, flags);
}
2325 
2326 /*
2327  * config_detach_commit(dev)
2328  *
2329  *	Issued by a driver's .ca_detach routine to notify anyone
2330  *	waiting in device_lookup_acquire that the driver is committed
2331  *	to detaching the device, which allows device_lookup_acquire to
2332  *	wake up and fail immediately.
2333  *
2334  *	Safe to call multiple times -- idempotent.  Must be called
2335  *	during config_detach_enter/exit.  Safe to use with
2336  *	device_lookup because the device is not actually removed from
2337  *	the table until after config_detach_exit.
2338  */
void
config_detach_commit(device_t dev)
{
	struct lwp *l __diagused;

	mutex_enter(&config_misc_lock);
	/* Must be called from inside config_detach_enter/exit... */
	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
	    device_xname(dev));
	/* ...and only by the lwp that is doing the detach. */
	KASSERTMSG((l = dev->dv_detaching) == curlwp,
	    "lwp %ld [%s] @ %p detaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	/* Latch the commitment and wake device_lookup_acquire waiters. */
	dev->dv_detach_committed = true;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);
}
2355 
2356 int
2357 config_detach_children(device_t parent, int flags)
2358 {
2359 	device_t dv;
2360 	deviter_t di;
2361 	int error = 0;
2362 
2363 	KASSERT(KERNEL_LOCKED_P());
2364 
2365 	for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2366 	     dv = deviter_next(&di)) {
2367 		if (device_parent(dv) != parent)
2368 			continue;
2369 		if ((error = config_detach(dv, flags)) != 0)
2370 			break;
2371 	}
2372 	deviter_release(&di);
2373 	return error;
2374 }
2375 
2376 device_t
2377 shutdown_first(struct shutdown_state *s)
2378 {
2379 	if (!s->initialized) {
2380 		deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2381 		s->initialized = true;
2382 	}
2383 	return shutdown_next(s);
2384 }
2385 
2386 device_t
2387 shutdown_next(struct shutdown_state *s)
2388 {
2389 	device_t dv;
2390 
2391 	while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2392 		;
2393 
2394 	if (dv == NULL)
2395 		s->initialized = false;
2396 
2397 	return dv;
2398 }
2399 
bool
config_detach_all(int how)
{
	static struct shutdown_state s;
	device_t curdev;
	bool progress = false;
	int flags;

	KERNEL_LOCK(1, NULL);

	/* Skip the orderly detach when not syncing or when dumping. */
	if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
		goto out;

	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
		flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
	else
		flags = DETACH_SHUTDOWN;

	/* Walk devices leaves-first; report whether anything detached. */
	for (curdev = shutdown_first(&s); curdev != NULL;
	     curdev = shutdown_next(&s)) {
		aprint_debug(" detaching %s, ", device_xname(curdev));
		if (config_detach(curdev, flags) == 0) {
			progress = true;
			aprint_debug("success.");
		} else
			aprint_debug("failed.");
	}

out:	KERNEL_UNLOCK_ONE(NULL);
	return progress;
}
2431 
2432 static bool
2433 device_is_ancestor_of(device_t ancestor, device_t descendant)
2434 {
2435 	device_t dv;
2436 
2437 	for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2438 		if (device_parent(dv) == ancestor)
2439 			return true;
2440 	}
2441 	return false;
2442 }
2443 
int
config_deactivate(device_t dev)
{
	deviter_t di;
	const struct cfattach *ca;
	device_t descendant;
	int s, rv = 0, oflags;

	/*
	 * Deactivate dev and all of its descendants: clear DVF_ACTIVE
	 * and invoke each driver's .ca_activate hook with
	 * DVACT_DEACTIVATE.  NOTE(review): rv holds only the *last*
	 * hook's status, so an earlier failure can be masked by a
	 * later success -- confirm that is intended.
	 */
	for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
	     descendant != NULL;
	     descendant = deviter_next(&di)) {
		if (dev != descendant &&
		    !device_is_ancestor_of(dev, descendant))
			continue;

		/* Already inactive: nothing to do. */
		if ((descendant->dv_flags & DVF_ACTIVE) == 0)
			continue;

		ca = descendant->dv_cfattach;
		oflags = descendant->dv_flags;

		descendant->dv_flags &= ~DVF_ACTIVE;
		if (ca->ca_activate == NULL)
			continue;
		s = splhigh();
		rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
		splx(s);
		/* Hook refused: restore the device's previous flags. */
		if (rv != 0)
			descendant->dv_flags = oflags;
	}
	deviter_release(&di);
	return rv;
}
2477 
2478 /*
2479  * Defer the configuration of the specified device until all
2480  * of its parent's devices have been attached.
2481  */
void
config_defer(device_t dev, void (*func)(device_t))
{
	struct deferred_config *dc;

	if (dev->dv_parent == NULL)
		panic("config_defer: can't defer config of a root device");

	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);

	/*
	 * Hold the device "pending" until the deferred func runs;
	 * config_process_deferred decrements after calling it.
	 */
	config_pending_incr(dev);

	mutex_enter(&config_misc_lock);
#ifdef DIAGNOSTIC
	struct deferred_config *odc;
	TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
		if (odc->dc_dev == dev)
			panic("config_defer: deferred twice");
	}
#endif
	dc->dc_dev = dev;
	dc->dc_func = func;
	TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
	mutex_exit(&config_misc_lock);
}
2507 
2508 /*
2509  * Defer some autoconfiguration for a device until after interrupts
2510  * are enabled.
2511  */
void
config_interrupts(device_t dev, void (*func)(device_t))
{
	struct deferred_config *dc;

	/*
	 * If interrupts are enabled, callback now.
	 */
	if (cold == 0) {
		(*func)(dev);
		return;
	}

	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);

	/* Hold the device "pending" until the deferred func runs. */
	config_pending_incr(dev);

	mutex_enter(&config_misc_lock);
#ifdef DIAGNOSTIC
	struct deferred_config *odc;
	TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
		if (odc->dc_dev == dev)
			panic("config_interrupts: deferred twice");
	}
#endif
	dc->dc_dev = dev;
	dc->dc_func = func;
	TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
	mutex_exit(&config_misc_lock);
}
2542 
2543 /*
2544  * Defer some autoconfiguration for a device until after root file system
2545  * is mounted (to load firmware etc).
2546  */
void
config_mountroot(device_t dev, void (*func)(device_t))
{
	struct deferred_config *dc;

	/*
	 * If root file system is mounted, callback now.
	 */
	if (root_is_mounted) {
		(*func)(dev);
		return;
	}

	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);

	/*
	 * NOTE(review): unlike config_defer/config_interrupts, this
	 * does not call config_pending_incr -- presumably mountroot
	 * callbacks need not hold up config_finalize; confirm.
	 */
	mutex_enter(&config_misc_lock);
#ifdef DIAGNOSTIC
	struct deferred_config *odc;
	TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
		if (odc->dc_dev == dev)
			panic("%s: deferred twice", __func__);
	}
#endif

	dc->dc_dev = dev;
	dc->dc_func = func;
	TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
	mutex_exit(&config_misc_lock);
}
2576 
2577 /*
2578  * Process a deferred configuration queue.
2579  */
static void
config_process_deferred(struct deferred_config_head *queue, device_t parent)
{
	struct deferred_config *dc;

	KASSERT(KERNEL_LOCKED_P());

	mutex_enter(&config_misc_lock);
	dc = TAILQ_FIRST(queue);
	while (dc) {
		/* parent == NULL processes the whole queue. */
		if (parent == NULL || dc->dc_dev->dv_parent == parent) {
			TAILQ_REMOVE(queue, dc, dc_queue);
			/* Run the callback unlocked. */
			mutex_exit(&config_misc_lock);

			(*dc->dc_func)(dc->dc_dev);
			config_pending_decr(dc->dc_dev);
			kmem_free(dc, sizeof(*dc));

			mutex_enter(&config_misc_lock);
			/* Restart, queue might have changed */
			dc = TAILQ_FIRST(queue);
		} else {
			dc = TAILQ_NEXT(dc, dc_queue);
		}
	}
	mutex_exit(&config_misc_lock);
}
2607 
2608 /*
2609  * Manipulate the config_pending semaphore.
2610  */
void
config_pending_incr(device_t dev)
{

	mutex_enter(&config_misc_lock);
	KASSERTMSG(dev->dv_pending < INT_MAX,
	    "%s: excess config_pending_incr", device_xname(dev));
	/* First reference puts the device on the config_pending list. */
	if (dev->dv_pending++ == 0)
		TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
#ifdef DEBUG_AUTOCONF
	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
#endif
	mutex_exit(&config_misc_lock);
}
2625 
void
config_pending_decr(device_t dev)
{

	mutex_enter(&config_misc_lock);
	KASSERTMSG(dev->dv_pending > 0,
	    "%s: excess config_pending_decr", device_xname(dev));
	/*
	 * Last reference removes the device from the list and wakes
	 * config_finalize, which waits for config_pending to empty.
	 */
	if (--dev->dv_pending == 0) {
		TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
		cv_broadcast(&config_misc_cv);
	}
#ifdef DEBUG_AUTOCONF
	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
#endif
	mutex_exit(&config_misc_lock);
}
2642 
2643 /*
2644  * Register a "finalization" routine.  Finalization routines are
2645  * called iteratively once all real devices have been found during
2646  * autoconfiguration, for as long as any one finalizer has done
2647  * any work.
2648  */
int
config_finalize_register(device_t dev, int (*fn)(device_t))
{
	struct finalize_hook *f;
	int error = 0;

	KERNEL_LOCK(1, NULL);

	/*
	 * If finalization has already been done, invoke the
	 * callback function now.
	 */
	if (config_finalize_done) {
		/* Mirror config_finalize's run-until-no-work loop. */
		while ((*fn)(dev) != 0)
			/* loop */ ;
		goto out;
	}

	/* Ensure this isn't already on the list. */
	TAILQ_FOREACH(f, &config_finalize_list, f_list) {
		if (f->f_func == fn && f->f_dev == dev) {
			error = EEXIST;
			goto out;
		}
	}

	f = kmem_alloc(sizeof(*f), KM_SLEEP);
	f->f_func = fn;
	f->f_dev = dev;
	TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);

	/* Success!  */
	error = 0;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
2686 
void
config_finalize(void)
{
	struct finalize_hook *f;
	struct pdevinit *pdev;
	extern struct pdevinit pdevinit[];
	int errcnt, rv;

	/*
	 * Now that device driver threads have been created, wait for
	 * them to finish any deferred autoconfiguration.
	 */
	mutex_enter(&config_misc_lock);
	while (!TAILQ_EMPTY(&config_pending)) {
		device_t dev;
		int error;

		/* Wake up every second to log who we're waiting for. */
		error = cv_timedwait(&config_misc_cv, &config_misc_lock,
		    mstohz(1000));
		if (error == EWOULDBLOCK) {
			aprint_debug("waiting for devices:");
			TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
				aprint_debug(" %s", device_xname(dev));
			aprint_debug("\n");
		}
	}
	mutex_exit(&config_misc_lock);

	KERNEL_LOCK(1, NULL);

	/* Attach pseudo-devices. */
	for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
		(*pdev->pdev_attach)(pdev->pdev_count);

	/* Run the hooks until none of them does any work. */
	do {
		rv = 0;
		TAILQ_FOREACH(f, &config_finalize_list, f_list)
			rv |= (*f->f_func)(f->f_dev);
	} while (rv != 0);

	/* From now on config_finalize_register runs hooks directly. */
	config_finalize_done = 1;

	/* Now free all the hooks. */
	while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
		TAILQ_REMOVE(&config_finalize_list, f, f_list);
		kmem_free(f, sizeof(*f));
	}

	KERNEL_UNLOCK_ONE(NULL);

	errcnt = aprint_get_error_count();
	if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
	    (boothowto & AB_VERBOSE) == 0) {
		mutex_enter(&config_misc_lock);
		/* Finish the boot-time twiddle output, if it was on. */
		if (config_do_twiddle) {
			config_do_twiddle = 0;
			printf_nolog(" done.\n");
		}
		mutex_exit(&config_misc_lock);
	}
	if (errcnt != 0) {
		printf("WARNING: %d error%s while detecting hardware; "
		    "check system log.\n", errcnt,
		    errcnt == 1 ? "" : "s");
	}
}
2754 
2755 void
2756 config_twiddle_init(void)
2757 {
2758 
2759 	if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2760 		config_do_twiddle = 1;
2761 	}
2762 	callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2763 }
2764 
2765 void
2766 config_twiddle_fn(void *cookie)
2767 {
2768 
2769 	mutex_enter(&config_misc_lock);
2770 	if (config_do_twiddle) {
2771 		twiddle();
2772 		callout_schedule(&config_twiddle_ch, mstohz(100));
2773 	}
2774 	mutex_exit(&config_misc_lock);
2775 }
2776 
static void
config_alldevs_enter(struct alldevs_foray *af)
{
	/*
	 * Take alldevs_lock and sweep already-deleted devices into
	 * af->af_garbage; config_alldevs_exit frees them unlocked.
	 */
	TAILQ_INIT(&af->af_garbage);
	mutex_enter(&alldevs_lock);
	config_collect_garbage(&af->af_garbage);
}
2784 
static void
config_alldevs_exit(struct alldevs_foray *af)
{
	/* Drop the lock first; dispose of collected garbage unlocked. */
	mutex_exit(&alldevs_lock);
	config_dump_garbage(&af->af_garbage);
}
2791 
2792 /*
2793  * device_lookup:
2794  *
2795  *	Look up a device instance for a given driver.
2796  *
2797  *	Caller is responsible for ensuring the device's state is
2798  *	stable, either by holding a reference already obtained with
2799  *	device_lookup_acquire or by otherwise ensuring the device is
2800  *	attached and can't be detached (e.g., holding an open device
2801  *	node and ensuring *_detach calls vdevgone).
2802  *
2803  *	XXX Find a way to assert this.
2804  *
2805  *	Safe for use up to and including interrupt context at IPL_VM.
2806  *	Never sleeps.
2807  */
2808 device_t
2809 device_lookup(cfdriver_t cd, int unit)
2810 {
2811 	device_t dv;
2812 
2813 	mutex_enter(&alldevs_lock);
2814 	if (unit < 0 || unit >= cd->cd_ndevs)
2815 		dv = NULL;
2816 	else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2817 		dv = NULL;
2818 	mutex_exit(&alldevs_lock);
2819 
2820 	return dv;
2821 }
2822 
2823 /*
2824  * device_lookup_private:
2825  *
2826  *	Look up a softc instance for a given driver.
2827  */
2828 void *
2829 device_lookup_private(cfdriver_t cd, int unit)
2830 {
2831 
2832 	return device_private(device_lookup(cd, unit));
2833 }
2834 
2835 /*
2836  * device_lookup_acquire:
2837  *
2838  *	Look up a device instance for a given driver, and return a
2839  *	reference to it that must be released by device_release.
2840  *
2841  *	=> If the device is still attaching, blocks until *_attach has
2842  *	   returned.
2843  *
2844  *	=> If the device is detaching, blocks until *_detach has
2845  *	   returned.  May succeed or fail in that case, depending on
2846  *	   whether *_detach has backed out (EBUSY) or committed to
2847  *	   detaching.
2848  *
2849  *	May sleep.
2850  */
device_t
device_lookup_acquire(cfdriver_t cd, int unit)
{
	device_t dv;

	ASSERT_SLEEPABLE();

	/* XXX This should have a pserialized fast path -- TBD.  */
	/* Lock order: config_misc_lock before alldevs_lock. */
	mutex_enter(&config_misc_lock);
	mutex_enter(&alldevs_lock);
retry:	if (unit < 0 || unit >= cd->cd_ndevs ||
	    (dv = cd->cd_devs[unit]) == NULL ||
	    dv->dv_del_gen != 0 ||
	    dv->dv_detach_committed) {
		dv = NULL;
	} else {
		/*
		 * Wait for the device to stabilize, if attaching or
		 * detaching.  Either way we must wait for *_attach or
		 * *_detach to complete, and either way we must retry:
		 * even if detaching, *_detach might fail (EBUSY) so
		 * the device may still be there.
		 */
		if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
		    dv->dv_detaching != NULL) {
			/* Drop only alldevs_lock while sleeping. */
			mutex_exit(&alldevs_lock);
			cv_wait(&config_misc_cv, &config_misc_lock);
			mutex_enter(&alldevs_lock);
			goto retry;
		}
		device_acquire(dv);
	}
	mutex_exit(&alldevs_lock);
	mutex_exit(&config_misc_lock);

	return dv;
}
2888 
2889 /*
2890  * device_acquire:
2891  *
2892  *	Acquire a reference to a device.  It is the caller's
2893  *	responsibility to ensure that the device's .ca_detach routine
2894  *	cannot return before calling this.  Caller must release the
2895  *	reference with device_release or config_detach_release.
2896  */
void
device_acquire(device_t dv)
{

	/*
	 * No lock because the caller has promised that this can't
	 * change concurrently with device_acquire.
	 */
	/*
	 * NOTE(review): the assertion condition dereferences dv, so a
	 * NULL dv faults before the message's "(null)" fallback can
	 * ever be used -- confirm whether NULL was meant to be legal.
	 */
	KASSERTMSG(!dv->dv_detach_done, "%s",
	    dv == NULL ? "(null)" : device_xname(dv));
	localcount_acquire(dv->dv_localcount);
}
2909 
2910 /*
2911  * device_release:
2912  *
2913  *	Release a reference to a device acquired with device_acquire or
2914  *	device_lookup_acquire.
2915  */
void
device_release(device_t dv)
{

	/* May wake a localcount_drain in config_detach_release. */
	localcount_release(dv->dv_localcount,
	    &config_misc_cv, &config_misc_lock);
}
2923 
2924 /*
2925  * device_find_by_xname:
2926  *
2927  *	Returns the device of the given name or NULL if it doesn't exist.
2928  */
2929 device_t
2930 device_find_by_xname(const char *name)
2931 {
2932 	device_t dv;
2933 	deviter_t di;
2934 
2935 	for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2936 		if (strcmp(device_xname(dv), name) == 0)
2937 			break;
2938 	}
2939 	deviter_release(&di);
2940 
2941 	return dv;
2942 }
2943 
2944 /*
2945  * device_find_by_driver_unit:
2946  *
2947  *	Returns the device of the given driver name and unit or
2948  *	NULL if it doesn't exist.
2949  */
2950 device_t
2951 device_find_by_driver_unit(const char *name, int unit)
2952 {
2953 	struct cfdriver *cd;
2954 
2955 	if ((cd = config_cfdriver_lookup(name)) == NULL)
2956 		return NULL;
2957 	return device_lookup(cd, unit);
2958 }
2959 
2960 static bool
2961 match_strcmp(const char * const s1, const char * const s2)
2962 {
2963 	return strcmp(s1, s2) == 0;
2964 }
2965 
2966 static bool
2967 match_pmatch(const char * const s1, const char * const s2)
2968 {
2969 	return pmatch(s1, s2, NULL) == 2;
2970 }
2971 
2972 static bool
2973 strarray_match_internal(const char ** const strings,
2974     unsigned int const nstrings, const char * const str,
2975     unsigned int * const indexp,
2976     bool (*match_fn)(const char *, const char *))
2977 {
2978 	unsigned int i;
2979 
2980 	if (strings == NULL || nstrings == 0) {
2981 		return false;
2982 	}
2983 
2984 	for (i = 0; i < nstrings; i++) {
2985 		if ((*match_fn)(strings[i], str)) {
2986 			*indexp = i;
2987 			return true;
2988 		}
2989 	}
2990 
2991 	return false;
2992 }
2993 
2994 static int
2995 strarray_match(const char ** const strings, unsigned int const nstrings,
2996     const char * const str)
2997 {
2998 	unsigned int idx;
2999 
3000 	if (strarray_match_internal(strings, nstrings, str, &idx,
3001 				    match_strcmp)) {
3002 		return (int)(nstrings - idx);
3003 	}
3004 	return 0;
3005 }
3006 
3007 static int
3008 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
3009     const char * const pattern)
3010 {
3011 	unsigned int idx;
3012 
3013 	if (strarray_match_internal(strings, nstrings, pattern, &idx,
3014 				    match_pmatch)) {
3015 		return (int)(nstrings - idx);
3016 	}
3017 	return 0;
3018 }
3019 
3020 static int
3021 device_compatible_match_strarray_internal(
3022     const char **device_compats, int ndevice_compats,
3023     const struct device_compatible_entry *driver_compats,
3024     const struct device_compatible_entry **matching_entryp,
3025     int (*match_fn)(const char **, unsigned int, const char *))
3026 {
3027 	const struct device_compatible_entry *dce = NULL;
3028 	int rv;
3029 
3030 	if (ndevice_compats == 0 || device_compats == NULL ||
3031 	    driver_compats == NULL)
3032 		return 0;
3033 
3034 	for (dce = driver_compats; dce->compat != NULL; dce++) {
3035 		rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
3036 		if (rv != 0) {
3037 			if (matching_entryp != NULL) {
3038 				*matching_entryp = dce;
3039 			}
3040 			return rv;
3041 		}
3042 	}
3043 	return 0;
3044 }
3045 
3046 /*
3047  * device_compatible_match:
3048  *
3049  *	Match a driver's "compatible" data against a device's
 *	"compatible" strings.  Returns a result weighted by
3051  *	which device "compatible" string was matched.
3052  */
3053 int
3054 device_compatible_match(const char **device_compats, int ndevice_compats,
3055     const struct device_compatible_entry *driver_compats)
3056 {
3057 	return device_compatible_match_strarray_internal(device_compats,
3058 	    ndevice_compats, driver_compats, NULL, strarray_match);
3059 }
3060 
3061 /*
3062  * device_compatible_pmatch:
3063  *
3064  *	Like device_compatible_match(), but uses pmatch(9) to compare
3065  *	the device "compatible" strings against patterns in the
3066  *	driver's "compatible" data.
3067  */
3068 int
3069 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
3070     const struct device_compatible_entry *driver_compats)
3071 {
3072 	return device_compatible_match_strarray_internal(device_compats,
3073 	    ndevice_compats, driver_compats, NULL, strarray_pmatch);
3074 }
3075 
3076 static int
3077 device_compatible_match_strlist_internal(
3078     const char * const device_compats, size_t const device_compatsize,
3079     const struct device_compatible_entry *driver_compats,
3080     const struct device_compatible_entry **matching_entryp,
3081     int (*match_fn)(const char *, size_t, const char *))
3082 {
3083 	const struct device_compatible_entry *dce = NULL;
3084 	int rv;
3085 
3086 	if (device_compats == NULL || device_compatsize == 0 ||
3087 	    driver_compats == NULL)
3088 		return 0;
3089 
3090 	for (dce = driver_compats; dce->compat != NULL; dce++) {
3091 		rv = (*match_fn)(device_compats, device_compatsize,
3092 		    dce->compat);
3093 		if (rv != 0) {
3094 			if (matching_entryp != NULL) {
3095 				*matching_entryp = dce;
3096 			}
3097 			return rv;
3098 		}
3099 	}
3100 	return 0;
3101 }
3102 
3103 /*
3104  * device_compatible_match_strlist:
3105  *
3106  *	Like device_compatible_match(), but take the device
3107  *	"compatible" strings as an OpenFirmware-style string
3108  *	list.
3109  */
3110 int
3111 device_compatible_match_strlist(
3112     const char * const device_compats, size_t const device_compatsize,
3113     const struct device_compatible_entry *driver_compats)
3114 {
3115 	return device_compatible_match_strlist_internal(device_compats,
3116 	    device_compatsize, driver_compats, NULL, strlist_match);
3117 }
3118 
3119 /*
3120  * device_compatible_pmatch_strlist:
3121  *
3122  *	Like device_compatible_pmatch(), but take the device
3123  *	"compatible" strings as an OpenFirmware-style string
3124  *	list.
3125  */
3126 int
3127 device_compatible_pmatch_strlist(
3128     const char * const device_compats, size_t const device_compatsize,
3129     const struct device_compatible_entry *driver_compats)
3130 {
3131 	return device_compatible_match_strlist_internal(device_compats,
3132 	    device_compatsize, driver_compats, NULL, strlist_pmatch);
3133 }
3134 
3135 static int
3136 device_compatible_match_id_internal(
3137     uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
3138     const struct device_compatible_entry *driver_compats,
3139     const struct device_compatible_entry **matching_entryp)
3140 {
3141 	const struct device_compatible_entry *dce = NULL;
3142 
3143 	if (mask == 0)
3144 		return 0;
3145 
3146 	for (dce = driver_compats; dce->id != sentinel_id; dce++) {
3147 		if ((id & mask) == dce->id) {
3148 			if (matching_entryp != NULL) {
3149 				*matching_entryp = dce;
3150 			}
3151 			return 1;
3152 		}
3153 	}
3154 	return 0;
3155 }
3156 
3157 /*
3158  * device_compatible_match_id:
3159  *
3160  *	Like device_compatible_match(), but takes a single
3161  *	unsigned integer device ID.
3162  */
3163 int
3164 device_compatible_match_id(
3165     uintptr_t const id, uintptr_t const sentinel_id,
3166     const struct device_compatible_entry *driver_compats)
3167 {
3168 	return device_compatible_match_id_internal(id, (uintptr_t)-1,
3169 	    sentinel_id, driver_compats, NULL);
3170 }
3171 
3172 /*
3173  * device_compatible_lookup:
3174  *
3175  *	Look up and return the device_compatible_entry, using the
3176  *	same matching criteria used by device_compatible_match().
3177  */
3178 const struct device_compatible_entry *
3179 device_compatible_lookup(const char **device_compats, int ndevice_compats,
3180 			 const struct device_compatible_entry *driver_compats)
3181 {
3182 	const struct device_compatible_entry *dce;
3183 
3184 	if (device_compatible_match_strarray_internal(device_compats,
3185 	    ndevice_compats, driver_compats, &dce, strarray_match)) {
3186 		return dce;
3187 	}
3188 	return NULL;
3189 }
3190 
3191 /*
3192  * device_compatible_plookup:
3193  *
3194  *	Look up and return the device_compatible_entry, using the
3195  *	same matching criteria used by device_compatible_pmatch().
3196  */
3197 const struct device_compatible_entry *
3198 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3199 			  const struct device_compatible_entry *driver_compats)
3200 {
3201 	const struct device_compatible_entry *dce;
3202 
3203 	if (device_compatible_match_strarray_internal(device_compats,
3204 	    ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3205 		return dce;
3206 	}
3207 	return NULL;
3208 }
3209 
3210 /*
3211  * device_compatible_lookup_strlist:
3212  *
3213  *	Like device_compatible_lookup(), but take the device
3214  *	"compatible" strings as an OpenFirmware-style string
3215  *	list.
3216  */
3217 const struct device_compatible_entry *
3218 device_compatible_lookup_strlist(
3219     const char * const device_compats, size_t const device_compatsize,
3220     const struct device_compatible_entry *driver_compats)
3221 {
3222 	const struct device_compatible_entry *dce;
3223 
3224 	if (device_compatible_match_strlist_internal(device_compats,
3225 	    device_compatsize, driver_compats, &dce, strlist_match)) {
3226 		return dce;
3227 	}
3228 	return NULL;
3229 }
3230 
3231 /*
3232  * device_compatible_plookup_strlist:
3233  *
3234  *	Like device_compatible_plookup(), but take the device
3235  *	"compatible" strings as an OpenFirmware-style string
3236  *	list.
3237  */
3238 const struct device_compatible_entry *
3239 device_compatible_plookup_strlist(
3240     const char * const device_compats, size_t const device_compatsize,
3241     const struct device_compatible_entry *driver_compats)
3242 {
3243 	const struct device_compatible_entry *dce;
3244 
3245 	if (device_compatible_match_strlist_internal(device_compats,
3246 	    device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3247 		return dce;
3248 	}
3249 	return NULL;
3250 }
3251 
3252 /*
3253  * device_compatible_lookup_id:
3254  *
3255  *	Like device_compatible_lookup(), but takes a single
3256  *	unsigned integer device ID.
3257  */
3258 const struct device_compatible_entry *
3259 device_compatible_lookup_id(
3260     uintptr_t const id, uintptr_t const sentinel_id,
3261     const struct device_compatible_entry *driver_compats)
3262 {
3263 	const struct device_compatible_entry *dce;
3264 
3265 	if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3266 	    sentinel_id, driver_compats, &dce)) {
3267 		return dce;
3268 	}
3269 	return NULL;
3270 }
3271 
3272 /*
3273  * Power management related functions.
3274  */
3275 
3276 bool
3277 device_pmf_is_registered(device_t dev)
3278 {
3279 	return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3280 }
3281 
bool
device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
{
	/* Already suspended at the driver level: nothing to do. */
	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
		return true;
	/* Requires the class level to be suspended first. */
	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
		return false;
	/* Run the driver hook only when the qual reaches driver depth. */
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
	    dev->dv_driver_suspend != NULL &&
	    !(*dev->dv_driver_suspend)(dev, qual))
		return false;

	dev->dv_flags |= DVF_DRIVER_SUSPENDED;
	return true;
}
3297 
3298 bool
3299 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3300 {
3301 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3302 		return true;
3303 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3304 		return false;
3305 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3306 	    dev->dv_driver_resume != NULL &&
3307 	    !(*dev->dv_driver_resume)(dev, qual))
3308 		return false;
3309 
3310 	dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3311 	return true;
3312 }
3313 
3314 bool
3315 device_pmf_driver_shutdown(device_t dev, int how)
3316 {
3317 
3318 	if (*dev->dv_driver_shutdown != NULL &&
3319 	    !(*dev->dv_driver_shutdown)(dev, how))
3320 		return false;
3321 	return true;
3322 }
3323 
3324 void
3325 device_pmf_driver_register(device_t dev,
3326     bool (*suspend)(device_t, const pmf_qual_t *),
3327     bool (*resume)(device_t, const pmf_qual_t *),
3328     bool (*shutdown)(device_t, int))
3329 {
3330 
3331 	dev->dv_driver_suspend = suspend;
3332 	dev->dv_driver_resume = resume;
3333 	dev->dv_driver_shutdown = shutdown;
3334 	dev->dv_flags |= DVF_POWER_HANDLERS;
3335 }
3336 
/*
 * device_pmf_driver_deregister:
 *
 *	Remove the driver-level power handlers from `dev' and wait
 *	until no thread holds, or waits for, the device's PMF lock.
 */
void
device_pmf_driver_deregister(device_t dev)
{
	device_lock_t dvl = device_getlock(dev);

	dev->dv_driver_suspend = NULL;
	dev->dv_driver_resume = NULL;

	mutex_enter(&dvl->dvl_mtx);
	/* Clearing DVF_POWER_HANDLERS makes device_pmf_lock1() fail,
	 * so waiters drain instead of re-acquiring the lock. */
	dev->dv_flags &= ~DVF_POWER_HANDLERS;
	while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
		/* Wake a thread that waits for the lock.  That
		 * thread will fail to acquire the lock, and then
		 * it will wake the next thread that waits for the
		 * lock, or else it will wake us.
		 */
		cv_signal(&dvl->dvl_cv);
		pmflock_debug(dev, __func__, __LINE__);
		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
		pmflock_debug(dev, __func__, __LINE__);
	}
	mutex_exit(&dvl->dvl_mtx);
}
3360 
3361 void
3362 device_pmf_driver_child_register(device_t dev)
3363 {
3364 	device_t parent = device_parent(dev);
3365 
3366 	if (parent == NULL || parent->dv_driver_child_register == NULL)
3367 		return;
3368 	(*parent->dv_driver_child_register)(dev);
3369 }
3370 
3371 void
3372 device_pmf_driver_set_child_register(device_t dev,
3373     void (*child_register)(device_t))
3374 {
3375 	dev->dv_driver_child_register = child_register;
3376 }
3377 
/*
 * pmflock_debug:
 *
 *	Debug tracing for the PMF lock: print the caller's function
 *	name and line, the current LWP's name, and the lock state.
 *	Compiles to a no-op unless PMFLOCK_DEBUG is defined.
 */
static void
pmflock_debug(device_t dev, const char *func, int line)
{
#ifdef PMFLOCK_DEBUG
	device_lock_t dvl = device_getlock(dev);
	const char *curlwp_name;

	/* An LWP may be unnamed; fall back to the process name. */
	if (curlwp->l_name != NULL)
		curlwp_name = curlwp->l_name;
	else
		curlwp_name = curlwp->l_proc->p_comm;

	aprint_debug_dev(dev,
	    "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
	    curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
#endif	/* PMFLOCK_DEBUG */
}
3395 
/*
 * device_pmf_lock1:
 *
 *	Acquire the device's PMF lock, recursively if the calling LWP
 *	already holds it.  Caller must hold dvl_mtx.  Returns false if
 *	the device's power handlers are deregistered while we wait.
 */
static bool
device_pmf_lock1(device_t dev)
{
	device_lock_t dvl = device_getlock(dev);

	/* Wait while some other LWP holds the lock, bailing out if
	 * deregistration clears DVF_POWER_HANDLERS in the meantime. */
	while (device_pmf_is_registered(dev) &&
	    dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
		dvl->dvl_nwait++;
		pmflock_debug(dev, __func__, __LINE__);
		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
		pmflock_debug(dev, __func__, __LINE__);
		dvl->dvl_nwait--;
	}
	if (!device_pmf_is_registered(dev)) {
		pmflock_debug(dev, __func__, __LINE__);
		/* We could not acquire the lock, but some other thread may
		 * wait for it, also.  Wake that thread.
		 */
		cv_signal(&dvl->dvl_cv);
		return false;
	}
	/* Take (or recursively re-take) the lock and record the holder. */
	dvl->dvl_nlock++;
	dvl->dvl_holder = curlwp;
	pmflock_debug(dev, __func__, __LINE__);
	return true;
}
3422 
3423 bool
3424 device_pmf_lock(device_t dev)
3425 {
3426 	bool rc;
3427 	device_lock_t dvl = device_getlock(dev);
3428 
3429 	mutex_enter(&dvl->dvl_mtx);
3430 	rc = device_pmf_lock1(dev);
3431 	mutex_exit(&dvl->dvl_mtx);
3432 
3433 	return rc;
3434 }
3435 
3436 void
3437 device_pmf_unlock(device_t dev)
3438 {
3439 	device_lock_t dvl = device_getlock(dev);
3440 
3441 	KASSERT(dvl->dvl_nlock > 0);
3442 	mutex_enter(&dvl->dvl_mtx);
3443 	if (--dvl->dvl_nlock == 0)
3444 		dvl->dvl_holder = NULL;
3445 	cv_signal(&dvl->dvl_cv);
3446 	pmflock_debug(dev, __func__, __LINE__);
3447 	mutex_exit(&dvl->dvl_mtx);
3448 }
3449 
3450 device_lock_t
3451 device_getlock(device_t dev)
3452 {
3453 	return &dev->dv_lock;
3454 }
3455 
3456 void *
3457 device_pmf_bus_private(device_t dev)
3458 {
3459 	return dev->dv_bus_private;
3460 }
3461 
3462 bool
3463 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3464 {
3465 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3466 		return true;
3467 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3468 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3469 		return false;
3470 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3471 	    dev->dv_bus_suspend != NULL &&
3472 	    !(*dev->dv_bus_suspend)(dev, qual))
3473 		return false;
3474 
3475 	dev->dv_flags |= DVF_BUS_SUSPENDED;
3476 	return true;
3477 }
3478 
3479 bool
3480 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3481 {
3482 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3483 		return true;
3484 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3485 	    dev->dv_bus_resume != NULL &&
3486 	    !(*dev->dv_bus_resume)(dev, qual))
3487 		return false;
3488 
3489 	dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3490 	return true;
3491 }
3492 
3493 bool
3494 device_pmf_bus_shutdown(device_t dev, int how)
3495 {
3496 
3497 	if (*dev->dv_bus_shutdown != NULL &&
3498 	    !(*dev->dv_bus_shutdown)(dev, how))
3499 		return false;
3500 	return true;
3501 }
3502 
3503 void
3504 device_pmf_bus_register(device_t dev, void *priv,
3505     bool (*suspend)(device_t, const pmf_qual_t *),
3506     bool (*resume)(device_t, const pmf_qual_t *),
3507     bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3508 {
3509 	dev->dv_bus_private = priv;
3510 	dev->dv_bus_resume = resume;
3511 	dev->dv_bus_suspend = suspend;
3512 	dev->dv_bus_shutdown = shutdown;
3513 	dev->dv_bus_deregister = deregister;
3514 }
3515 
3516 void
3517 device_pmf_bus_deregister(device_t dev)
3518 {
3519 	if (dev->dv_bus_deregister == NULL)
3520 		return;
3521 	(*dev->dv_bus_deregister)(dev);
3522 	dev->dv_bus_private = NULL;
3523 	dev->dv_bus_suspend = NULL;
3524 	dev->dv_bus_resume = NULL;
3525 	dev->dv_bus_deregister = NULL;
3526 }
3527 
3528 void *
3529 device_pmf_class_private(device_t dev)
3530 {
3531 	return dev->dv_class_private;
3532 }
3533 
3534 bool
3535 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3536 {
3537 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3538 		return true;
3539 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3540 	    dev->dv_class_suspend != NULL &&
3541 	    !(*dev->dv_class_suspend)(dev, qual))
3542 		return false;
3543 
3544 	dev->dv_flags |= DVF_CLASS_SUSPENDED;
3545 	return true;
3546 }
3547 
3548 bool
3549 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3550 {
3551 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3552 		return true;
3553 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3554 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3555 		return false;
3556 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3557 	    dev->dv_class_resume != NULL &&
3558 	    !(*dev->dv_class_resume)(dev, qual))
3559 		return false;
3560 
3561 	dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3562 	return true;
3563 }
3564 
3565 void
3566 device_pmf_class_register(device_t dev, void *priv,
3567     bool (*suspend)(device_t, const pmf_qual_t *),
3568     bool (*resume)(device_t, const pmf_qual_t *),
3569     void (*deregister)(device_t))
3570 {
3571 	dev->dv_class_private = priv;
3572 	dev->dv_class_suspend = suspend;
3573 	dev->dv_class_resume = resume;
3574 	dev->dv_class_deregister = deregister;
3575 }
3576 
3577 void
3578 device_pmf_class_deregister(device_t dev)
3579 {
3580 	if (dev->dv_class_deregister == NULL)
3581 		return;
3582 	(*dev->dv_class_deregister)(dev);
3583 	dev->dv_class_private = NULL;
3584 	dev->dv_class_suspend = NULL;
3585 	dev->dv_class_resume = NULL;
3586 	dev->dv_class_deregister = NULL;
3587 }
3588 
3589 bool
3590 device_active(device_t dev, devactive_t type)
3591 {
3592 	size_t i;
3593 
3594 	if (dev->dv_activity_count == 0)
3595 		return false;
3596 
3597 	for (i = 0; i < dev->dv_activity_count; ++i) {
3598 		if (dev->dv_activity_handlers[i] == NULL)
3599 			break;
3600 		(*dev->dv_activity_handlers[i])(dev, type);
3601 	}
3602 
3603 	return true;
3604 }
3605 
/*
 * device_active_register:
 *
 *	Register `handler' to be called by device_active().  Reuses a
 *	free (NULL) slot in the existing handler array if available;
 *	otherwise allocates a larger array.  The handler must not
 *	already be registered.  Returns true.
 */
bool
device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
{
	void (**new_handlers)(device_t, devactive_t);
	void (**old_handlers)(device_t, devactive_t);
	size_t i, old_size, new_size;
	int s;

	old_handlers = dev->dv_activity_handlers;
	old_size = dev->dv_activity_count;

	KASSERT(old_size == 0 || old_handlers != NULL);

	/* Fill the first free slot in the existing array, if any. */
	for (i = 0; i < old_size; ++i) {
		KASSERT(old_handlers[i] != handler);
		if (old_handlers[i] == NULL) {
			old_handlers[i] = handler;
			return true;
		}
	}

	/* No free slot: build a larger array with the new handler
	 * appended and the remaining slots NULL-padded. */
	new_size = old_size + 4;
	new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);

	for (i = 0; i < old_size; ++i)
		new_handlers[i] = old_handlers[i];
	new_handlers[old_size] = handler;
	for (i = old_size+1; i < new_size; ++i)
		new_handlers[i] = NULL;

	/* Publish count and array while blocking interrupts, so a
	 * concurrent device_active() never sees a torn update. */
	s = splhigh();
	dev->dv_activity_count = new_size;
	dev->dv_activity_handlers = new_handlers;
	splx(s);

	if (old_size > 0)
		kmem_free(old_handlers, sizeof(void *) * old_size);

	return true;
}
3646 
/*
 * device_active_deregister:
 *
 *	Remove `handler' from the device's activity-handler array,
 *	compacting the remaining entries.  If the array becomes
 *	empty it is freed.  Silently returns if the handler was
 *	never registered.
 */
void
device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
{
	void (**old_handlers)(device_t, devactive_t);
	size_t i, old_size;
	int s;

	old_handlers = dev->dv_activity_handlers;
	old_size = dev->dv_activity_count;

	/* Locate the handler; a NULL entry terminates the list early. */
	for (i = 0; i < old_size; ++i) {
		if (old_handlers[i] == handler)
			break;
		if (old_handlers[i] == NULL)
			return; /* XXX panic? */
	}

	if (i == old_size)
		return; /* XXX panic? */

	/* Shift the following handlers down over the removed slot. */
	for (; i < old_size - 1; ++i) {
		if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
			continue;

		/* We just copied a NULL into slot 0: the array is now
		 * empty, so retire it (blocking interrupts while the
		 * pointer/count pair is swapped out). */
		if (i == 0) {
			s = splhigh();
			dev->dv_activity_count = 0;
			dev->dv_activity_handlers = NULL;
			splx(s);
			kmem_free(old_handlers, sizeof(void *) * old_size);
		}
		return;
	}
	/* The removed handler occupied the last slot: just clear it. */
	old_handlers[i] = NULL;
}
3682 
3683 /* Return true iff the device_t `dev' exists at generation `gen'. */
3684 static bool
3685 device_exists_at(device_t dv, devgen_t gen)
3686 {
3687 	return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3688 	    dv->dv_add_gen <= gen;
3689 }
3690 
3691 static bool
3692 deviter_visits(const deviter_t *di, device_t dv)
3693 {
3694 	return device_exists_at(dv, di->di_gen);
3695 }
3696 
3697 /*
3698  * Device Iteration
3699  *
 * deviter_t: a device iterator.  Holds state for a "walk" visiting
 *     each device_t in the device tree.
3702  *
3703  * deviter_init(di, flags): initialize the device iterator `di'
3704  *     to "walk" the device tree.  deviter_next(di) will return
3705  *     the first device_t in the device tree, or NULL if there are
3706  *     no devices.
3707  *
3708  *     `flags' is one or more of DEVITER_F_RW, indicating that the
3709  *     caller intends to modify the device tree by calling
3710  *     config_detach(9) on devices in the order that the iterator
3711  *     returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3712  *     nearest the "root" of the device tree to be returned, first;
3713  *     DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3714  *     the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3715  *     indicating both that deviter_init() should not respect any
3716  *     locks on the device tree, and that deviter_next(di) may run
3717  *     in more than one LWP before the walk has finished.
3718  *
3719  *     Only one DEVITER_F_RW iterator may be in the device tree at
3720  *     once.
3721  *
3722  *     DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3723  *
3724  *     Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3725  *     DEVITER_F_LEAVES_FIRST are used in combination.
3726  *
3727  * deviter_first(di, flags): initialize the device iterator `di'
3728  *     and return the first device_t in the device tree, or NULL
3729  *     if there are no devices.  The statement
3730  *
3731  *         dv = deviter_first(di);
3732  *
3733  *     is shorthand for
3734  *
3735  *         deviter_init(di);
3736  *         dv = deviter_next(di);
3737  *
3738  * deviter_next(di): return the next device_t in the device tree,
3739  *     or NULL if there are no more devices.  deviter_next(di)
3740  *     is undefined if `di' was not initialized with deviter_init() or
3741  *     deviter_first().
3742  *
3743  * deviter_release(di): stops iteration (subsequent calls to
3744  *     deviter_next() will return NULL), releases any locks and
3745  *     resources held by the device iterator.
3746  *
3747  * Device iteration does not return device_t's in any particular
3748  * order.  An iterator will never return the same device_t twice.
3749  * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3750  * is called repeatedly on the same `di', it will eventually return
3751  * NULL.  It is ok to attach/detach devices during device iteration.
3752  */
/*
 * deviter_init:
 *
 *	Initialize the iterator `di' for a walk of the device tree.
 *	See the block comment above for the meaning of `flags'.
 */
void
deviter_init(deviter_t *di, deviter_flags_t flags)
{
	device_t dv;

	memset(di, 0, sizeof(*di));

	/* A shutdown walk is always a read/write walk. */
	if ((flags & DEVITER_F_SHUTDOWN) != 0)
		flags |= DEVITER_F_RW;

	mutex_enter(&alldevs_lock);
	/* Count this iterator so attach/detach can coordinate with
	 * in-progress walks. */
	if ((flags & DEVITER_F_RW) != 0)
		alldevs_nwrite++;
	else
		alldevs_nread++;
	/* Snapshot the generation: devices added after this point are
	 * invisible to the walk (see deviter_visits()). */
	di->di_gen = alldevs_gen++;
	di->di_flags = flags;

	/* For depth-ordered walks, precompute the range of depths
	 * that deviter_next() will sweep through. */
	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
	case DEVITER_F_LEAVES_FIRST:
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (!deviter_visits(di, dv))
				continue;
			di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
		}
		break;
	case DEVITER_F_ROOT_FIRST:
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (!deviter_visits(di, dv))
				continue;
			di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
		}
		break;
	default:
		break;
	}

	deviter_reinit(di);
	mutex_exit(&alldevs_lock);
}
3793 
3794 static void
3795 deviter_reinit(deviter_t *di)
3796 {
3797 
3798 	KASSERT(mutex_owned(&alldevs_lock));
3799 	if ((di->di_flags & DEVITER_F_RW) != 0)
3800 		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3801 	else
3802 		di->di_prev = TAILQ_FIRST(&alldevs);
3803 }
3804 
3805 device_t
3806 deviter_first(deviter_t *di, deviter_flags_t flags)
3807 {
3808 
3809 	deviter_init(di, flags);
3810 	return deviter_next(di);
3811 }
3812 
3813 static device_t
3814 deviter_next2(deviter_t *di)
3815 {
3816 	device_t dv;
3817 
3818 	KASSERT(mutex_owned(&alldevs_lock));
3819 
3820 	dv = di->di_prev;
3821 
3822 	if (dv == NULL)
3823 		return NULL;
3824 
3825 	if ((di->di_flags & DEVITER_F_RW) != 0)
3826 		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3827 	else
3828 		di->di_prev = TAILQ_NEXT(dv, dv_list);
3829 
3830 	return dv;
3831 }
3832 
3833 static device_t
3834 deviter_next1(deviter_t *di)
3835 {
3836 	device_t dv;
3837 
3838 	KASSERT(mutex_owned(&alldevs_lock));
3839 
3840 	do {
3841 		dv = deviter_next2(di);
3842 	} while (dv != NULL && !deviter_visits(di, dv));
3843 
3844 	return dv;
3845 }
3846 
/*
 * deviter_next:
 *
 *	Return the next device in the walk, or NULL when the walk is
 *	finished.  For depth-ordered walks, each full pass over the
 *	list yields only devices at the current depth; the depth is
 *	then stepped and the pass restarted until the precomputed
 *	depth range is exhausted.
 */
device_t
deviter_next(deviter_t *di)
{
	device_t dv = NULL;

	mutex_enter(&alldevs_lock);
	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
	case 0:
		/* Unordered walk: just take the next visitable device. */
		dv = deviter_next1(di);
		break;
	case DEVITER_F_LEAVES_FIRST:
		/* Sweep depths from the deepest level down to 0. */
		while (di->di_curdepth >= 0) {
			if ((dv = deviter_next1(di)) == NULL) {
				di->di_curdepth--;
				deviter_reinit(di);
			} else if (dv->dv_depth == di->di_curdepth)
				break;
		}
		break;
	case DEVITER_F_ROOT_FIRST:
		/* Sweep depths from 0 up to the deepest level. */
		while (di->di_curdepth <= di->di_maxdepth) {
			if ((dv = deviter_next1(di)) == NULL) {
				di->di_curdepth++;
				deviter_reinit(di);
			} else if (dv->dv_depth == di->di_curdepth)
				break;
		}
		break;
	default:
		/* ROOT_FIRST|LEAVES_FIRST is documented as undefined. */
		break;
	}
	mutex_exit(&alldevs_lock);

	return dv;
}
3882 
3883 void
3884 deviter_release(deviter_t *di)
3885 {
3886 	bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3887 
3888 	mutex_enter(&alldevs_lock);
3889 	if (rw)
3890 		--alldevs_nwrite;
3891 	else
3892 		--alldevs_nread;
3893 	/* XXX wake a garbage-collection thread */
3894 	mutex_exit(&alldevs_lock);
3895 }
3896 
3897 const char *
3898 cfdata_ifattr(const struct cfdata *cf)
3899 {
3900 	return cf->cf_pspec->cfp_iattr;
3901 }
3902 
bool
ifattr_match(const char *snull, const char *t)
{
	/* A NULL pattern is a wildcard that matches any attribute. */
	if (snull == NULL)
		return true;
	return strcmp(snull, t) == 0;
}
3908 
3909 void
3910 null_childdetached(device_t self, device_t child)
3911 {
3912 	/* do nothing */
3913 }
3914 
/*
 * sysctl_detach_setup:
 *
 *	Create the writable kern.detachall boolean sysctl, which
 *	controls whether all devices are detached at shutdown.
 */
static void
sysctl_detach_setup(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_BOOL, "detachall",
		SYSCTL_DESCR("Detach all devices at shutdown"),
		NULL, 0, &detachall, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
}
3926