1 /* $NetBSD: subr_autoconf.c,v 1.307 2023/02/22 17:00:16 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 1996, 2000 Christopher G. Demetriou 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed for the 18 * NetBSD Project. See http://www.NetBSD.org/ for 19 * information about NetBSD. 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )-- 35 */ 36 37 /* 38 * Copyright (c) 1992, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This software was developed by the Computer Systems Engineering group 42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 43 * contributed to Berkeley. 44 * 45 * All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by the University of 48 * California, Lawrence Berkeley Laboratories. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. Neither the name of the University nor the names of its contributors 59 * may be used to endorse or promote products derived from this software 60 * without specific prior written permission. 61 * 62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 65 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 72 * SUCH DAMAGE. 73 * 74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL) 75 * 76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94 77 */ 78 79 #include <sys/cdefs.h> 80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.307 2023/02/22 17:00:16 riastradh Exp $"); 81 82 #ifdef _KERNEL_OPT 83 #include "opt_ddb.h" 84 #include "drvctl.h" 85 #endif 86 87 #include <sys/param.h> 88 #include <sys/device.h> 89 #include <sys/device_impl.h> 90 #include <sys/disklabel.h> 91 #include <sys/conf.h> 92 #include <sys/kauth.h> 93 #include <sys/kmem.h> 94 #include <sys/systm.h> 95 #include <sys/kernel.h> 96 #include <sys/errno.h> 97 #include <sys/proc.h> 98 #include <sys/reboot.h> 99 #include <sys/kthread.h> 100 #include <sys/buf.h> 101 #include <sys/dirent.h> 102 #include <sys/mount.h> 103 #include <sys/namei.h> 104 #include <sys/unistd.h> 105 #include <sys/fcntl.h> 106 #include <sys/lockf.h> 107 #include <sys/callout.h> 108 #include <sys/devmon.h> 109 #include <sys/cpu.h> 110 #include <sys/sysctl.h> 111 #include <sys/stdarg.h> 112 #include <sys/localcount.h> 113 114 #include <sys/disk.h> 115 116 #include <sys/rndsource.h> 117 118 #include <machine/limits.h> 119 120 /* 121 * Autoconfiguration subroutines. 122 */ 123 124 /* 125 * Device autoconfiguration timings are mixed into the entropy pool. 126 */ 127 static krndsource_t rnd_autoconf_source; 128 129 /* 130 * ioconf.c exports exactly two names: cfdata and cfroots. All system 131 * devices and drivers are found via these tables. 132 */ 133 extern struct cfdata cfdata[]; 134 extern const short cfroots[]; 135 136 /* 137 * List of all cfdriver structures. We use this to detect duplicates 138 * when other cfdrivers are loaded. 139 */ 140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers); 141 extern struct cfdriver * const cfdriver_list_initial[]; 142 143 /* 144 * Initial list of cfattach's. 145 */ 146 extern const struct cfattachinit cfattachinit[]; 147 148 /* 149 * List of cfdata tables. We always have one such list -- the one 150 * built statically when the kernel was configured. 151 */ 152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables); 153 static struct cftable initcftable; 154 155 #define ROOT ((device_t)NULL) 156 157 struct matchinfo { 158 cfsubmatch_t fn; 159 device_t parent; 160 const int *locs; 161 void *aux; 162 struct cfdata *match; 163 int pri; 164 }; 165 166 struct alldevs_foray { 167 int af_s; 168 struct devicelist af_garbage; 169 }; 170 171 /* 172 * Internal version of the cfargs structure; all versions are 173 * canonicalized to this. 
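 *
 * Illustrative note (an editorial addition, not from the original
 * sources): callers build the public struct cfargs with the CFARGS()
 * macro from <sys/device.h>, e.g.
 *
 *	CFARGS(.iattr = "foo", .locators = locs)
 *
 * and cfargs_canonicalize() below copies those fields into this
 * internal form.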
 */
struct cfargs_internal {
	union {
		cfsubmatch_t	submatch;	/* submatch function (direct config) */
		cfsearch_t	search;		/* search function (indirect config) */
	};
	const char *	iattr;		/* interface attribute */
	const int *	locators;	/* locators array */
	devhandle_t	devhandle;	/* devhandle_t (by value) */
};

static char *number(char *, int);
static void mapply(struct matchinfo *, cfdata_t);
static void config_devdelete(device_t);
static void config_devunlink(device_t, struct devicelist *);
static void config_makeroom(int, struct cfdriver *);
static void config_devlink(device_t);
static void config_alldevs_enter(struct alldevs_foray *);
static void config_alldevs_exit(struct alldevs_foray *);
static void config_add_attrib_dict(device_t);
static device_t config_attach_internal(device_t, cfdata_t, void *,
    cfprint_t, const struct cfargs_internal *);

static void config_collect_garbage(struct devicelist *);
static void config_dump_garbage(struct devicelist *);

static void pmflock_debug(device_t, const char *, int);

static device_t deviter_next1(deviter_t *);
static void deviter_reinit(deviter_t *);

struct deferred_config {
	TAILQ_ENTRY(deferred_config) dc_queue;
	device_t dc_dev;
	void (*dc_func)(device_t);
};

TAILQ_HEAD(deferred_config_head, deferred_config);

static struct deferred_config_head deferred_config_queue =
	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
static struct deferred_config_head interrupt_config_queue =
	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
static int interrupt_config_threads = 8;
static struct deferred_config_head mountroot_config_queue =
	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
static int mountroot_config_threads = 2;
static lwp_t **mountroot_config_lwpids;
static size_t mountroot_config_lwpids_size;
bool root_is_mounted = false;

static void config_process_deferred(struct deferred_config_head *, device_t);

/* Hooks to finalize configuration once all real devices have been found. */
struct finalize_hook {
	TAILQ_ENTRY(finalize_hook) f_list;
	int (*f_func)(device_t);
	device_t f_dev;
};
static TAILQ_HEAD(, finalize_hook) config_finalize_list =
	TAILQ_HEAD_INITIALIZER(config_finalize_list);
static int config_finalize_done;

/* list of all devices */
static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
static kmutex_t alldevs_lock __cacheline_aligned;
static devgen_t alldevs_gen = 1;
static int alldevs_nread = 0;
static int alldevs_nwrite = 0;
static bool alldevs_garbage = false;

static struct devicelist config_pending =
	TAILQ_HEAD_INITIALIZER(config_pending);
static kmutex_t config_misc_lock;
static kcondvar_t config_misc_cv;

static bool detachall = false;

#define	STREQ(s1, s2)			\
	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)

static bool config_initialized = false;	/* config_init() has been called.
*/ 256 257 static int config_do_twiddle; 258 static callout_t config_twiddle_ch; 259 260 static void sysctl_detach_setup(struct sysctllog **); 261 262 int no_devmon_insert(const char *, prop_dictionary_t); 263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert; 264 265 typedef int (*cfdriver_fn)(struct cfdriver *); 266 static int 267 frob_cfdrivervec(struct cfdriver * const *cfdriverv, 268 cfdriver_fn drv_do, cfdriver_fn drv_undo, 269 const char *style, bool dopanic) 270 { 271 void (*pr)(const char *, ...) __printflike(1, 2) = 272 dopanic ? panic : printf; 273 int i, error = 0, e2 __diagused; 274 275 for (i = 0; cfdriverv[i] != NULL; i++) { 276 if ((error = drv_do(cfdriverv[i])) != 0) { 277 pr("configure: `%s' driver %s failed: %d", 278 cfdriverv[i]->cd_name, style, error); 279 goto bad; 280 } 281 } 282 283 KASSERT(error == 0); 284 return 0; 285 286 bad: 287 printf("\n"); 288 for (i--; i >= 0; i--) { 289 e2 = drv_undo(cfdriverv[i]); 290 KASSERT(e2 == 0); 291 } 292 293 return error; 294 } 295 296 typedef int (*cfattach_fn)(const char *, struct cfattach *); 297 static int 298 frob_cfattachvec(const struct cfattachinit *cfattachv, 299 cfattach_fn att_do, cfattach_fn att_undo, 300 const char *style, bool dopanic) 301 { 302 const struct cfattachinit *cfai = NULL; 303 void (*pr)(const char *, ...) __printflike(1, 2) = 304 dopanic ? panic : printf; 305 int j = 0, error = 0, e2 __diagused; 306 307 for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) { 308 for (j = 0; cfai->cfai_list[j] != NULL; j++) { 309 if ((error = att_do(cfai->cfai_name, 310 cfai->cfai_list[j])) != 0) { 311 pr("configure: attachment `%s' " 312 "of `%s' driver %s failed: %d", 313 cfai->cfai_list[j]->ca_name, 314 cfai->cfai_name, style, error); 315 goto bad; 316 } 317 } 318 } 319 320 KASSERT(error == 0); 321 return 0; 322 323 bad: 324 /* 325 * Rollback in reverse order. dunno if super-important, but 326 * do that anyway. Although the code looks a little like 327 * someone did a little integration (in the math sense). 328 */ 329 printf("\n"); 330 if (cfai) { 331 bool last; 332 333 for (last = false; last == false; ) { 334 if (cfai == &cfattachv[0]) 335 last = true; 336 for (j--; j >= 0; j--) { 337 e2 = att_undo(cfai->cfai_name, 338 cfai->cfai_list[j]); 339 KASSERT(e2 == 0); 340 } 341 if (!last) { 342 cfai--; 343 for (j = 0; cfai->cfai_list[j] != NULL; j++) 344 ; 345 } 346 } 347 } 348 349 return error; 350 } 351 352 /* 353 * Initialize the autoconfiguration data structures. Normally this 354 * is done by configure(), but some platforms need to do this very 355 * early (to e.g. initialize the console). 356 */ 357 void 358 config_init(void) 359 { 360 361 KASSERT(config_initialized == false); 362 363 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM); 364 365 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE); 366 cv_init(&config_misc_cv, "cfgmisc"); 367 368 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE); 369 370 frob_cfdrivervec(cfdriver_list_initial, 371 config_cfdriver_attach, NULL, "bootstrap", true); 372 frob_cfattachvec(cfattachinit, 373 config_cfattach_attach, NULL, "bootstrap", true); 374 375 initcftable.ct_cfdata = cfdata; 376 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list); 377 378 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN, 379 RND_FLAG_COLLECT_TIME); 380 381 config_initialized = true; 382 } 383 384 /* 385 * Init or fini drivers and attachments. Either all or none 386 * are processed (via rollback). 
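 * For example (hypothetical driver "foo"), a module's MODULE_CMD_INIT
 * handler would typically call
 *
 *	config_init_component(cfdriver_ioconf_foo,
 *	    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *
 * with the tables generated from its ioconf file, and make the matching
 * config_fini_component() call on unload.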
It would be nice if this were 387 * atomic to outside consumers, but with the current state of 388 * locking ... 389 */ 390 int 391 config_init_component(struct cfdriver * const *cfdriverv, 392 const struct cfattachinit *cfattachv, struct cfdata *cfdatav) 393 { 394 int error; 395 396 KERNEL_LOCK(1, NULL); 397 398 if ((error = frob_cfdrivervec(cfdriverv, 399 config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0) 400 goto out; 401 if ((error = frob_cfattachvec(cfattachv, 402 config_cfattach_attach, config_cfattach_detach, 403 "init", false)) != 0) { 404 frob_cfdrivervec(cfdriverv, 405 config_cfdriver_detach, NULL, "init rollback", true); 406 goto out; 407 } 408 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) { 409 frob_cfattachvec(cfattachv, 410 config_cfattach_detach, NULL, "init rollback", true); 411 frob_cfdrivervec(cfdriverv, 412 config_cfdriver_detach, NULL, "init rollback", true); 413 goto out; 414 } 415 416 /* Success! */ 417 error = 0; 418 419 out: KERNEL_UNLOCK_ONE(NULL); 420 return error; 421 } 422 423 int 424 config_fini_component(struct cfdriver * const *cfdriverv, 425 const struct cfattachinit *cfattachv, struct cfdata *cfdatav) 426 { 427 int error; 428 429 KERNEL_LOCK(1, NULL); 430 431 if ((error = config_cfdata_detach(cfdatav)) != 0) 432 goto out; 433 if ((error = frob_cfattachvec(cfattachv, 434 config_cfattach_detach, config_cfattach_attach, 435 "fini", false)) != 0) { 436 if (config_cfdata_attach(cfdatav, 0) != 0) 437 panic("config_cfdata fini rollback failed"); 438 goto out; 439 } 440 if ((error = frob_cfdrivervec(cfdriverv, 441 config_cfdriver_detach, config_cfdriver_attach, 442 "fini", false)) != 0) { 443 frob_cfattachvec(cfattachv, 444 config_cfattach_attach, NULL, "fini rollback", true); 445 if (config_cfdata_attach(cfdatav, 0) != 0) 446 panic("config_cfdata fini rollback failed"); 447 goto out; 448 } 449 450 /* Success! 
*/ 451 error = 0; 452 453 out: KERNEL_UNLOCK_ONE(NULL); 454 return error; 455 } 456 457 void 458 config_init_mi(void) 459 { 460 461 if (!config_initialized) 462 config_init(); 463 464 sysctl_detach_setup(NULL); 465 } 466 467 void 468 config_deferred(device_t dev) 469 { 470 471 KASSERT(KERNEL_LOCKED_P()); 472 473 config_process_deferred(&deferred_config_queue, dev); 474 config_process_deferred(&interrupt_config_queue, dev); 475 config_process_deferred(&mountroot_config_queue, dev); 476 } 477 478 static void 479 config_interrupts_thread(void *cookie) 480 { 481 struct deferred_config *dc; 482 device_t dev; 483 484 mutex_enter(&config_misc_lock); 485 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) { 486 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue); 487 mutex_exit(&config_misc_lock); 488 489 dev = dc->dc_dev; 490 (*dc->dc_func)(dev); 491 if (!device_pmf_is_registered(dev)) 492 aprint_debug_dev(dev, 493 "WARNING: power management not supported\n"); 494 config_pending_decr(dev); 495 kmem_free(dc, sizeof(*dc)); 496 497 mutex_enter(&config_misc_lock); 498 } 499 mutex_exit(&config_misc_lock); 500 501 kthread_exit(0); 502 } 503 504 void 505 config_create_interruptthreads(void) 506 { 507 int i; 508 509 for (i = 0; i < interrupt_config_threads; i++) { 510 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL, 511 config_interrupts_thread, NULL, NULL, "configintr"); 512 } 513 } 514 515 static void 516 config_mountroot_thread(void *cookie) 517 { 518 struct deferred_config *dc; 519 520 mutex_enter(&config_misc_lock); 521 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) { 522 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue); 523 mutex_exit(&config_misc_lock); 524 525 (*dc->dc_func)(dc->dc_dev); 526 kmem_free(dc, sizeof(*dc)); 527 528 mutex_enter(&config_misc_lock); 529 } 530 mutex_exit(&config_misc_lock); 531 532 kthread_exit(0); 533 } 534 535 void 536 config_create_mountrootthreads(void) 537 { 538 int i; 539 540 if (!root_is_mounted) 541 root_is_mounted = true; 542 543 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) * 544 mountroot_config_threads; 545 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size, 546 KM_NOSLEEP); 547 KASSERT(mountroot_config_lwpids); 548 for (i = 0; i < mountroot_config_threads; i++) { 549 mountroot_config_lwpids[i] = 0; 550 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */, 551 NULL, config_mountroot_thread, NULL, 552 &mountroot_config_lwpids[i], 553 "configroot"); 554 } 555 } 556 557 void 558 config_finalize_mountroot(void) 559 { 560 int i, error; 561 562 for (i = 0; i < mountroot_config_threads; i++) { 563 if (mountroot_config_lwpids[i] == 0) 564 continue; 565 566 error = kthread_join(mountroot_config_lwpids[i]); 567 if (error) 568 printf("%s: thread %x joined with error %d\n", 569 __func__, i, error); 570 } 571 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size); 572 } 573 574 /* 575 * Announce device attach/detach to userland listeners. 
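 * (Events are delivered through the devmon_insert_vec hook below; the
 * drvctl pseudo-device overrides no_devmon_insert() when present, and
 * without it the events are simply dropped.)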
576 */ 577 578 int 579 no_devmon_insert(const char *name, prop_dictionary_t p) 580 { 581 582 return ENODEV; 583 } 584 585 static void 586 devmon_report_device(device_t dev, bool isattach) 587 { 588 prop_dictionary_t ev, dict = device_properties(dev); 589 const char *parent; 590 const char *what; 591 const char *where; 592 device_t pdev = device_parent(dev); 593 594 /* If currently no drvctl device, just return */ 595 if (devmon_insert_vec == no_devmon_insert) 596 return; 597 598 ev = prop_dictionary_create(); 599 if (ev == NULL) 600 return; 601 602 what = (isattach ? "device-attach" : "device-detach"); 603 parent = (pdev == NULL ? "root" : device_xname(pdev)); 604 if (prop_dictionary_get_string(dict, "location", &where)) { 605 prop_dictionary_set_string(ev, "location", where); 606 aprint_debug("ev: %s %s at %s in [%s]\n", 607 what, device_xname(dev), parent, where); 608 } 609 if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) || 610 !prop_dictionary_set_string(ev, "parent", parent)) { 611 prop_object_release(ev); 612 return; 613 } 614 615 if ((*devmon_insert_vec)(what, ev) != 0) 616 prop_object_release(ev); 617 } 618 619 /* 620 * Add a cfdriver to the system. 621 */ 622 int 623 config_cfdriver_attach(struct cfdriver *cd) 624 { 625 struct cfdriver *lcd; 626 627 /* Make sure this driver isn't already in the system. */ 628 LIST_FOREACH(lcd, &allcfdrivers, cd_list) { 629 if (STREQ(lcd->cd_name, cd->cd_name)) 630 return EEXIST; 631 } 632 633 LIST_INIT(&cd->cd_attach); 634 LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list); 635 636 return 0; 637 } 638 639 /* 640 * Remove a cfdriver from the system. 641 */ 642 int 643 config_cfdriver_detach(struct cfdriver *cd) 644 { 645 struct alldevs_foray af; 646 int i, rc = 0; 647 648 config_alldevs_enter(&af); 649 /* Make sure there are no active instances. */ 650 for (i = 0; i < cd->cd_ndevs; i++) { 651 if (cd->cd_devs[i] != NULL) { 652 rc = EBUSY; 653 break; 654 } 655 } 656 config_alldevs_exit(&af); 657 658 if (rc != 0) 659 return rc; 660 661 /* ...and no attachments loaded. */ 662 if (LIST_EMPTY(&cd->cd_attach) == 0) 663 return EBUSY; 664 665 LIST_REMOVE(cd, cd_list); 666 667 KASSERT(cd->cd_devs == NULL); 668 669 return 0; 670 } 671 672 /* 673 * Look up a cfdriver by name. 674 */ 675 struct cfdriver * 676 config_cfdriver_lookup(const char *name) 677 { 678 struct cfdriver *cd; 679 680 LIST_FOREACH(cd, &allcfdrivers, cd_list) { 681 if (STREQ(cd->cd_name, name)) 682 return cd; 683 } 684 685 return NULL; 686 } 687 688 /* 689 * Add a cfattach to the specified driver. 690 */ 691 int 692 config_cfattach_attach(const char *driver, struct cfattach *ca) 693 { 694 struct cfattach *lca; 695 struct cfdriver *cd; 696 697 cd = config_cfdriver_lookup(driver); 698 if (cd == NULL) 699 return ESRCH; 700 701 /* Make sure this attachment isn't already on this driver. */ 702 LIST_FOREACH(lca, &cd->cd_attach, ca_list) { 703 if (STREQ(lca->ca_name, ca->ca_name)) 704 return EEXIST; 705 } 706 707 LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list); 708 709 return 0; 710 } 711 712 /* 713 * Remove a cfattach from the specified driver. 714 */ 715 int 716 config_cfattach_detach(const char *driver, struct cfattach *ca) 717 { 718 struct alldevs_foray af; 719 struct cfdriver *cd; 720 device_t dev; 721 int i, rc = 0; 722 723 cd = config_cfdriver_lookup(driver); 724 if (cd == NULL) 725 return ESRCH; 726 727 config_alldevs_enter(&af); 728 /* Make sure there are no active instances. 
*/ 729 for (i = 0; i < cd->cd_ndevs; i++) { 730 if ((dev = cd->cd_devs[i]) == NULL) 731 continue; 732 if (dev->dv_cfattach == ca) { 733 rc = EBUSY; 734 break; 735 } 736 } 737 config_alldevs_exit(&af); 738 739 if (rc != 0) 740 return rc; 741 742 LIST_REMOVE(ca, ca_list); 743 744 return 0; 745 } 746 747 /* 748 * Look up a cfattach by name. 749 */ 750 static struct cfattach * 751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname) 752 { 753 struct cfattach *ca; 754 755 LIST_FOREACH(ca, &cd->cd_attach, ca_list) { 756 if (STREQ(ca->ca_name, atname)) 757 return ca; 758 } 759 760 return NULL; 761 } 762 763 /* 764 * Look up a cfattach by driver/attachment name. 765 */ 766 struct cfattach * 767 config_cfattach_lookup(const char *name, const char *atname) 768 { 769 struct cfdriver *cd; 770 771 cd = config_cfdriver_lookup(name); 772 if (cd == NULL) 773 return NULL; 774 775 return config_cfattach_lookup_cd(cd, atname); 776 } 777 778 /* 779 * Apply the matching function and choose the best. This is used 780 * a few times and we want to keep the code small. 781 */ 782 static void 783 mapply(struct matchinfo *m, cfdata_t cf) 784 { 785 int pri; 786 787 if (m->fn != NULL) { 788 pri = (*m->fn)(m->parent, cf, m->locs, m->aux); 789 } else { 790 pri = config_match(m->parent, cf, m->aux); 791 } 792 if (pri > m->pri) { 793 m->match = cf; 794 m->pri = pri; 795 } 796 } 797 798 int 799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux) 800 { 801 const struct cfiattrdata *ci; 802 const struct cflocdesc *cl; 803 int nlocs, i; 804 805 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver); 806 KASSERT(ci); 807 nlocs = ci->ci_loclen; 808 KASSERT(!nlocs || locs); 809 for (i = 0; i < nlocs; i++) { 810 cl = &ci->ci_locdesc[i]; 811 if (cl->cld_defaultstr != NULL && 812 cf->cf_loc[i] == cl->cld_default) 813 continue; 814 if (cf->cf_loc[i] == locs[i]) 815 continue; 816 return 0; 817 } 818 819 return config_match(parent, cf, aux); 820 } 821 822 /* 823 * Helper function: check whether the driver supports the interface attribute 824 * and return its descriptor structure. 825 */ 826 static const struct cfiattrdata * 827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia) 828 { 829 const struct cfiattrdata * const *cpp; 830 831 if (cd->cd_attrs == NULL) 832 return 0; 833 834 for (cpp = cd->cd_attrs; *cpp; cpp++) { 835 if (STREQ((*cpp)->ci_name, ia)) { 836 /* Match. */ 837 return *cpp; 838 } 839 } 840 return 0; 841 } 842 843 static int __diagused 844 cfdriver_iattr_count(const struct cfdriver *cd) 845 { 846 const struct cfiattrdata * const *cpp; 847 int i; 848 849 if (cd->cd_attrs == NULL) 850 return 0; 851 852 for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) { 853 i++; 854 } 855 return i; 856 } 857 858 /* 859 * Lookup an interface attribute description by name. 860 * If the driver is given, consider only its supported attributes. 861 */ 862 const struct cfiattrdata * 863 cfiattr_lookup(const char *name, const struct cfdriver *cd) 864 { 865 const struct cfdriver *d; 866 const struct cfiattrdata *ia; 867 868 if (cd) 869 return cfdriver_get_iattr(cd, name); 870 871 LIST_FOREACH(d, &allcfdrivers, cd_list) { 872 ia = cfdriver_get_iattr(d, name); 873 if (ia) 874 return ia; 875 } 876 return 0; 877 } 878 879 /* 880 * Determine if `parent' is a potential parent for a device spec based 881 * on `cfp'. 882 */ 883 static int 884 cfparent_match(const device_t parent, const struct cfparent *cfp) 885 { 886 struct cfdriver *pcd; 887 888 /* We don't match root nodes here. 
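	 * (A root spec has no cfparent.  For an ordinary entry such as
	 * "foo0 at bar0" the cfp_parent is "bar" and cfp_unit is 0, while
	 * a wildcarded "foo* at bar?" uses cfp_unit == DVUNIT_ANY.)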
 */
	if (cfp == NULL)
		return 0;

	pcd = parent->dv_cfdriver;
	KASSERT(pcd != NULL);

	/*
	 * First, ensure this parent has the correct interface
	 * attribute.
	 */
	if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
		return 0;

	/*
	 * If no specific parent device instance was specified (i.e.
	 * we're attaching to the attribute only), we're done!
	 */
	if (cfp->cfp_parent == NULL)
		return 1;

	/*
	 * Check the parent device's name.
	 */
	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
		return 0;	/* not the same parent */

	/*
	 * Make sure the unit number matches.
	 */
	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
	    cfp->cfp_unit == parent->dv_unit)
		return 1;

	/* Unit numbers don't match. */
	return 0;
}

/*
 * Helper for config_cfdata_attach(): check each device to see whether
 * it could be the parent of any attachment in the config data table
 * passed in, and rescan it if so.
 */
static void
rescan_with_cfdata(const struct cfdata *cf)
{
	device_t d;
	const struct cfdata *cf1;
	deviter_t di;

	KASSERT(KERNEL_LOCKED_P());

	/*
	 * "alldevs" is likely longer than a module's cfdata, so make it
	 * the outer loop.
	 */
	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {

		if (!(d->dv_cfattach->ca_rescan))
			continue;

		for (cf1 = cf; cf1->cf_name; cf1++) {

			if (!cfparent_match(d, cf1->cf_pspec))
				continue;

			(*d->dv_cfattach->ca_rescan)(d,
				cfdata_ifattr(cf1), cf1->cf_loc);

			config_deferred(d);
		}
	}
	deviter_release(&di);
}

/*
 * Attach a supplemental config data table and rescan potential
 * parent devices if required.
 */
int
config_cfdata_attach(cfdata_t cf, int scannow)
{
	struct cftable *ct;

	KERNEL_LOCK(1, NULL);

	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
	ct->ct_cfdata = cf;
	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);

	if (scannow)
		rescan_with_cfdata(cf);

	KERNEL_UNLOCK_ONE(NULL);

	return 0;
}

/*
 * Helper for config_cfdata_detach: check whether a device is
 * found through any attachment in the config data table.
 */
static int
dev_in_cfdata(device_t d, cfdata_t cf)
{
	const struct cfdata *cf1;

	for (cf1 = cf; cf1->cf_name; cf1++)
		if (d->dv_cfdata == cf1)
			return 1;

	return 0;
}

/*
 * Detach a supplemental config data table.  Before removing the
 * table, detach all devices that were found through it (and thus
 * still hold references to it).
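 *
 * This is the complement of config_cfdata_attach(); it is what
 * config_fini_component() uses when a module is unloaded.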
 */
int
config_cfdata_detach(cfdata_t cf)
{
	device_t d;
	int error = 0;
	struct cftable *ct;
	deviter_t di;

	KERNEL_LOCK(1, NULL);

	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
	     d = deviter_next(&di)) {
		if (!dev_in_cfdata(d, cf))
			continue;
		if ((error = config_detach(d, 0)) != 0)
			break;
	}
	deviter_release(&di);
	if (error) {
		aprint_error_dev(d, "unable to detach instance\n");
		goto out;
	}

	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		if (ct->ct_cfdata == cf) {
			TAILQ_REMOVE(&allcftables, ct, ct_list);
			kmem_free(ct, sizeof(*ct));
			error = 0;
			goto out;
		}
	}

	/* not found -- shouldn't happen */
	error = EINVAL;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

/*
 * Invoke the "match" routine for a cfdata entry on behalf of
 * an external caller, usually a direct config "submatch" routine.
 */
int
config_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cfattach *ca;

	KASSERT(KERNEL_LOCKED_P());

	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
	if (ca == NULL) {
		/* No attachment for this entry, oh well. */
		return 0;
	}

	return (*ca->ca_match)(parent, cf, aux);
}

/*
 * Invoke the "probe" routine for a cfdata entry on behalf of
 * an external caller, usually an indirect config "search" routine.
 */
int
config_probe(device_t parent, cfdata_t cf, void *aux)
{
	/*
	 * This is currently a synonym for config_match(), but this
	 * is an implementation detail; "match" and "probe" routines
	 * have different behaviors.
	 *
	 * XXX config_probe() should return a bool, because there is
	 * XXX no match score for probe -- it's either there or it's
	 * XXX not, but some ports abuse the return value as a way
	 * XXX to attach "critical" devices before "non-critical"
	 * XXX devices.
	 */
	return config_match(parent, cf, aux);
}

static struct cfargs_internal *
cfargs_canonicalize(const struct cfargs * const cfargs,
    struct cfargs_internal * const store)
{
	struct cfargs_internal *args = store;

	memset(args, 0, sizeof(*args));

	/* If none specified, the all-NULL pointers are good. */
	if (cfargs == NULL) {
		return args;
	}

	/*
	 * Only one arguments version is recognized at this time.
	 */
	if (cfargs->cfargs_version != CFARGS_VERSION) {
		panic("cfargs_canonicalize: unknown version %lu\n",
		    (unsigned long)cfargs->cfargs_version);
	}

	/*
	 * submatch and search are mutually-exclusive.
	 */
	if (cfargs->submatch != NULL && cfargs->search != NULL) {
		panic("cfargs_canonicalize: submatch and search are "
		      "mutually-exclusive");
	}
	if (cfargs->submatch != NULL) {
		args->submatch = cfargs->submatch;
	} else if (cfargs->search != NULL) {
		args->search = cfargs->search;
	}

	args->iattr = cfargs->iattr;
	args->locators = cfargs->locators;
	args->devhandle = cfargs->devhandle;

	return args;
}

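/*
 * Illustrative sketch (an editorial addition, not from the original
 * sources): a bus driver enumerating a child typically reaches the
 * search below via config_found(), passing any overrides through the
 * CFARGS() macro, e.g.
 *
 *	config_found(self, &faa, foo_print,
 *	    CFARGS(.submatch = config_stdsubmatch,
 *		   .iattr = "foobus",
 *		   .locators = locs));
 *
 * cfargs_canonicalize() above turns that into a struct cfargs_internal,
 * which then drives config_search_internal().
 */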
Return the `found child' if we got a match, or NULL 1131 * otherwise. The `aux' pointer is simply passed on through. 1132 * 1133 * Note that this function is designed so that it can be used to apply 1134 * an arbitrary function to all potential children (its return value 1135 * can be ignored). 1136 */ 1137 static cfdata_t 1138 config_search_internal(device_t parent, void *aux, 1139 const struct cfargs_internal * const args) 1140 { 1141 struct cftable *ct; 1142 cfdata_t cf; 1143 struct matchinfo m; 1144 1145 KASSERT(config_initialized); 1146 KASSERTMSG((!args->iattr || 1147 cfdriver_get_iattr(parent->dv_cfdriver, args->iattr)), 1148 "%s searched for child at interface attribute %s," 1149 " but device %s(4) has no such interface attribute in config(5)", 1150 device_xname(parent), args->iattr, 1151 parent->dv_cfdriver->cd_name); 1152 KASSERTMSG((args->iattr || 1153 cfdriver_iattr_count(parent->dv_cfdriver) < 2), 1154 "%s searched for child without interface attribute," 1155 " needed to disambiguate among the %d declared for in %s(4)" 1156 " in config(5)", 1157 device_xname(parent), 1158 cfdriver_iattr_count(parent->dv_cfdriver), 1159 parent->dv_cfdriver->cd_name); 1160 1161 m.fn = args->submatch; /* N.B. union */ 1162 m.parent = parent; 1163 m.locs = args->locators; 1164 m.aux = aux; 1165 m.match = NULL; 1166 m.pri = 0; 1167 1168 TAILQ_FOREACH(ct, &allcftables, ct_list) { 1169 for (cf = ct->ct_cfdata; cf->cf_name; cf++) { 1170 1171 /* We don't match root nodes here. */ 1172 if (!cf->cf_pspec) 1173 continue; 1174 1175 /* 1176 * Skip cf if no longer eligible, otherwise scan 1177 * through parents for one matching `parent', and 1178 * try match function. 1179 */ 1180 if (cf->cf_fstate == FSTATE_FOUND) 1181 continue; 1182 if (cf->cf_fstate == FSTATE_DNOTFOUND || 1183 cf->cf_fstate == FSTATE_DSTAR) 1184 continue; 1185 1186 /* 1187 * If an interface attribute was specified, 1188 * consider only children which attach to 1189 * that attribute. 1190 */ 1191 if (args->iattr != NULL && 1192 !STREQ(args->iattr, cfdata_ifattr(cf))) 1193 continue; 1194 1195 if (cfparent_match(parent, cf->cf_pspec)) 1196 mapply(&m, cf); 1197 } 1198 } 1199 rnd_add_uint32(&rnd_autoconf_source, 0); 1200 return m.match; 1201 } 1202 1203 cfdata_t 1204 config_search(device_t parent, void *aux, const struct cfargs *cfargs) 1205 { 1206 cfdata_t cf; 1207 struct cfargs_internal store; 1208 1209 cf = config_search_internal(parent, aux, 1210 cfargs_canonicalize(cfargs, &store)); 1211 1212 return cf; 1213 } 1214 1215 /* 1216 * Find the given root device. 1217 * This is much like config_search, but there is no parent. 1218 * Don't bother with multiple cfdata tables; the root node 1219 * must always be in the initial table. 1220 */ 1221 cfdata_t 1222 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux) 1223 { 1224 cfdata_t cf; 1225 const short *p; 1226 struct matchinfo m; 1227 1228 m.fn = fn; 1229 m.parent = ROOT; 1230 m.aux = aux; 1231 m.match = NULL; 1232 m.pri = 0; 1233 m.locs = 0; 1234 /* 1235 * Look at root entries for matching name. We do not bother 1236 * with found-state here since only one root should ever be 1237 * searched (and it must be done first). 
	 */
	for (p = cfroots; *p >= 0; p++) {
		cf = &cfdata[*p];
		if (strcmp(cf->cf_name, rootname) == 0)
			mapply(&m, cf);
	}
	return m.match;
}

static const char * const msgs[] = {
	[QUIET]		= "",
	[UNCONF]	= " not configured\n",
	[UNSUPP]	= " unsupported\n",
};

/*
 * The given `aux' argument describes a device that has been found
 * on the given parent, but not necessarily configured.  Locate the
 * configuration data for that device (using the submatch function
 * provided, or using candidates' cd_match configuration driver
 * functions) and attach it, and return its device_t.  If the device was
 * not configured, call the given `print' function and return NULL.
 */
device_t
config_found(device_t parent, void *aux, cfprint_t print,
    const struct cfargs * const cfargs)
{
	cfdata_t cf;
	struct cfargs_internal store;
	const struct cfargs_internal * const args =
	    cfargs_canonicalize(cfargs, &store);

	cf = config_search_internal(parent, aux, args);
	if (cf != NULL) {
		return config_attach_internal(parent, cf, aux, print, args);
	}

	if (print) {
		if (config_do_twiddle && cold)
			twiddle();

		const int pret = (*print)(aux, device_xname(parent));
		KASSERT(pret >= 0);
		KASSERT(pret < __arraycount(msgs));
		KASSERT(msgs[pret] != NULL);
		aprint_normal("%s", msgs[pret]);
	}

	return NULL;
}

/*
 * As above, but for root devices.
 */
device_t
config_rootfound(const char *rootname, void *aux)
{
	cfdata_t cf;
	device_t dev = NULL;

	KERNEL_LOCK(1, NULL);
	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
	else
		aprint_error("root device %s not configured\n", rootname);
	KERNEL_UNLOCK_ONE(NULL);
	return dev;
}

/* just like sprintf(buf, "%d") except that it works from the end */
static char *
number(char *ep, int n)
{

	*--ep = 0;
	while (n >= 10) {
		*--ep = (n % 10) + '0';
		n /= 10;
	}
	*--ep = n + '0';
	return ep;
}

/*
 * Expand the size of the cd_devs array if necessary.
 *
 * The caller must hold alldevs_lock. config_makeroom() may release and
 * re-acquire alldevs_lock, so callers should re-check conditions such
 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
 * returns.
 */
static void
config_makeroom(int n, struct cfdriver *cd)
{
	int ondevs, nndevs;
	device_t *osp, *nsp;

	KASSERT(mutex_owned(&alldevs_lock));
	alldevs_nwrite++;

	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
		;

	while (n >= cd->cd_ndevs) {
		/*
		 * Need to expand the array.
		 */
		ondevs = cd->cd_ndevs;
		osp = cd->cd_devs;

		/*
		 * Release alldevs_lock around allocation, which may
		 * sleep.
		 */
		mutex_exit(&alldevs_lock);
		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
		mutex_enter(&alldevs_lock);

		/*
		 * If another thread moved the array while we did
		 * not hold alldevs_lock, try again.
1359 */ 1360 if (cd->cd_devs != osp) { 1361 mutex_exit(&alldevs_lock); 1362 kmem_free(nsp, sizeof(device_t) * nndevs); 1363 mutex_enter(&alldevs_lock); 1364 continue; 1365 } 1366 1367 memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs)); 1368 if (ondevs != 0) 1369 memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs); 1370 1371 cd->cd_ndevs = nndevs; 1372 cd->cd_devs = nsp; 1373 if (ondevs != 0) { 1374 mutex_exit(&alldevs_lock); 1375 kmem_free(osp, sizeof(device_t) * ondevs); 1376 mutex_enter(&alldevs_lock); 1377 } 1378 } 1379 KASSERT(mutex_owned(&alldevs_lock)); 1380 alldevs_nwrite--; 1381 } 1382 1383 /* 1384 * Put dev into the devices list. 1385 */ 1386 static void 1387 config_devlink(device_t dev) 1388 { 1389 1390 mutex_enter(&alldevs_lock); 1391 1392 KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev); 1393 1394 dev->dv_add_gen = alldevs_gen; 1395 /* It is safe to add a device to the tail of the list while 1396 * readers and writers are in the list. 1397 */ 1398 TAILQ_INSERT_TAIL(&alldevs, dev, dv_list); 1399 mutex_exit(&alldevs_lock); 1400 } 1401 1402 static void 1403 config_devfree(device_t dev) 1404 { 1405 1406 KASSERT(dev->dv_flags & DVF_PRIV_ALLOC); 1407 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending); 1408 1409 if (dev->dv_cfattach->ca_devsize > 0) 1410 kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize); 1411 kmem_free(dev, sizeof(*dev)); 1412 } 1413 1414 /* 1415 * Caller must hold alldevs_lock. 1416 */ 1417 static void 1418 config_devunlink(device_t dev, struct devicelist *garbage) 1419 { 1420 struct device_garbage *dg = &dev->dv_garbage; 1421 cfdriver_t cd = device_cfdriver(dev); 1422 int i; 1423 1424 KASSERT(mutex_owned(&alldevs_lock)); 1425 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending); 1426 1427 /* Unlink from device list. Link to garbage list. */ 1428 TAILQ_REMOVE(&alldevs, dev, dv_list); 1429 TAILQ_INSERT_TAIL(garbage, dev, dv_list); 1430 1431 /* Remove from cfdriver's array. */ 1432 cd->cd_devs[dev->dv_unit] = NULL; 1433 1434 /* 1435 * If the device now has no units in use, unlink its softc array. 1436 */ 1437 for (i = 0; i < cd->cd_ndevs; i++) { 1438 if (cd->cd_devs[i] != NULL) 1439 break; 1440 } 1441 /* Nothing found. Unlink, now. Deallocate, later. 
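	 * (The old cd_devs array is parked in dv_garbage and freed later
	 * from config_devdelete(), once garbage collection runs with no
	 * readers or writers in the device list.)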
*/ 1442 if (i == cd->cd_ndevs) { 1443 dg->dg_ndevs = cd->cd_ndevs; 1444 dg->dg_devs = cd->cd_devs; 1445 cd->cd_devs = NULL; 1446 cd->cd_ndevs = 0; 1447 } 1448 } 1449 1450 static void 1451 config_devdelete(device_t dev) 1452 { 1453 struct device_garbage *dg = &dev->dv_garbage; 1454 device_lock_t dvl = device_getlock(dev); 1455 1456 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending); 1457 1458 if (dg->dg_devs != NULL) 1459 kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs); 1460 1461 localcount_fini(dev->dv_localcount); 1462 kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount)); 1463 1464 cv_destroy(&dvl->dvl_cv); 1465 mutex_destroy(&dvl->dvl_mtx); 1466 1467 KASSERT(dev->dv_properties != NULL); 1468 prop_object_release(dev->dv_properties); 1469 1470 if (dev->dv_activity_handlers) 1471 panic("%s with registered handlers", __func__); 1472 1473 if (dev->dv_locators) { 1474 size_t amount = *--dev->dv_locators; 1475 kmem_free(dev->dv_locators, amount); 1476 } 1477 1478 config_devfree(dev); 1479 } 1480 1481 static int 1482 config_unit_nextfree(cfdriver_t cd, cfdata_t cf) 1483 { 1484 int unit = cf->cf_unit; 1485 1486 if (unit < 0) 1487 return -1; 1488 if (cf->cf_fstate == FSTATE_STAR) { 1489 for (; unit < cd->cd_ndevs; unit++) 1490 if (cd->cd_devs[unit] == NULL) 1491 break; 1492 /* 1493 * unit is now the unit of the first NULL device pointer, 1494 * or max(cd->cd_ndevs,cf->cf_unit). 1495 */ 1496 } else { 1497 if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL) 1498 unit = -1; 1499 } 1500 return unit; 1501 } 1502 1503 static int 1504 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf) 1505 { 1506 struct alldevs_foray af; 1507 int unit; 1508 1509 config_alldevs_enter(&af); 1510 for (;;) { 1511 unit = config_unit_nextfree(cd, cf); 1512 if (unit == -1) 1513 break; 1514 if (unit < cd->cd_ndevs) { 1515 cd->cd_devs[unit] = dev; 1516 dev->dv_unit = unit; 1517 break; 1518 } 1519 config_makeroom(unit, cd); 1520 } 1521 config_alldevs_exit(&af); 1522 1523 return unit; 1524 } 1525 1526 static device_t 1527 config_devalloc(const device_t parent, const cfdata_t cf, 1528 const struct cfargs_internal * const args) 1529 { 1530 cfdriver_t cd; 1531 cfattach_t ca; 1532 size_t lname, lunit; 1533 const char *xunit; 1534 int myunit; 1535 char num[10]; 1536 device_t dev; 1537 void *dev_private; 1538 const struct cfiattrdata *ia; 1539 device_lock_t dvl; 1540 1541 cd = config_cfdriver_lookup(cf->cf_name); 1542 if (cd == NULL) 1543 return NULL; 1544 1545 ca = config_cfattach_lookup_cd(cd, cf->cf_atname); 1546 if (ca == NULL) 1547 return NULL; 1548 1549 /* get memory for all device vars */ 1550 KASSERT(ca->ca_flags & DVF_PRIV_ALLOC); 1551 if (ca->ca_devsize > 0) { 1552 dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP); 1553 } else { 1554 dev_private = NULL; 1555 } 1556 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP); 1557 1558 dev->dv_handle = args->devhandle; 1559 1560 dev->dv_class = cd->cd_class; 1561 dev->dv_cfdata = cf; 1562 dev->dv_cfdriver = cd; 1563 dev->dv_cfattach = ca; 1564 dev->dv_activity_count = 0; 1565 dev->dv_activity_handlers = NULL; 1566 dev->dv_private = dev_private; 1567 dev->dv_flags = ca->ca_flags; /* inherit flags from class */ 1568 dev->dv_attaching = curlwp; 1569 1570 myunit = config_unit_alloc(dev, cd, cf); 1571 if (myunit == -1) { 1572 config_devfree(dev); 1573 return NULL; 1574 } 1575 1576 /* compute length of name and decimal expansion of unit number */ 1577 lname = strlen(cd->cd_name); 1578 xunit = number(&num[sizeof(num)], myunit); 1579 lunit = &num[sizeof(num)] - xunit; 1580 
if (lname + lunit > sizeof(dev->dv_xname)) 1581 panic("config_devalloc: device name too long"); 1582 1583 dvl = device_getlock(dev); 1584 1585 mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE); 1586 cv_init(&dvl->dvl_cv, "pmfsusp"); 1587 1588 memcpy(dev->dv_xname, cd->cd_name, lname); 1589 memcpy(dev->dv_xname + lname, xunit, lunit); 1590 dev->dv_parent = parent; 1591 if (parent != NULL) 1592 dev->dv_depth = parent->dv_depth + 1; 1593 else 1594 dev->dv_depth = 0; 1595 dev->dv_flags |= DVF_ACTIVE; /* always initially active */ 1596 if (args->locators) { 1597 KASSERT(parent); /* no locators at root */ 1598 ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver); 1599 dev->dv_locators = 1600 kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP); 1601 *dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1); 1602 memcpy(dev->dv_locators, args->locators, 1603 sizeof(int) * ia->ci_loclen); 1604 } 1605 dev->dv_properties = prop_dictionary_create(); 1606 KASSERT(dev->dv_properties != NULL); 1607 1608 prop_dictionary_set_string_nocopy(dev->dv_properties, 1609 "device-driver", dev->dv_cfdriver->cd_name); 1610 prop_dictionary_set_uint16(dev->dv_properties, 1611 "device-unit", dev->dv_unit); 1612 if (parent != NULL) { 1613 prop_dictionary_set_string(dev->dv_properties, 1614 "device-parent", device_xname(parent)); 1615 } 1616 1617 dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount), 1618 KM_SLEEP); 1619 localcount_init(dev->dv_localcount); 1620 1621 if (dev->dv_cfdriver->cd_attrs != NULL) 1622 config_add_attrib_dict(dev); 1623 1624 return dev; 1625 } 1626 1627 /* 1628 * Create an array of device attach attributes and add it 1629 * to the device's dv_properties dictionary. 1630 * 1631 * <key>interface-attributes</key> 1632 * <array> 1633 * <dict> 1634 * <key>attribute-name</key> 1635 * <string>foo</string> 1636 * <key>locators</key> 1637 * <array> 1638 * <dict> 1639 * <key>loc-name</key> 1640 * <string>foo-loc1</string> 1641 * </dict> 1642 * <dict> 1643 * <key>loc-name</key> 1644 * <string>foo-loc2</string> 1645 * <key>default</key> 1646 * <string>foo-loc2-default</string> 1647 * </dict> 1648 * ... 1649 * </array> 1650 * </dict> 1651 * ... 
1652 * </array> 1653 */ 1654 1655 static void 1656 config_add_attrib_dict(device_t dev) 1657 { 1658 int i, j; 1659 const struct cfiattrdata *ci; 1660 prop_dictionary_t attr_dict, loc_dict; 1661 prop_array_t attr_array, loc_array; 1662 1663 if ((attr_array = prop_array_create()) == NULL) 1664 return; 1665 1666 for (i = 0; ; i++) { 1667 if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL) 1668 break; 1669 if ((attr_dict = prop_dictionary_create()) == NULL) 1670 break; 1671 prop_dictionary_set_string_nocopy(attr_dict, "attribute-name", 1672 ci->ci_name); 1673 1674 /* Create an array of the locator names and defaults */ 1675 1676 if (ci->ci_loclen != 0 && 1677 (loc_array = prop_array_create()) != NULL) { 1678 for (j = 0; j < ci->ci_loclen; j++) { 1679 loc_dict = prop_dictionary_create(); 1680 if (loc_dict == NULL) 1681 continue; 1682 prop_dictionary_set_string_nocopy(loc_dict, 1683 "loc-name", ci->ci_locdesc[j].cld_name); 1684 if (ci->ci_locdesc[j].cld_defaultstr != NULL) 1685 prop_dictionary_set_string_nocopy( 1686 loc_dict, "default", 1687 ci->ci_locdesc[j].cld_defaultstr); 1688 prop_array_set(loc_array, j, loc_dict); 1689 prop_object_release(loc_dict); 1690 } 1691 prop_dictionary_set_and_rel(attr_dict, "locators", 1692 loc_array); 1693 } 1694 prop_array_add(attr_array, attr_dict); 1695 prop_object_release(attr_dict); 1696 } 1697 if (i == 0) 1698 prop_object_release(attr_array); 1699 else 1700 prop_dictionary_set_and_rel(dev->dv_properties, 1701 "interface-attributes", attr_array); 1702 1703 return; 1704 } 1705 1706 /* 1707 * Attach a found device. 1708 */ 1709 static device_t 1710 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print, 1711 const struct cfargs_internal * const args) 1712 { 1713 device_t dev; 1714 struct cftable *ct; 1715 const char *drvname; 1716 bool deferred; 1717 1718 KASSERT(KERNEL_LOCKED_P()); 1719 1720 dev = config_devalloc(parent, cf, args); 1721 if (!dev) 1722 panic("config_attach: allocation of device softc failed"); 1723 1724 /* XXX redundant - see below? */ 1725 if (cf->cf_fstate != FSTATE_STAR) { 1726 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND); 1727 cf->cf_fstate = FSTATE_FOUND; 1728 } 1729 1730 config_devlink(dev); 1731 1732 if (config_do_twiddle && cold) 1733 twiddle(); 1734 else 1735 aprint_naive("Found "); 1736 /* 1737 * We want the next two printfs for normal, verbose, and quiet, 1738 * but not silent (in which case, we're twiddling, instead). 1739 */ 1740 if (parent == ROOT) { 1741 aprint_naive("%s (root)", device_xname(dev)); 1742 aprint_normal("%s (root)", device_xname(dev)); 1743 } else { 1744 aprint_naive("%s at %s", device_xname(dev), 1745 device_xname(parent)); 1746 aprint_normal("%s at %s", device_xname(dev), 1747 device_xname(parent)); 1748 if (print) 1749 (void) (*print)(aux, NULL); 1750 } 1751 1752 /* 1753 * Before attaching, clobber any unfound devices that are 1754 * otherwise identical. 1755 * XXX code above is redundant? 1756 */ 1757 drvname = dev->dv_cfdriver->cd_name; 1758 TAILQ_FOREACH(ct, &allcftables, ct_list) { 1759 for (cf = ct->ct_cfdata; cf->cf_name; cf++) { 1760 if (STREQ(cf->cf_name, drvname) && 1761 cf->cf_unit == dev->dv_unit) { 1762 if (cf->cf_fstate == FSTATE_NOTFOUND) 1763 cf->cf_fstate = FSTATE_FOUND; 1764 } 1765 } 1766 } 1767 device_register(dev, aux); 1768 1769 /* Let userland know */ 1770 devmon_report_device(dev, true); 1771 1772 /* 1773 * Prevent detach until the driver's attach function, and all 1774 * deferred actions, have finished. 
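	 * (config_detach_enter() waits for dv_pending to drain before
	 * it allows a detach to proceed.)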
1775 */ 1776 config_pending_incr(dev); 1777 1778 /* Call the driver's attach function. */ 1779 (*dev->dv_cfattach->ca_attach)(parent, dev, aux); 1780 1781 /* 1782 * Allow other threads to acquire references to the device now 1783 * that the driver's attach function is done. 1784 */ 1785 mutex_enter(&config_misc_lock); 1786 KASSERT(dev->dv_attaching == curlwp); 1787 dev->dv_attaching = NULL; 1788 cv_broadcast(&config_misc_cv); 1789 mutex_exit(&config_misc_lock); 1790 1791 /* 1792 * Synchronous parts of attach are done. Allow detach, unless 1793 * the driver's attach function scheduled deferred actions. 1794 */ 1795 config_pending_decr(dev); 1796 1797 mutex_enter(&config_misc_lock); 1798 deferred = (dev->dv_pending != 0); 1799 mutex_exit(&config_misc_lock); 1800 1801 if (!deferred && !device_pmf_is_registered(dev)) 1802 aprint_debug_dev(dev, 1803 "WARNING: power management not supported\n"); 1804 1805 config_process_deferred(&deferred_config_queue, dev); 1806 1807 device_register_post_config(dev, aux); 1808 rnd_add_uint32(&rnd_autoconf_source, 0); 1809 return dev; 1810 } 1811 1812 device_t 1813 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print, 1814 const struct cfargs *cfargs) 1815 { 1816 struct cfargs_internal store; 1817 1818 KASSERT(KERNEL_LOCKED_P()); 1819 1820 return config_attach_internal(parent, cf, aux, print, 1821 cfargs_canonicalize(cfargs, &store)); 1822 } 1823 1824 /* 1825 * As above, but for pseudo-devices. Pseudo-devices attached in this 1826 * way are silently inserted into the device tree, and their children 1827 * attached. 1828 * 1829 * Note that because pseudo-devices are attached silently, any information 1830 * the attach routine wishes to print should be prefixed with the device 1831 * name by the attach routine. 1832 */ 1833 device_t 1834 config_attach_pseudo(cfdata_t cf) 1835 { 1836 device_t dev; 1837 1838 KERNEL_LOCK(1, NULL); 1839 1840 struct cfargs_internal args = { }; 1841 dev = config_devalloc(ROOT, cf, &args); 1842 if (!dev) 1843 goto out; 1844 1845 /* XXX mark busy in cfdata */ 1846 1847 if (cf->cf_fstate != FSTATE_STAR) { 1848 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND); 1849 cf->cf_fstate = FSTATE_FOUND; 1850 } 1851 1852 config_devlink(dev); 1853 1854 #if 0 /* XXXJRT not yet */ 1855 device_register(dev, NULL); /* like a root node */ 1856 #endif 1857 1858 /* Let userland know */ 1859 devmon_report_device(dev, true); 1860 1861 /* 1862 * Prevent detach until the driver's attach function, and all 1863 * deferred actions, have finished. 1864 */ 1865 config_pending_incr(dev); 1866 1867 /* Call the driver's attach function. */ 1868 (*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL); 1869 1870 /* 1871 * Allow other threads to acquire references to the device now 1872 * that the driver's attach function is done. 1873 */ 1874 mutex_enter(&config_misc_lock); 1875 KASSERT(dev->dv_attaching == curlwp); 1876 dev->dv_attaching = NULL; 1877 cv_broadcast(&config_misc_cv); 1878 mutex_exit(&config_misc_lock); 1879 1880 /* 1881 * Synchronous parts of attach are done. Allow detach, unless 1882 * the driver's attach function scheduled deferred actions. 1883 */ 1884 config_pending_decr(dev); 1885 1886 config_process_deferred(&deferred_config_queue, dev); 1887 1888 out: KERNEL_UNLOCK_ONE(NULL); 1889 return dev; 1890 } 1891 1892 /* 1893 * Caller must hold alldevs_lock. 
1894 */ 1895 static void 1896 config_collect_garbage(struct devicelist *garbage) 1897 { 1898 device_t dv; 1899 1900 KASSERT(!cpu_intr_p()); 1901 KASSERT(!cpu_softintr_p()); 1902 KASSERT(mutex_owned(&alldevs_lock)); 1903 1904 while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) { 1905 TAILQ_FOREACH(dv, &alldevs, dv_list) { 1906 if (dv->dv_del_gen != 0) 1907 break; 1908 } 1909 if (dv == NULL) { 1910 alldevs_garbage = false; 1911 break; 1912 } 1913 config_devunlink(dv, garbage); 1914 } 1915 KASSERT(mutex_owned(&alldevs_lock)); 1916 } 1917 1918 static void 1919 config_dump_garbage(struct devicelist *garbage) 1920 { 1921 device_t dv; 1922 1923 while ((dv = TAILQ_FIRST(garbage)) != NULL) { 1924 TAILQ_REMOVE(garbage, dv, dv_list); 1925 config_devdelete(dv); 1926 } 1927 } 1928 1929 static int 1930 config_detach_enter(device_t dev) 1931 { 1932 struct lwp *l __diagused; 1933 int error = 0; 1934 1935 mutex_enter(&config_misc_lock); 1936 1937 /* 1938 * Wait until attach has fully completed, and until any 1939 * concurrent detach (e.g., drvctl racing with USB event 1940 * thread) has completed. 1941 * 1942 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via 1943 * deviter) to ensure the winner of the race doesn't free the 1944 * device leading the loser of the race into use-after-free. 1945 * 1946 * XXX Not all callers do this! 1947 */ 1948 while (dev->dv_pending || dev->dv_detaching) { 1949 KASSERTMSG(dev->dv_detaching != curlwp, 1950 "recursively detaching %s", device_xname(dev)); 1951 error = cv_wait_sig(&config_misc_cv, &config_misc_lock); 1952 if (error) 1953 goto out; 1954 } 1955 1956 /* 1957 * Attach has completed, and no other concurrent detach is 1958 * running. Claim the device for detaching. This will cause 1959 * all new attempts to acquire references to block. 1960 */ 1961 KASSERTMSG((l = dev->dv_attaching) == NULL, 1962 "lwp %ld [%s] @ %p attaching %s", 1963 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l, 1964 device_xname(dev)); 1965 KASSERTMSG((l = dev->dv_detaching) == NULL, 1966 "lwp %ld [%s] @ %p detaching %s", 1967 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l, 1968 device_xname(dev)); 1969 dev->dv_detaching = curlwp; 1970 1971 out: mutex_exit(&config_misc_lock); 1972 return error; 1973 } 1974 1975 static void 1976 config_detach_exit(device_t dev) 1977 { 1978 struct lwp *l __diagused; 1979 1980 mutex_enter(&config_misc_lock); 1981 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s", 1982 device_xname(dev)); 1983 KASSERTMSG((l = dev->dv_detaching) == curlwp, 1984 "lwp %ld [%s] @ %p detaching %s", 1985 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l, 1986 device_xname(dev)); 1987 dev->dv_detaching = NULL; 1988 cv_broadcast(&config_misc_cv); 1989 mutex_exit(&config_misc_lock); 1990 } 1991 1992 /* 1993 * Detach a device. Optionally forced (e.g. because of hardware 1994 * removal) and quiet. Returns zero if successful, non-zero 1995 * (an error code) otherwise. 1996 * 1997 * Note that this code wants to be run from a process context, so 1998 * that the detach can sleep to allow processes which have a device 1999 * open to run and unwind their stacks. 
2000 */ 2001 int 2002 config_detach(device_t dev, int flags) 2003 { 2004 struct alldevs_foray af; 2005 struct cftable *ct; 2006 cfdata_t cf; 2007 const struct cfattach *ca; 2008 struct cfdriver *cd; 2009 device_t d __diagused; 2010 int rv = 0; 2011 2012 KERNEL_LOCK(1, NULL); 2013 2014 cf = dev->dv_cfdata; 2015 KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND || 2016 cf->cf_fstate == FSTATE_STAR), 2017 "config_detach: %s: bad device fstate: %d", 2018 device_xname(dev), cf ? cf->cf_fstate : -1); 2019 2020 cd = dev->dv_cfdriver; 2021 KASSERT(cd != NULL); 2022 2023 ca = dev->dv_cfattach; 2024 KASSERT(ca != NULL); 2025 2026 /* 2027 * Only one detach at a time, please -- and not until fully 2028 * attached. 2029 */ 2030 rv = config_detach_enter(dev); 2031 if (rv) { 2032 KERNEL_UNLOCK_ONE(NULL); 2033 return rv; 2034 } 2035 2036 mutex_enter(&alldevs_lock); 2037 if (dev->dv_del_gen != 0) { 2038 mutex_exit(&alldevs_lock); 2039 #ifdef DIAGNOSTIC 2040 printf("%s: %s is already detached\n", __func__, 2041 device_xname(dev)); 2042 #endif /* DIAGNOSTIC */ 2043 config_detach_exit(dev); 2044 KERNEL_UNLOCK_ONE(NULL); 2045 return ENOENT; 2046 } 2047 alldevs_nwrite++; 2048 mutex_exit(&alldevs_lock); 2049 2050 /* 2051 * Call the driver's .ca_detach function, unless it has none or 2052 * we are skipping it because it's unforced shutdown time and 2053 * the driver didn't ask to detach on shutdown. 2054 */ 2055 if (!detachall && 2056 (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN && 2057 (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) { 2058 rv = EOPNOTSUPP; 2059 } else if (ca->ca_detach != NULL) { 2060 rv = (*ca->ca_detach)(dev, flags); 2061 } else 2062 rv = EOPNOTSUPP; 2063 2064 KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d", 2065 device_xname(dev), rv); 2066 2067 /* 2068 * If it was not possible to detach the device, then we either 2069 * panic() (for the forced but failed case), or return an error. 2070 */ 2071 if (rv) { 2072 /* 2073 * Detach failed -- likely EOPNOTSUPP or EBUSY. Driver 2074 * must not have called config_detach_commit. 2075 */ 2076 KASSERTMSG(!dev->dv_detach_committed, 2077 "%s committed to detaching and then backed out, error=%d", 2078 device_xname(dev), rv); 2079 if (flags & DETACH_FORCE) { 2080 panic("config_detach: forced detach of %s failed (%d)", 2081 device_xname(dev), rv); 2082 } 2083 goto out; 2084 } 2085 2086 /* 2087 * The device has now been successfully detached. 2088 */ 2089 dev->dv_detach_done = true; 2090 2091 /* 2092 * If .ca_detach didn't commit to detach, then do that for it. 2093 * This wakes any pending device_lookup_acquire calls so they 2094 * will fail. 2095 */ 2096 config_detach_commit(dev); 2097 2098 /* 2099 * If it was possible to detach the device, ensure that the 2100 * device is deactivated. 2101 */ 2102 dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */ 2103 2104 /* 2105 * Wait for all device_lookup_acquire references -- mostly, for 2106 * all attempts to open the device -- to drain. It is the 2107 * responsibility of .ca_detach to ensure anything with open 2108 * references will be interrupted and release them promptly, 2109 * not block indefinitely. All new attempts to acquire 2110 * references will fail, as config_detach_commit has arranged 2111 * by now. 
2112 */ 2113 mutex_enter(&config_misc_lock); 2114 localcount_drain(dev->dv_localcount, 2115 &config_misc_cv, &config_misc_lock); 2116 mutex_exit(&config_misc_lock); 2117 2118 /* Let userland know */ 2119 devmon_report_device(dev, false); 2120 2121 #ifdef DIAGNOSTIC 2122 /* 2123 * Sanity: If you're successfully detached, you should have no 2124 * children. (Note that because children must be attached 2125 * after parents, we only need to search the latter part of 2126 * the list.) 2127 */ 2128 mutex_enter(&alldevs_lock); 2129 for (d = TAILQ_NEXT(dev, dv_list); d != NULL; 2130 d = TAILQ_NEXT(d, dv_list)) { 2131 if (d->dv_parent == dev && d->dv_del_gen == 0) { 2132 printf("config_detach: detached device %s" 2133 " has children %s\n", device_xname(dev), 2134 device_xname(d)); 2135 panic("config_detach"); 2136 } 2137 } 2138 mutex_exit(&alldevs_lock); 2139 #endif 2140 2141 /* notify the parent that the child is gone */ 2142 if (dev->dv_parent) { 2143 device_t p = dev->dv_parent; 2144 if (p->dv_cfattach->ca_childdetached) 2145 (*p->dv_cfattach->ca_childdetached)(p, dev); 2146 } 2147 2148 /* 2149 * Mark cfdata to show that the unit can be reused, if possible. 2150 */ 2151 TAILQ_FOREACH(ct, &allcftables, ct_list) { 2152 for (cf = ct->ct_cfdata; cf->cf_name; cf++) { 2153 if (STREQ(cf->cf_name, cd->cd_name)) { 2154 if (cf->cf_fstate == FSTATE_FOUND && 2155 cf->cf_unit == dev->dv_unit) 2156 cf->cf_fstate = FSTATE_NOTFOUND; 2157 } 2158 } 2159 } 2160 2161 if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0) 2162 aprint_normal_dev(dev, "detached\n"); 2163 2164 out: 2165 config_detach_exit(dev); 2166 2167 config_alldevs_enter(&af); 2168 KASSERT(alldevs_nwrite != 0); 2169 --alldevs_nwrite; 2170 if (rv == 0 && dev->dv_del_gen == 0) { 2171 if (alldevs_nwrite == 0 && alldevs_nread == 0) 2172 config_devunlink(dev, &af.af_garbage); 2173 else { 2174 dev->dv_del_gen = alldevs_gen; 2175 alldevs_garbage = true; 2176 } 2177 } 2178 config_alldevs_exit(&af); 2179 2180 KERNEL_UNLOCK_ONE(NULL); 2181 2182 return rv; 2183 } 2184 2185 /* 2186 * config_detach_commit(dev) 2187 * 2188 * Issued by a driver's .ca_detach routine to notify anyone 2189 * waiting in device_lookup_acquire that the driver is committed 2190 * to detaching the device, which allows device_lookup_acquire to 2191 * wake up and fail immediately. 2192 * 2193 * Safe to call multiple times -- idempotent. Must be called 2194 * during config_detach_enter/exit. Safe to use with 2195 * device_lookup because the device is not actually removed from 2196 * the table until after config_detach_exit. 2197 */ 2198 void 2199 config_detach_commit(device_t dev) 2200 { 2201 struct lwp *l __diagused; 2202 2203 mutex_enter(&config_misc_lock); 2204 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s", 2205 device_xname(dev)); 2206 KASSERTMSG((l = dev->dv_detaching) == curlwp, 2207 "lwp %ld [%s] @ %p detaching %s", 2208 (long)l->l_lid, (l->l_name ? 
l->l_name : l->l_proc->p_comm), l, 2209 device_xname(dev)); 2210 dev->dv_detach_committed = true; 2211 cv_broadcast(&config_misc_cv); 2212 mutex_exit(&config_misc_lock); 2213 } 2214 2215 int 2216 config_detach_children(device_t parent, int flags) 2217 { 2218 device_t dv; 2219 deviter_t di; 2220 int error = 0; 2221 2222 KASSERT(KERNEL_LOCKED_P()); 2223 2224 for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL; 2225 dv = deviter_next(&di)) { 2226 if (device_parent(dv) != parent) 2227 continue; 2228 if ((error = config_detach(dv, flags)) != 0) 2229 break; 2230 } 2231 deviter_release(&di); 2232 return error; 2233 } 2234 2235 device_t 2236 shutdown_first(struct shutdown_state *s) 2237 { 2238 if (!s->initialized) { 2239 deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST); 2240 s->initialized = true; 2241 } 2242 return shutdown_next(s); 2243 } 2244 2245 device_t 2246 shutdown_next(struct shutdown_state *s) 2247 { 2248 device_t dv; 2249 2250 while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv)) 2251 ; 2252 2253 if (dv == NULL) 2254 s->initialized = false; 2255 2256 return dv; 2257 } 2258 2259 bool 2260 config_detach_all(int how) 2261 { 2262 static struct shutdown_state s; 2263 device_t curdev; 2264 bool progress = false; 2265 int flags; 2266 2267 KERNEL_LOCK(1, NULL); 2268 2269 if ((how & (RB_NOSYNC|RB_DUMP)) != 0) 2270 goto out; 2271 2272 if ((how & RB_POWERDOWN) == RB_POWERDOWN) 2273 flags = DETACH_SHUTDOWN | DETACH_POWEROFF; 2274 else 2275 flags = DETACH_SHUTDOWN; 2276 2277 for (curdev = shutdown_first(&s); curdev != NULL; 2278 curdev = shutdown_next(&s)) { 2279 aprint_debug(" detaching %s, ", device_xname(curdev)); 2280 if (config_detach(curdev, flags) == 0) { 2281 progress = true; 2282 aprint_debug("success."); 2283 } else 2284 aprint_debug("failed."); 2285 } 2286 2287 out: KERNEL_UNLOCK_ONE(NULL); 2288 return progress; 2289 } 2290 2291 static bool 2292 device_is_ancestor_of(device_t ancestor, device_t descendant) 2293 { 2294 device_t dv; 2295 2296 for (dv = descendant; dv != NULL; dv = device_parent(dv)) { 2297 if (device_parent(dv) == ancestor) 2298 return true; 2299 } 2300 return false; 2301 } 2302 2303 int 2304 config_deactivate(device_t dev) 2305 { 2306 deviter_t di; 2307 const struct cfattach *ca; 2308 device_t descendant; 2309 int s, rv = 0, oflags; 2310 2311 for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST); 2312 descendant != NULL; 2313 descendant = deviter_next(&di)) { 2314 if (dev != descendant && 2315 !device_is_ancestor_of(dev, descendant)) 2316 continue; 2317 2318 if ((descendant->dv_flags & DVF_ACTIVE) == 0) 2319 continue; 2320 2321 ca = descendant->dv_cfattach; 2322 oflags = descendant->dv_flags; 2323 2324 descendant->dv_flags &= ~DVF_ACTIVE; 2325 if (ca->ca_activate == NULL) 2326 continue; 2327 s = splhigh(); 2328 rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE); 2329 splx(s); 2330 if (rv != 0) 2331 descendant->dv_flags = oflags; 2332 } 2333 deviter_release(&di); 2334 return rv; 2335 } 2336 2337 /* 2338 * Defer the configuration of the specified device until all 2339 * of its parent's devices have been attached. 
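 *
 * A hedged usage sketch (the "xyz" names are hypothetical): a device
 * that cannot finish configuring until its siblings exist defers the
 * rest of its work from its attach routine; the deferred callback runs
 * once the parent has finished attaching.
 *
 *	static void
 *	xyz_attach_deferred(device_t self)
 *	{
 *		aprint_normal_dev(self, "finishing configuration\n");
 *	}
 *
 *	static void
 *	xyz_attach(device_t parent, device_t self, void *aux)
 *	{
 *		aprint_naive("\n");
 *		aprint_normal("\n");
 *		config_defer(self, xyz_attach_deferred);
 *	}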
2340 */ 2341 void 2342 config_defer(device_t dev, void (*func)(device_t)) 2343 { 2344 struct deferred_config *dc; 2345 2346 if (dev->dv_parent == NULL) 2347 panic("config_defer: can't defer config of a root device"); 2348 2349 dc = kmem_alloc(sizeof(*dc), KM_SLEEP); 2350 2351 config_pending_incr(dev); 2352 2353 mutex_enter(&config_misc_lock); 2354 #ifdef DIAGNOSTIC 2355 struct deferred_config *odc; 2356 TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) { 2357 if (odc->dc_dev == dev) 2358 panic("config_defer: deferred twice"); 2359 } 2360 #endif 2361 dc->dc_dev = dev; 2362 dc->dc_func = func; 2363 TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue); 2364 mutex_exit(&config_misc_lock); 2365 } 2366 2367 /* 2368 * Defer some autoconfiguration for a device until after interrupts 2369 * are enabled. 2370 */ 2371 void 2372 config_interrupts(device_t dev, void (*func)(device_t)) 2373 { 2374 struct deferred_config *dc; 2375 2376 /* 2377 * If interrupts are enabled, callback now. 2378 */ 2379 if (cold == 0) { 2380 (*func)(dev); 2381 return; 2382 } 2383 2384 dc = kmem_alloc(sizeof(*dc), KM_SLEEP); 2385 2386 config_pending_incr(dev); 2387 2388 mutex_enter(&config_misc_lock); 2389 #ifdef DIAGNOSTIC 2390 struct deferred_config *odc; 2391 TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) { 2392 if (odc->dc_dev == dev) 2393 panic("config_interrupts: deferred twice"); 2394 } 2395 #endif 2396 dc->dc_dev = dev; 2397 dc->dc_func = func; 2398 TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue); 2399 mutex_exit(&config_misc_lock); 2400 } 2401 2402 /* 2403 * Defer some autoconfiguration for a device until after root file system 2404 * is mounted (to load firmware etc). 2405 */ 2406 void 2407 config_mountroot(device_t dev, void (*func)(device_t)) 2408 { 2409 struct deferred_config *dc; 2410 2411 /* 2412 * If root file system is mounted, callback now. 2413 */ 2414 if (root_is_mounted) { 2415 (*func)(dev); 2416 return; 2417 } 2418 2419 dc = kmem_alloc(sizeof(*dc), KM_SLEEP); 2420 2421 mutex_enter(&config_misc_lock); 2422 #ifdef DIAGNOSTIC 2423 struct deferred_config *odc; 2424 TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) { 2425 if (odc->dc_dev == dev) 2426 panic("%s: deferred twice", __func__); 2427 } 2428 #endif 2429 2430 dc->dc_dev = dev; 2431 dc->dc_func = func; 2432 TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue); 2433 mutex_exit(&config_misc_lock); 2434 } 2435 2436 /* 2437 * Process a deferred configuration queue. 2438 */ 2439 static void 2440 config_process_deferred(struct deferred_config_head *queue, device_t parent) 2441 { 2442 struct deferred_config *dc; 2443 2444 KASSERT(KERNEL_LOCKED_P()); 2445 2446 mutex_enter(&config_misc_lock); 2447 dc = TAILQ_FIRST(queue); 2448 while (dc) { 2449 if (parent == NULL || dc->dc_dev->dv_parent == parent) { 2450 TAILQ_REMOVE(queue, dc, dc_queue); 2451 mutex_exit(&config_misc_lock); 2452 2453 (*dc->dc_func)(dc->dc_dev); 2454 config_pending_decr(dc->dc_dev); 2455 kmem_free(dc, sizeof(*dc)); 2456 2457 mutex_enter(&config_misc_lock); 2458 /* Restart, queue might have changed */ 2459 dc = TAILQ_FIRST(queue); 2460 } else { 2461 dc = TAILQ_NEXT(dc, dc_queue); 2462 } 2463 } 2464 mutex_exit(&config_misc_lock); 2465 } 2466 2467 /* 2468 * Manipulate the config_pending semaphore. 
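 *
 * config_defer() and config_interrupts() above take and release this
 * counter themselves.  A driver that continues its attach in its own
 * kernel thread can use it directly so that config_finalize() waits
 * for that work; a hedged sketch (the "xyz" names are hypothetical):
 *
 *	static void
 *	xyz_attach_thread(void *arg)
 *	{
 *		device_t self = arg;
 *
 *		xyz_slow_probe(self);
 *		config_pending_decr(self);
 *		kthread_exit(0);
 *	}
 *
 *	In xyz_attach():
 *
 *		config_pending_incr(self);
 *		if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *		    xyz_attach_thread, self, NULL, "xyzattach") != 0)
 *			config_pending_decr(self);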
2469 */ 2470 void 2471 config_pending_incr(device_t dev) 2472 { 2473 2474 mutex_enter(&config_misc_lock); 2475 KASSERTMSG(dev->dv_pending < INT_MAX, 2476 "%s: excess config_pending_incr", device_xname(dev)); 2477 if (dev->dv_pending++ == 0) 2478 TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list); 2479 #ifdef DEBUG_AUTOCONF 2480 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending); 2481 #endif 2482 mutex_exit(&config_misc_lock); 2483 } 2484 2485 void 2486 config_pending_decr(device_t dev) 2487 { 2488 2489 mutex_enter(&config_misc_lock); 2490 KASSERTMSG(dev->dv_pending > 0, 2491 "%s: excess config_pending_decr", device_xname(dev)); 2492 if (--dev->dv_pending == 0) { 2493 TAILQ_REMOVE(&config_pending, dev, dv_pending_list); 2494 cv_broadcast(&config_misc_cv); 2495 } 2496 #ifdef DEBUG_AUTOCONF 2497 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending); 2498 #endif 2499 mutex_exit(&config_misc_lock); 2500 } 2501 2502 /* 2503 * Register a "finalization" routine. Finalization routines are 2504 * called iteratively once all real devices have been found during 2505 * autoconfiguration, for as long as any one finalizer has done 2506 * any work. 2507 */ 2508 int 2509 config_finalize_register(device_t dev, int (*fn)(device_t)) 2510 { 2511 struct finalize_hook *f; 2512 int error = 0; 2513 2514 KERNEL_LOCK(1, NULL); 2515 2516 /* 2517 * If finalization has already been done, invoke the 2518 * callback function now. 2519 */ 2520 if (config_finalize_done) { 2521 while ((*fn)(dev) != 0) 2522 /* loop */ ; 2523 goto out; 2524 } 2525 2526 /* Ensure this isn't already on the list. */ 2527 TAILQ_FOREACH(f, &config_finalize_list, f_list) { 2528 if (f->f_func == fn && f->f_dev == dev) { 2529 error = EEXIST; 2530 goto out; 2531 } 2532 } 2533 2534 f = kmem_alloc(sizeof(*f), KM_SLEEP); 2535 f->f_func = fn; 2536 f->f_dev = dev; 2537 TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list); 2538 2539 /* Success! */ 2540 error = 0; 2541 2542 out: KERNEL_UNLOCK_ONE(NULL); 2543 return error; 2544 } 2545 2546 void 2547 config_finalize(void) 2548 { 2549 struct finalize_hook *f; 2550 struct pdevinit *pdev; 2551 extern struct pdevinit pdevinit[]; 2552 int errcnt, rv; 2553 2554 /* 2555 * Now that device driver threads have been created, wait for 2556 * them to finish any deferred autoconfiguration. 2557 */ 2558 mutex_enter(&config_misc_lock); 2559 while (!TAILQ_EMPTY(&config_pending)) { 2560 device_t dev; 2561 int error; 2562 2563 error = cv_timedwait(&config_misc_cv, &config_misc_lock, 2564 mstohz(1000)); 2565 if (error == EWOULDBLOCK) { 2566 aprint_debug("waiting for devices:"); 2567 TAILQ_FOREACH(dev, &config_pending, dv_pending_list) 2568 aprint_debug(" %s", device_xname(dev)); 2569 aprint_debug("\n"); 2570 } 2571 } 2572 mutex_exit(&config_misc_lock); 2573 2574 KERNEL_LOCK(1, NULL); 2575 2576 /* Attach pseudo-devices. */ 2577 for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++) 2578 (*pdev->pdev_attach)(pdev->pdev_count); 2579 2580 /* Run the hooks until none of them does any work. */ 2581 do { 2582 rv = 0; 2583 TAILQ_FOREACH(f, &config_finalize_list, f_list) 2584 rv |= (*f->f_func)(f->f_dev); 2585 } while (rv != 0); 2586 2587 config_finalize_done = 1; 2588 2589 /* Now free all the hooks. 
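 * They are no longer needed: config_finalize_done is set above, so
 * config_finalize_register() will from now on run late-registered
 * hooks directly instead of queueing them.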
*/ 2590 while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) { 2591 TAILQ_REMOVE(&config_finalize_list, f, f_list); 2592 kmem_free(f, sizeof(*f)); 2593 } 2594 2595 KERNEL_UNLOCK_ONE(NULL); 2596 2597 errcnt = aprint_get_error_count(); 2598 if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 && 2599 (boothowto & AB_VERBOSE) == 0) { 2600 mutex_enter(&config_misc_lock); 2601 if (config_do_twiddle) { 2602 config_do_twiddle = 0; 2603 printf_nolog(" done.\n"); 2604 } 2605 mutex_exit(&config_misc_lock); 2606 } 2607 if (errcnt != 0) { 2608 printf("WARNING: %d error%s while detecting hardware; " 2609 "check system log.\n", errcnt, 2610 errcnt == 1 ? "" : "s"); 2611 } 2612 } 2613 2614 void 2615 config_twiddle_init(void) 2616 { 2617 2618 if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) { 2619 config_do_twiddle = 1; 2620 } 2621 callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL); 2622 } 2623 2624 void 2625 config_twiddle_fn(void *cookie) 2626 { 2627 2628 mutex_enter(&config_misc_lock); 2629 if (config_do_twiddle) { 2630 twiddle(); 2631 callout_schedule(&config_twiddle_ch, mstohz(100)); 2632 } 2633 mutex_exit(&config_misc_lock); 2634 } 2635 2636 static void 2637 config_alldevs_enter(struct alldevs_foray *af) 2638 { 2639 TAILQ_INIT(&af->af_garbage); 2640 mutex_enter(&alldevs_lock); 2641 config_collect_garbage(&af->af_garbage); 2642 } 2643 2644 static void 2645 config_alldevs_exit(struct alldevs_foray *af) 2646 { 2647 mutex_exit(&alldevs_lock); 2648 config_dump_garbage(&af->af_garbage); 2649 } 2650 2651 /* 2652 * device_lookup: 2653 * 2654 * Look up a device instance for a given driver. 2655 * 2656 * Caller is responsible for ensuring the device's state is 2657 * stable, either by holding a reference already obtained with 2658 * device_lookup_acquire or by otherwise ensuring the device is 2659 * attached and can't be detached (e.g., holding an open device 2660 * node and ensuring *_detach calls vdevgone). 2661 * 2662 * XXX Find a way to assert this. 2663 * 2664 * Safe for use up to and including interrupt context at IPL_VM. 2665 * Never sleeps. 2666 */ 2667 device_t 2668 device_lookup(cfdriver_t cd, int unit) 2669 { 2670 device_t dv; 2671 2672 mutex_enter(&alldevs_lock); 2673 if (unit < 0 || unit >= cd->cd_ndevs) 2674 dv = NULL; 2675 else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0) 2676 dv = NULL; 2677 mutex_exit(&alldevs_lock); 2678 2679 return dv; 2680 } 2681 2682 /* 2683 * device_lookup_private: 2684 * 2685 * Look up a softc instance for a given driver. 2686 */ 2687 void * 2688 device_lookup_private(cfdriver_t cd, int unit) 2689 { 2690 2691 return device_private(device_lookup(cd, unit)); 2692 } 2693 2694 /* 2695 * device_lookup_acquire: 2696 * 2697 * Look up a device instance for a given driver, and return a 2698 * reference to it that must be released by device_release. 2699 * 2700 * => If the device is still attaching, blocks until *_attach has 2701 * returned. 2702 * 2703 * => If the device is detaching, blocks until *_detach has 2704 * returned. May succeed or fail in that case, depending on 2705 * whether *_detach has backed out (EBUSY) or committed to 2706 * detaching. 2707 * 2708 * May sleep. 2709 */ 2710 device_t 2711 device_lookup_acquire(cfdriver_t cd, int unit) 2712 { 2713 device_t dv; 2714 2715 ASSERT_SLEEPABLE(); 2716 2717 /* XXX This should have a pserialized fast path -- TBD. 
*/ 2718 mutex_enter(&config_misc_lock); 2719 mutex_enter(&alldevs_lock); 2720 retry: if (unit < 0 || unit >= cd->cd_ndevs || 2721 (dv = cd->cd_devs[unit]) == NULL || 2722 dv->dv_del_gen != 0 || 2723 dv->dv_detach_committed) { 2724 dv = NULL; 2725 } else { 2726 /* 2727 * Wait for the device to stabilize, if attaching or 2728 * detaching. Either way we must wait for *_attach or 2729 * *_detach to complete, and either way we must retry: 2730 * even if detaching, *_detach might fail (EBUSY) so 2731 * the device may still be there. 2732 */ 2733 if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) || 2734 dv->dv_detaching != NULL) { 2735 mutex_exit(&alldevs_lock); 2736 cv_wait(&config_misc_cv, &config_misc_lock); 2737 mutex_enter(&alldevs_lock); 2738 goto retry; 2739 } 2740 localcount_acquire(dv->dv_localcount); 2741 } 2742 mutex_exit(&alldevs_lock); 2743 mutex_exit(&config_misc_lock); 2744 2745 return dv; 2746 } 2747 2748 /* 2749 * device_release: 2750 * 2751 * Release a reference to a device acquired with 2752 * device_lookup_acquire. 2753 */ 2754 void 2755 device_release(device_t dv) 2756 { 2757 2758 localcount_release(dv->dv_localcount, 2759 &config_misc_cv, &config_misc_lock); 2760 } 2761 2762 /* 2763 * device_find_by_xname: 2764 * 2765 * Returns the device of the given name or NULL if it doesn't exist. 2766 */ 2767 device_t 2768 device_find_by_xname(const char *name) 2769 { 2770 device_t dv; 2771 deviter_t di; 2772 2773 for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) { 2774 if (strcmp(device_xname(dv), name) == 0) 2775 break; 2776 } 2777 deviter_release(&di); 2778 2779 return dv; 2780 } 2781 2782 /* 2783 * device_find_by_driver_unit: 2784 * 2785 * Returns the device of the given driver name and unit or 2786 * NULL if it doesn't exist. 
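 *
 * A hedged usage sketch (the driver name and the error handling are
 * only examples):
 *
 *	device_t dv;
 *
 *	dv = device_find_by_driver_unit("wd", 0);
 *	if (dv == NULL)
 *		return ENXIO;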
2787 */
2788 device_t
2789 device_find_by_driver_unit(const char *name, int unit)
2790 {
2791 struct cfdriver *cd;
2792
2793 if ((cd = config_cfdriver_lookup(name)) == NULL)
2794 return NULL;
2795 return device_lookup(cd, unit);
2796 }
2797
2798 static bool
2799 match_strcmp(const char * const s1, const char * const s2)
2800 {
2801 return strcmp(s1, s2) == 0;
2802 }
2803
2804 static bool
2805 match_pmatch(const char * const s1, const char * const s2)
2806 {
2807 return pmatch(s1, s2, NULL) == 2;
2808 }
2809
2810 static bool
2811 strarray_match_internal(const char ** const strings,
2812 unsigned int const nstrings, const char * const str,
2813 unsigned int * const indexp,
2814 bool (*match_fn)(const char *, const char *))
2815 {
2816 unsigned int i;
2817
2818 if (strings == NULL || nstrings == 0) {
2819 return false;
2820 }
2821
2822 for (i = 0; i < nstrings; i++) {
2823 if ((*match_fn)(strings[i], str)) {
2824 *indexp = i;
2825 return true;
2826 }
2827 }
2828
2829 return false;
2830 }
2831
2832 static int
2833 strarray_match(const char ** const strings, unsigned int const nstrings,
2834 const char * const str)
2835 {
2836 unsigned int idx;
2837
2838 if (strarray_match_internal(strings, nstrings, str, &idx,
2839 match_strcmp)) {
2840 return (int)(nstrings - idx);
2841 }
2842 return 0;
2843 }
2844
2845 static int
2846 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2847 const char * const pattern)
2848 {
2849 unsigned int idx;
2850
2851 if (strarray_match_internal(strings, nstrings, pattern, &idx,
2852 match_pmatch)) {
2853 return (int)(nstrings - idx);
2854 }
2855 return 0;
2856 }
2857
2858 static int
2859 device_compatible_match_strarray_internal(
2860 const char **device_compats, int ndevice_compats,
2861 const struct device_compatible_entry *driver_compats,
2862 const struct device_compatible_entry **matching_entryp,
2863 int (*match_fn)(const char **, unsigned int, const char *))
2864 {
2865 const struct device_compatible_entry *dce = NULL;
2866 int rv;
2867
2868 if (ndevice_compats == 0 || device_compats == NULL ||
2869 driver_compats == NULL)
2870 return 0;
2871
2872 for (dce = driver_compats; dce->compat != NULL; dce++) {
2873 rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2874 if (rv != 0) {
2875 if (matching_entryp != NULL) {
2876 *matching_entryp = dce;
2877 }
2878 return rv;
2879 }
2880 }
2881 return 0;
2882 }
2883
2884 /*
2885 * device_compatible_match:
2886 *
2887 * Match a driver's "compatible" data against a device's
2888 * "compatible" strings. Returns a result weighted by
2889 * which device "compatible" string was matched.
2890 */
2891 int
2892 device_compatible_match(const char **device_compats, int ndevice_compats,
2893 const struct device_compatible_entry *driver_compats)
2894 {
2895 return device_compatible_match_strarray_internal(device_compats,
2896 ndevice_compats, driver_compats, NULL, strarray_match);
2897 }
2898
2899 /*
2900 * device_compatible_pmatch:
2901 *
2902 * Like device_compatible_match(), but uses pmatch(9) to compare
2903 * the device "compatible" strings against patterns in the
2904 * driver's "compatible" data.
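 *
 * A hedged usage sketch (the table contents and the attach-args field
 * names are made up): a match routine scores itself against the
 * device's "compatible" strings; glob-style patterns such as the
 * trailing "*" here are only meaningful with the pmatch variants.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,frobnicator-*" },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	static int
 *	xyz_match(device_t parent, cfdata_t cf, void *aux)
 *	{
 *		struct xyzbus_attach_args *xa = aux;
 *
 *		return device_compatible_pmatch(xa->xa_compat,
 *		    xa->xa_ncompat, compat_data);
 *	}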
2905 */ 2906 int 2907 device_compatible_pmatch(const char **device_compats, int ndevice_compats, 2908 const struct device_compatible_entry *driver_compats) 2909 { 2910 return device_compatible_match_strarray_internal(device_compats, 2911 ndevice_compats, driver_compats, NULL, strarray_pmatch); 2912 } 2913 2914 static int 2915 device_compatible_match_strlist_internal( 2916 const char * const device_compats, size_t const device_compatsize, 2917 const struct device_compatible_entry *driver_compats, 2918 const struct device_compatible_entry **matching_entryp, 2919 int (*match_fn)(const char *, size_t, const char *)) 2920 { 2921 const struct device_compatible_entry *dce = NULL; 2922 int rv; 2923 2924 if (device_compats == NULL || device_compatsize == 0 || 2925 driver_compats == NULL) 2926 return 0; 2927 2928 for (dce = driver_compats; dce->compat != NULL; dce++) { 2929 rv = (*match_fn)(device_compats, device_compatsize, 2930 dce->compat); 2931 if (rv != 0) { 2932 if (matching_entryp != NULL) { 2933 *matching_entryp = dce; 2934 } 2935 return rv; 2936 } 2937 } 2938 return 0; 2939 } 2940 2941 /* 2942 * device_compatible_match_strlist: 2943 * 2944 * Like device_compatible_match(), but take the device 2945 * "compatible" strings as an OpenFirmware-style string 2946 * list. 2947 */ 2948 int 2949 device_compatible_match_strlist( 2950 const char * const device_compats, size_t const device_compatsize, 2951 const struct device_compatible_entry *driver_compats) 2952 { 2953 return device_compatible_match_strlist_internal(device_compats, 2954 device_compatsize, driver_compats, NULL, strlist_match); 2955 } 2956 2957 /* 2958 * device_compatible_pmatch_strlist: 2959 * 2960 * Like device_compatible_pmatch(), but take the device 2961 * "compatible" strings as an OpenFirmware-style string 2962 * list. 2963 */ 2964 int 2965 device_compatible_pmatch_strlist( 2966 const char * const device_compats, size_t const device_compatsize, 2967 const struct device_compatible_entry *driver_compats) 2968 { 2969 return device_compatible_match_strlist_internal(device_compats, 2970 device_compatsize, driver_compats, NULL, strlist_pmatch); 2971 } 2972 2973 static int 2974 device_compatible_match_id_internal( 2975 uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id, 2976 const struct device_compatible_entry *driver_compats, 2977 const struct device_compatible_entry **matching_entryp) 2978 { 2979 const struct device_compatible_entry *dce = NULL; 2980 2981 if (mask == 0) 2982 return 0; 2983 2984 for (dce = driver_compats; dce->id != sentinel_id; dce++) { 2985 if ((id & mask) == dce->id) { 2986 if (matching_entryp != NULL) { 2987 *matching_entryp = dce; 2988 } 2989 return 1; 2990 } 2991 } 2992 return 0; 2993 } 2994 2995 /* 2996 * device_compatible_match_id: 2997 * 2998 * Like device_compatible_match(), but takes a single 2999 * unsigned integer device ID. 3000 */ 3001 int 3002 device_compatible_match_id( 3003 uintptr_t const id, uintptr_t const sentinel_id, 3004 const struct device_compatible_entry *driver_compats) 3005 { 3006 return device_compatible_match_id_internal(id, (uintptr_t)-1, 3007 sentinel_id, driver_compats, NULL); 3008 } 3009 3010 /* 3011 * device_compatible_lookup: 3012 * 3013 * Look up and return the device_compatible_entry, using the 3014 * same matching criteria used by device_compatible_match(). 
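 *
 * A hedged usage sketch (the table, the per-entry values, and the
 * attach-args field names are illustrative): the returned entry gives
 * the driver access to its own per-variant data for the matched
 * string.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,frobnicator",    .value = 1 },
 *		{ .compat = "acme,frobnicator-ng", .value = 2 },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	const struct device_compatible_entry *dce;
 *
 *	dce = device_compatible_lookup(xa->xa_compat, xa->xa_ncompat,
 *	    compat_data);
 *	if (dce != NULL)
 *		sc->sc_variant = dce->value;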
3015 */ 3016 const struct device_compatible_entry * 3017 device_compatible_lookup(const char **device_compats, int ndevice_compats, 3018 const struct device_compatible_entry *driver_compats) 3019 { 3020 const struct device_compatible_entry *dce; 3021 3022 if (device_compatible_match_strarray_internal(device_compats, 3023 ndevice_compats, driver_compats, &dce, strarray_match)) { 3024 return dce; 3025 } 3026 return NULL; 3027 } 3028 3029 /* 3030 * device_compatible_plookup: 3031 * 3032 * Look up and return the device_compatible_entry, using the 3033 * same matching criteria used by device_compatible_pmatch(). 3034 */ 3035 const struct device_compatible_entry * 3036 device_compatible_plookup(const char **device_compats, int ndevice_compats, 3037 const struct device_compatible_entry *driver_compats) 3038 { 3039 const struct device_compatible_entry *dce; 3040 3041 if (device_compatible_match_strarray_internal(device_compats, 3042 ndevice_compats, driver_compats, &dce, strarray_pmatch)) { 3043 return dce; 3044 } 3045 return NULL; 3046 } 3047 3048 /* 3049 * device_compatible_lookup_strlist: 3050 * 3051 * Like device_compatible_lookup(), but take the device 3052 * "compatible" strings as an OpenFirmware-style string 3053 * list. 3054 */ 3055 const struct device_compatible_entry * 3056 device_compatible_lookup_strlist( 3057 const char * const device_compats, size_t const device_compatsize, 3058 const struct device_compatible_entry *driver_compats) 3059 { 3060 const struct device_compatible_entry *dce; 3061 3062 if (device_compatible_match_strlist_internal(device_compats, 3063 device_compatsize, driver_compats, &dce, strlist_match)) { 3064 return dce; 3065 } 3066 return NULL; 3067 } 3068 3069 /* 3070 * device_compatible_plookup_strlist: 3071 * 3072 * Like device_compatible_plookup(), but take the device 3073 * "compatible" strings as an OpenFirmware-style string 3074 * list. 3075 */ 3076 const struct device_compatible_entry * 3077 device_compatible_plookup_strlist( 3078 const char * const device_compats, size_t const device_compatsize, 3079 const struct device_compatible_entry *driver_compats) 3080 { 3081 const struct device_compatible_entry *dce; 3082 3083 if (device_compatible_match_strlist_internal(device_compats, 3084 device_compatsize, driver_compats, &dce, strlist_pmatch)) { 3085 return dce; 3086 } 3087 return NULL; 3088 } 3089 3090 /* 3091 * device_compatible_lookup_id: 3092 * 3093 * Like device_compatible_lookup(), but takes a single 3094 * unsigned integer device ID. 3095 */ 3096 const struct device_compatible_entry * 3097 device_compatible_lookup_id( 3098 uintptr_t const id, uintptr_t const sentinel_id, 3099 const struct device_compatible_entry *driver_compats) 3100 { 3101 const struct device_compatible_entry *dce; 3102 3103 if (device_compatible_match_id_internal(id, (uintptr_t)-1, 3104 sentinel_id, driver_compats, &dce)) { 3105 return dce; 3106 } 3107 return NULL; 3108 } 3109 3110 /* 3111 * Power management related functions. 
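 *
 * Drivers do not normally call the device_pmf_*() routines below
 * directly; they register their handlers through pmf(9), which stores
 * them here.  A hedged sketch of the usual driver-side pattern (the
 * handler names are hypothetical):
 *
 *	In xyz_attach():
 *
 *		if (!pmf_device_register(self, xyz_suspend, xyz_resume))
 *			aprint_error_dev(self,
 *			    "couldn't establish power handler\n");
 *
 *	In xyz_detach():
 *
 *		pmf_device_deregister(self);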
3112 */ 3113 3114 bool 3115 device_pmf_is_registered(device_t dev) 3116 { 3117 return (dev->dv_flags & DVF_POWER_HANDLERS) != 0; 3118 } 3119 3120 bool 3121 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual) 3122 { 3123 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0) 3124 return true; 3125 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0) 3126 return false; 3127 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER && 3128 dev->dv_driver_suspend != NULL && 3129 !(*dev->dv_driver_suspend)(dev, qual)) 3130 return false; 3131 3132 dev->dv_flags |= DVF_DRIVER_SUSPENDED; 3133 return true; 3134 } 3135 3136 bool 3137 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual) 3138 { 3139 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0) 3140 return true; 3141 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0) 3142 return false; 3143 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER && 3144 dev->dv_driver_resume != NULL && 3145 !(*dev->dv_driver_resume)(dev, qual)) 3146 return false; 3147 3148 dev->dv_flags &= ~DVF_DRIVER_SUSPENDED; 3149 return true; 3150 } 3151 3152 bool 3153 device_pmf_driver_shutdown(device_t dev, int how) 3154 { 3155 3156 if (*dev->dv_driver_shutdown != NULL && 3157 !(*dev->dv_driver_shutdown)(dev, how)) 3158 return false; 3159 return true; 3160 } 3161 3162 void 3163 device_pmf_driver_register(device_t dev, 3164 bool (*suspend)(device_t, const pmf_qual_t *), 3165 bool (*resume)(device_t, const pmf_qual_t *), 3166 bool (*shutdown)(device_t, int)) 3167 { 3168 3169 dev->dv_driver_suspend = suspend; 3170 dev->dv_driver_resume = resume; 3171 dev->dv_driver_shutdown = shutdown; 3172 dev->dv_flags |= DVF_POWER_HANDLERS; 3173 } 3174 3175 void 3176 device_pmf_driver_deregister(device_t dev) 3177 { 3178 device_lock_t dvl = device_getlock(dev); 3179 3180 dev->dv_driver_suspend = NULL; 3181 dev->dv_driver_resume = NULL; 3182 3183 mutex_enter(&dvl->dvl_mtx); 3184 dev->dv_flags &= ~DVF_POWER_HANDLERS; 3185 while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) { 3186 /* Wake a thread that waits for the lock. That 3187 * thread will fail to acquire the lock, and then 3188 * it will wake the next thread that waits for the 3189 * lock, or else it will wake us. 
3190 */ 3191 cv_signal(&dvl->dvl_cv); 3192 pmflock_debug(dev, __func__, __LINE__); 3193 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx); 3194 pmflock_debug(dev, __func__, __LINE__); 3195 } 3196 mutex_exit(&dvl->dvl_mtx); 3197 } 3198 3199 void 3200 device_pmf_driver_child_register(device_t dev) 3201 { 3202 device_t parent = device_parent(dev); 3203 3204 if (parent == NULL || parent->dv_driver_child_register == NULL) 3205 return; 3206 (*parent->dv_driver_child_register)(dev); 3207 } 3208 3209 void 3210 device_pmf_driver_set_child_register(device_t dev, 3211 void (*child_register)(device_t)) 3212 { 3213 dev->dv_driver_child_register = child_register; 3214 } 3215 3216 static void 3217 pmflock_debug(device_t dev, const char *func, int line) 3218 { 3219 #ifdef PMFLOCK_DEBUG 3220 device_lock_t dvl = device_getlock(dev); 3221 const char *curlwp_name; 3222 3223 if (curlwp->l_name != NULL) 3224 curlwp_name = curlwp->l_name; 3225 else 3226 curlwp_name = curlwp->l_proc->p_comm; 3227 3228 aprint_debug_dev(dev, 3229 "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line, 3230 curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags); 3231 #endif /* PMFLOCK_DEBUG */ 3232 } 3233 3234 static bool 3235 device_pmf_lock1(device_t dev) 3236 { 3237 device_lock_t dvl = device_getlock(dev); 3238 3239 while (device_pmf_is_registered(dev) && 3240 dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) { 3241 dvl->dvl_nwait++; 3242 pmflock_debug(dev, __func__, __LINE__); 3243 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx); 3244 pmflock_debug(dev, __func__, __LINE__); 3245 dvl->dvl_nwait--; 3246 } 3247 if (!device_pmf_is_registered(dev)) { 3248 pmflock_debug(dev, __func__, __LINE__); 3249 /* We could not acquire the lock, but some other thread may 3250 * wait for it, also. Wake that thread. 3251 */ 3252 cv_signal(&dvl->dvl_cv); 3253 return false; 3254 } 3255 dvl->dvl_nlock++; 3256 dvl->dvl_holder = curlwp; 3257 pmflock_debug(dev, __func__, __LINE__); 3258 return true; 3259 } 3260 3261 bool 3262 device_pmf_lock(device_t dev) 3263 { 3264 bool rc; 3265 device_lock_t dvl = device_getlock(dev); 3266 3267 mutex_enter(&dvl->dvl_mtx); 3268 rc = device_pmf_lock1(dev); 3269 mutex_exit(&dvl->dvl_mtx); 3270 3271 return rc; 3272 } 3273 3274 void 3275 device_pmf_unlock(device_t dev) 3276 { 3277 device_lock_t dvl = device_getlock(dev); 3278 3279 KASSERT(dvl->dvl_nlock > 0); 3280 mutex_enter(&dvl->dvl_mtx); 3281 if (--dvl->dvl_nlock == 0) 3282 dvl->dvl_holder = NULL; 3283 cv_signal(&dvl->dvl_cv); 3284 pmflock_debug(dev, __func__, __LINE__); 3285 mutex_exit(&dvl->dvl_mtx); 3286 } 3287 3288 device_lock_t 3289 device_getlock(device_t dev) 3290 { 3291 return &dev->dv_lock; 3292 } 3293 3294 void * 3295 device_pmf_bus_private(device_t dev) 3296 { 3297 return dev->dv_bus_private; 3298 } 3299 3300 bool 3301 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual) 3302 { 3303 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0) 3304 return true; 3305 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 || 3306 (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0) 3307 return false; 3308 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS && 3309 dev->dv_bus_suspend != NULL && 3310 !(*dev->dv_bus_suspend)(dev, qual)) 3311 return false; 3312 3313 dev->dv_flags |= DVF_BUS_SUSPENDED; 3314 return true; 3315 } 3316 3317 bool 3318 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual) 3319 { 3320 if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0) 3321 return true; 3322 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS && 3323 dev->dv_bus_resume != NULL && 3324 !(*dev->dv_bus_resume)(dev, qual)) 
3325 return false; 3326 3327 dev->dv_flags &= ~DVF_BUS_SUSPENDED; 3328 return true; 3329 } 3330 3331 bool 3332 device_pmf_bus_shutdown(device_t dev, int how) 3333 { 3334 3335 if (*dev->dv_bus_shutdown != NULL && 3336 !(*dev->dv_bus_shutdown)(dev, how)) 3337 return false; 3338 return true; 3339 } 3340 3341 void 3342 device_pmf_bus_register(device_t dev, void *priv, 3343 bool (*suspend)(device_t, const pmf_qual_t *), 3344 bool (*resume)(device_t, const pmf_qual_t *), 3345 bool (*shutdown)(device_t, int), void (*deregister)(device_t)) 3346 { 3347 dev->dv_bus_private = priv; 3348 dev->dv_bus_resume = resume; 3349 dev->dv_bus_suspend = suspend; 3350 dev->dv_bus_shutdown = shutdown; 3351 dev->dv_bus_deregister = deregister; 3352 } 3353 3354 void 3355 device_pmf_bus_deregister(device_t dev) 3356 { 3357 if (dev->dv_bus_deregister == NULL) 3358 return; 3359 (*dev->dv_bus_deregister)(dev); 3360 dev->dv_bus_private = NULL; 3361 dev->dv_bus_suspend = NULL; 3362 dev->dv_bus_resume = NULL; 3363 dev->dv_bus_deregister = NULL; 3364 } 3365 3366 void * 3367 device_pmf_class_private(device_t dev) 3368 { 3369 return dev->dv_class_private; 3370 } 3371 3372 bool 3373 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual) 3374 { 3375 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0) 3376 return true; 3377 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS && 3378 dev->dv_class_suspend != NULL && 3379 !(*dev->dv_class_suspend)(dev, qual)) 3380 return false; 3381 3382 dev->dv_flags |= DVF_CLASS_SUSPENDED; 3383 return true; 3384 } 3385 3386 bool 3387 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual) 3388 { 3389 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0) 3390 return true; 3391 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 || 3392 (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0) 3393 return false; 3394 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS && 3395 dev->dv_class_resume != NULL && 3396 !(*dev->dv_class_resume)(dev, qual)) 3397 return false; 3398 3399 dev->dv_flags &= ~DVF_CLASS_SUSPENDED; 3400 return true; 3401 } 3402 3403 void 3404 device_pmf_class_register(device_t dev, void *priv, 3405 bool (*suspend)(device_t, const pmf_qual_t *), 3406 bool (*resume)(device_t, const pmf_qual_t *), 3407 void (*deregister)(device_t)) 3408 { 3409 dev->dv_class_private = priv; 3410 dev->dv_class_suspend = suspend; 3411 dev->dv_class_resume = resume; 3412 dev->dv_class_deregister = deregister; 3413 } 3414 3415 void 3416 device_pmf_class_deregister(device_t dev) 3417 { 3418 if (dev->dv_class_deregister == NULL) 3419 return; 3420 (*dev->dv_class_deregister)(dev); 3421 dev->dv_class_private = NULL; 3422 dev->dv_class_suspend = NULL; 3423 dev->dv_class_resume = NULL; 3424 dev->dv_class_deregister = NULL; 3425 } 3426 3427 bool 3428 device_active(device_t dev, devactive_t type) 3429 { 3430 size_t i; 3431 3432 if (dev->dv_activity_count == 0) 3433 return false; 3434 3435 for (i = 0; i < dev->dv_activity_count; ++i) { 3436 if (dev->dv_activity_handlers[i] == NULL) 3437 break; 3438 (*dev->dv_activity_handlers[i])(dev, type); 3439 } 3440 3441 return true; 3442 } 3443 3444 bool 3445 device_active_register(device_t dev, void (*handler)(device_t, devactive_t)) 3446 { 3447 void (**new_handlers)(device_t, devactive_t); 3448 void (**old_handlers)(device_t, devactive_t); 3449 size_t i, old_size, new_size; 3450 int s; 3451 3452 old_handlers = dev->dv_activity_handlers; 3453 old_size = dev->dv_activity_count; 3454 3455 KASSERT(old_size == 0 || old_handlers != NULL); 3456 3457 for (i = 0; i < old_size; ++i) { 3458 
KASSERT(old_handlers[i] != handler); 3459 if (old_handlers[i] == NULL) { 3460 old_handlers[i] = handler; 3461 return true; 3462 } 3463 } 3464 3465 new_size = old_size + 4; 3466 new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP); 3467 3468 for (i = 0; i < old_size; ++i) 3469 new_handlers[i] = old_handlers[i]; 3470 new_handlers[old_size] = handler; 3471 for (i = old_size+1; i < new_size; ++i) 3472 new_handlers[i] = NULL; 3473 3474 s = splhigh(); 3475 dev->dv_activity_count = new_size; 3476 dev->dv_activity_handlers = new_handlers; 3477 splx(s); 3478 3479 if (old_size > 0) 3480 kmem_free(old_handlers, sizeof(void *) * old_size); 3481 3482 return true; 3483 } 3484 3485 void 3486 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t)) 3487 { 3488 void (**old_handlers)(device_t, devactive_t); 3489 size_t i, old_size; 3490 int s; 3491 3492 old_handlers = dev->dv_activity_handlers; 3493 old_size = dev->dv_activity_count; 3494 3495 for (i = 0; i < old_size; ++i) { 3496 if (old_handlers[i] == handler) 3497 break; 3498 if (old_handlers[i] == NULL) 3499 return; /* XXX panic? */ 3500 } 3501 3502 if (i == old_size) 3503 return; /* XXX panic? */ 3504 3505 for (; i < old_size - 1; ++i) { 3506 if ((old_handlers[i] = old_handlers[i + 1]) != NULL) 3507 continue; 3508 3509 if (i == 0) { 3510 s = splhigh(); 3511 dev->dv_activity_count = 0; 3512 dev->dv_activity_handlers = NULL; 3513 splx(s); 3514 kmem_free(old_handlers, sizeof(void *) * old_size); 3515 } 3516 return; 3517 } 3518 old_handlers[i] = NULL; 3519 } 3520 3521 /* Return true iff the device_t `dev' exists at generation `gen'. */ 3522 static bool 3523 device_exists_at(device_t dv, devgen_t gen) 3524 { 3525 return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) && 3526 dv->dv_add_gen <= gen; 3527 } 3528 3529 static bool 3530 deviter_visits(const deviter_t *di, device_t dv) 3531 { 3532 return device_exists_at(dv, di->di_gen); 3533 } 3534 3535 /* 3536 * Device Iteration 3537 * 3538 * deviter_t: a device iterator. Holds state for a "walk" visiting 3539 * each device_t's in the device tree. 3540 * 3541 * deviter_init(di, flags): initialize the device iterator `di' 3542 * to "walk" the device tree. deviter_next(di) will return 3543 * the first device_t in the device tree, or NULL if there are 3544 * no devices. 3545 * 3546 * `flags' is one or more of DEVITER_F_RW, indicating that the 3547 * caller intends to modify the device tree by calling 3548 * config_detach(9) on devices in the order that the iterator 3549 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices 3550 * nearest the "root" of the device tree to be returned, first; 3551 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from 3552 * the root of the device tree, first; and DEVITER_F_SHUTDOWN, 3553 * indicating both that deviter_init() should not respect any 3554 * locks on the device tree, and that deviter_next(di) may run 3555 * in more than one LWP before the walk has finished. 3556 * 3557 * Only one DEVITER_F_RW iterator may be in the device tree at 3558 * once. 3559 * 3560 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW. 3561 * 3562 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and 3563 * DEVITER_F_LEAVES_FIRST are used in combination. 3564 * 3565 * deviter_first(di, flags): initialize the device iterator `di' 3566 * and return the first device_t in the device tree, or NULL 3567 * if there are no devices. 
The statement
3568 *
3569 * dv = deviter_first(di, flags);
3570 *
3571 * is shorthand for
3572 *
3573 * deviter_init(di, flags);
3574 * dv = deviter_next(di);
3575 *
3576 * deviter_next(di): return the next device_t in the device tree,
3577 * or NULL if there are no more devices. deviter_next(di)
3578 * is undefined if `di' was not initialized with deviter_init() or
3579 * deviter_first().
3580 *
3581 * deviter_release(di): stops iteration (subsequent calls to
3582 * deviter_next() will return NULL), releases any locks and
3583 * resources held by the device iterator.
3584 *
3585 * Device iteration does not return device_t's in any particular
3586 * order. An iterator will never return the same device_t twice.
3587 * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3588 * is called repeatedly on the same `di', it will eventually return
3589 * NULL. It is ok to attach/detach devices during device iteration.
3590 */
3591 void
3592 deviter_init(deviter_t *di, deviter_flags_t flags)
3593 {
3594 device_t dv;
3595
3596 memset(di, 0, sizeof(*di));
3597
3598 if ((flags & DEVITER_F_SHUTDOWN) != 0)
3599 flags |= DEVITER_F_RW;
3600
3601 mutex_enter(&alldevs_lock);
3602 if ((flags & DEVITER_F_RW) != 0)
3603 alldevs_nwrite++;
3604 else
3605 alldevs_nread++;
3606 di->di_gen = alldevs_gen++;
3607 di->di_flags = flags;
3608
3609 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3610 case DEVITER_F_LEAVES_FIRST:
3611 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3612 if (!deviter_visits(di, dv))
3613 continue;
3614 di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3615 }
3616 break;
3617 case DEVITER_F_ROOT_FIRST:
3618 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3619 if (!deviter_visits(di, dv))
3620 continue;
3621 di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3622 }
3623 break;
3624 default:
3625 break;
3626 }
3627
3628 deviter_reinit(di);
3629 mutex_exit(&alldevs_lock);
3630 }
3631
3632 static void
3633 deviter_reinit(deviter_t *di)
3634 {
3635
3636 KASSERT(mutex_owned(&alldevs_lock));
3637 if ((di->di_flags & DEVITER_F_RW) != 0)
3638 di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3639 else
3640 di->di_prev = TAILQ_FIRST(&alldevs);
3641 }
3642
3643 device_t
3644 deviter_first(deviter_t *di, deviter_flags_t flags)
3645 {
3646
3647 deviter_init(di, flags);
3648 return deviter_next(di);
3649 }
3650
3651 static device_t
3652 deviter_next2(deviter_t *di)
3653 {
3654 device_t dv;
3655
3656 KASSERT(mutex_owned(&alldevs_lock));
3657
3658 dv = di->di_prev;
3659
3660 if (dv == NULL)
3661 return NULL;
3662
3663 if ((di->di_flags & DEVITER_F_RW) != 0)
3664 di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3665 else
3666 di->di_prev = TAILQ_NEXT(dv, dv_list);
3667
3668 return dv;
3669 }
3670
3671 static device_t
3672 deviter_next1(deviter_t *di)
3673 {
3674 device_t dv;
3675
3676 KASSERT(mutex_owned(&alldevs_lock));
3677
3678 do {
3679 dv = deviter_next2(di);
3680 } while (dv != NULL && !deviter_visits(di, dv));
3681
3682 return dv;
3683 }
3684
3685 device_t
3686 deviter_next(deviter_t *di)
3687 {
3688 device_t dv = NULL;
3689
3690 mutex_enter(&alldevs_lock);
3691 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3692 case 0:
3693 dv = deviter_next1(di);
3694 break;
3695 case DEVITER_F_LEAVES_FIRST:
3696 while (di->di_curdepth >= 0) {
3697 if ((dv = deviter_next1(di)) == NULL) {
3698 di->di_curdepth--;
3699 deviter_reinit(di);
3700 } else if (dv->dv_depth == di->di_curdepth)
3701 break;
3702 }
3703 break;
3704 case DEVITER_F_ROOT_FIRST:
3705 while (di->di_curdepth
<= di->di_maxdepth) { 3706 if ((dv = deviter_next1(di)) == NULL) { 3707 di->di_curdepth++; 3708 deviter_reinit(di); 3709 } else if (dv->dv_depth == di->di_curdepth) 3710 break; 3711 } 3712 break; 3713 default: 3714 break; 3715 } 3716 mutex_exit(&alldevs_lock); 3717 3718 return dv; 3719 } 3720 3721 void 3722 deviter_release(deviter_t *di) 3723 { 3724 bool rw = (di->di_flags & DEVITER_F_RW) != 0; 3725 3726 mutex_enter(&alldevs_lock); 3727 if (rw) 3728 --alldevs_nwrite; 3729 else 3730 --alldevs_nread; 3731 /* XXX wake a garbage-collection thread */ 3732 mutex_exit(&alldevs_lock); 3733 } 3734 3735 const char * 3736 cfdata_ifattr(const struct cfdata *cf) 3737 { 3738 return cf->cf_pspec->cfp_iattr; 3739 } 3740 3741 bool 3742 ifattr_match(const char *snull, const char *t) 3743 { 3744 return (snull == NULL) || strcmp(snull, t) == 0; 3745 } 3746 3747 void 3748 null_childdetached(device_t self, device_t child) 3749 { 3750 /* do nothing */ 3751 } 3752 3753 static void 3754 sysctl_detach_setup(struct sysctllog **clog) 3755 { 3756 3757 sysctl_createv(clog, 0, NULL, NULL, 3758 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 3759 CTLTYPE_BOOL, "detachall", 3760 SYSCTL_DESCR("Detach all devices at shutdown"), 3761 NULL, 0, &detachall, 0, 3762 CTL_KERN, CTL_CREATE, CTL_EOL); 3763 } 3764
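/*
 * A hedged usage sketch for the deviter(9) interface documented above
 * (the loop body is illustrative): walk the device tree read-only and
 * report every child of `self'.
 *
 *	device_t dv;
 *	deviter_t di;
 *
 *	for (dv = deviter_first(&di, 0); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		if (device_parent(dv) == self)
 *			aprint_debug_dev(self, "child %s\n",
 *			    device_xname(dv));
 *	}
 *	deviter_release(&di);
 */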