1 /* $NetBSD: subr_autoconf.c,v 1.315 2025/10/03 16:49:07 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 1996, 2000 Christopher G. Demetriou 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed for the 18 * NetBSD Project. See http://www.NetBSD.org/ for 19 * information about NetBSD. 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )-- 35 */ 36 37 /* 38 * Copyright (c) 1992, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This software was developed by the Computer Systems Engineering group 42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 43 * contributed to Berkeley. 44 * 45 * All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by the University of 48 * California, Lawrence Berkeley Laboratories. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. Neither the name of the University nor the names of its contributors 59 * may be used to endorse or promote products derived from this software 60 * without specific prior written permission. 61 * 62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 65 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 72 * SUCH DAMAGE. 73 * 74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL) 75 * 76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94 77 */ 78 79 #include <sys/cdefs.h> 80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.315 2025/10/03 16:49:07 thorpej Exp $"); 81 82 #ifdef _KERNEL_OPT 83 #include "opt_ddb.h" 84 #include "drvctl.h" 85 #endif 86 87 #include <sys/param.h> 88 #include <sys/device.h> 89 #include <sys/device_impl.h> 90 #include <sys/device_calls.h> 91 #include <sys/disklabel.h> 92 #include <sys/conf.h> 93 #include <sys/kauth.h> 94 #include <sys/kmem.h> 95 #include <sys/systm.h> 96 #include <sys/kernel.h> 97 #include <sys/errno.h> 98 #include <sys/proc.h> 99 #include <sys/reboot.h> 100 #include <sys/kthread.h> 101 #include <sys/buf.h> 102 #include <sys/dirent.h> 103 #include <sys/mount.h> 104 #include <sys/namei.h> 105 #include <sys/unistd.h> 106 #include <sys/fcntl.h> 107 #include <sys/lockf.h> 108 #include <sys/callout.h> 109 #include <sys/devmon.h> 110 #include <sys/cpu.h> 111 #include <sys/sysctl.h> 112 #include <sys/stdarg.h> 113 #include <sys/localcount.h> 114 115 #include <sys/disk.h> 116 117 #include <sys/rndsource.h> 118 119 #include <machine/limits.h> 120 121 /* 122 * Autoconfiguration subroutines. 123 */ 124 125 /* 126 * Device autoconfiguration timings are mixed into the entropy pool. 127 */ 128 static krndsource_t rnd_autoconf_source; 129 130 /* 131 * ioconf.c exports exactly two names: cfdata and cfroots. 
 * All system
 * devices and drivers are found via these tables.
 */
extern struct cfdata cfdata[];
extern const short cfroots[];

/*
 * List of all cfdriver structures.  We use this to detect duplicates
 * when other cfdrivers are loaded.
 */
struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
extern struct cfdriver * const cfdriver_list_initial[];

/*
 * Initial list of cfattach's.
 */
extern const struct cfattachinit cfattachinit[];

/*
 * List of cfdata tables.  We always have one such list -- the one
 * built statically when the kernel was configured.
 */
struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
static struct cftable initcftable;

#define	ROOT ((device_t)NULL)

/* State accumulated while scanning cfdata entries for the best match. */
struct matchinfo {
	cfsubmatch_t fn;	/* optional caller-supplied submatch function */
	device_t parent;	/* device whose children are being matched */
	const int *locs;	/* locators handed to the submatch function */
	void	*aux;		/* bus-specific attach arguments */
	struct	cfdata *match;	/* best candidate seen so far */
	int	pri;		/* priority of that candidate */
};

/* Saved state for a critical section over the alldevs list. */
struct alldevs_foray {
	int af_s;
	struct devicelist af_garbage;
};

/*
 * Internal version of the cfargs structure; all versions are
 * canonicalized to this.
 */
struct cfargs_internal {
	union {
		cfsubmatch_t submatch;/* submatch function (direct config) */
		cfsearch_t search;	/* search function (indirect config) */
	};
	const char *	iattr;		/* interface attribute */
	const int *	locators;	/* locators array */
	devhandle_t	devhandle;	/* devhandle_t (by value) */
};

static char *number(char *, int);
static void mapply(struct matchinfo *, cfdata_t);
static void config_devdelete(device_t);
static void config_devunlink(device_t, struct devicelist *);
static void config_makeroom(int, struct cfdriver *);
static void config_devlink(device_t);
static void config_alldevs_enter(struct alldevs_foray *);
static void config_alldevs_exit(struct alldevs_foray *);
static void config_add_attrib_dict(device_t);
static device_t config_attach_internal(device_t, cfdata_t, void *,
    cfprint_t, const struct cfargs_internal *);

static void config_collect_garbage(struct devicelist *);
static void config_dump_garbage(struct devicelist *);

static void pmflock_debug(device_t, const char *, int);

static device_t deviter_next1(deviter_t *);
static void deviter_reinit(deviter_t *);

/* One entry on a deferred-configuration queue. */
struct deferred_config {
	TAILQ_ENTRY(deferred_config) dc_queue;
	device_t dc_dev;		/* device awaiting deferred config */
	void (*dc_func)(device_t);	/* callback to run when ready */
};

TAILQ_HEAD(deferred_config_head, deferred_config);

static struct deferred_config_head deferred_config_queue =
	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
static struct deferred_config_head interrupt_config_queue =
	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
static int interrupt_config_threads = 8;
static struct deferred_config_head mountroot_config_queue =
	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
static int mountroot_config_threads = 2;
static lwp_t **mountroot_config_lwpids;
static size_t mountroot_config_lwpids_size;
bool root_is_mounted = false;

static
void	config_process_deferred(struct deferred_config_head *, device_t);

/* Hooks to finalize configuration once all real devices have been found. */
struct finalize_hook {
	TAILQ_ENTRY(finalize_hook) f_list;
	int (*f_func)(device_t);
	device_t f_dev;
};
static TAILQ_HEAD(, finalize_hook) config_finalize_list =
	TAILQ_HEAD_INITIALIZER(config_finalize_list);
static int config_finalize_done;

/* list of all devices */
static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
static kmutex_t alldevs_lock __cacheline_aligned;
static devgen_t alldevs_gen = 1;
static int alldevs_nread = 0;
static int alldevs_nwrite = 0;
static bool alldevs_garbage = false;

static struct devicelist config_pending =
	TAILQ_HEAD_INITIALIZER(config_pending);
static kmutex_t config_misc_lock;
static kcondvar_t config_misc_cv;

static bool detachall = false;

#define	STREQ(s1, s2)			\
	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)

static bool config_initialized = false;	/* config_init() has been called. */

static int config_do_twiddle;
static callout_t config_twiddle_ch;

static void sysctl_detach_setup(struct sysctllog **);

int no_devmon_insert(const char *, prop_dictionary_t);
int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;

typedef int (*cfdriver_fn)(struct cfdriver *);
/*
 * Apply drv_do to every entry of the NULL-terminated cfdriver vector.
 * On the first failure, report it (panic if dopanic is true), undo the
 * already-processed entries with drv_undo in reverse order, and return
 * the error.  Returns 0 when every entry succeeded.
 */
static int
frob_cfdrivervec(struct cfdriver * const *cfdriverv,
	cfdriver_fn drv_do, cfdriver_fn drv_undo,
	const char *style, bool dopanic)
{
	void (*pr)(const char *, ...) __printflike(1, 2) =
	    dopanic ? panic : printf;
	int i, error = 0, e2 __diagused;

	for (i = 0; cfdriverv[i] != NULL; i++) {
		if ((error = drv_do(cfdriverv[i])) != 0) {
			pr("configure: `%s' driver %s failed: %d",
			    cfdriverv[i]->cd_name, style, error);
			goto bad;
		}
	}

	KASSERT(error == 0);
	return 0;

 bad:
	printf("\n");
	/* Roll back everything done so far, most recent first. */
	for (i--; i >= 0; i--) {
		e2 = drv_undo(cfdriverv[i]);
		KASSERT(e2 == 0);
	}

	return error;
}

typedef int (*cfattach_fn)(const char *, struct cfattach *);
/*
 * Like frob_cfdrivervec(), but for the two-level cfattach table:
 * apply att_do to every attachment of every driver, and roll back
 * with att_undo (in reverse) on the first failure.
 */
static int
frob_cfattachvec(const struct cfattachinit *cfattachv,
	cfattach_fn att_do, cfattach_fn att_undo,
	const char *style, bool dopanic)
{
	const struct cfattachinit *cfai = NULL;
	void (*pr)(const char *, ...) __printflike(1, 2) =
	    dopanic ? panic : printf;
	int j = 0, error = 0, e2 __diagused;

	for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
		for (j = 0; cfai->cfai_list[j] != NULL; j++) {
			if ((error = att_do(cfai->cfai_name,
			    cfai->cfai_list[j])) != 0) {
				pr("configure: attachment `%s' "
				    "of `%s' driver %s failed: %d",
				    cfai->cfai_list[j]->ca_name,
				    cfai->cfai_name, style, error);
				goto bad;
			}
		}
	}

	KASSERT(error == 0);
	return 0;

 bad:
	/*
	 * Rollback in reverse order.  dunno if super-important, but
	 * do that anyway.  Although the code looks a little like
	 * someone did a little integration (in the math sense).
	 *
	 * On entry here, `cfai' points at the failing outer entry and
	 * `j' at the failing inner entry; the first inner loop below
	 * undoes the partial row, then we step back a row at a time.
	 */
	printf("\n");
	if (cfai) {
		bool last;

		for (last = false; last == false; ) {
			if (cfai == &cfattachv[0])
				last = true;
			for (j--; j >= 0; j--) {
				e2 = att_undo(cfai->cfai_name,
				    cfai->cfai_list[j]);
				KASSERT(e2 == 0);
			}
			if (!last) {
				cfai--;
				/* Reset j to the length of the new row. */
				for (j = 0; cfai->cfai_list[j] != NULL; j++)
					;
			}
		}
	}

	return error;
}

/*
 * Initialize the autoconfiguration data structures.
Normally this 355 * is done by configure(), but some platforms need to do this very 356 * early (to e.g. initialize the console). 357 */ 358 void 359 config_init(void) 360 { 361 362 KASSERT(config_initialized == false); 363 364 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM); 365 366 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE); 367 cv_init(&config_misc_cv, "cfgmisc"); 368 369 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE); 370 371 frob_cfdrivervec(cfdriver_list_initial, 372 config_cfdriver_attach, NULL, "bootstrap", true); 373 frob_cfattachvec(cfattachinit, 374 config_cfattach_attach, NULL, "bootstrap", true); 375 376 initcftable.ct_cfdata = cfdata; 377 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list); 378 379 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN, 380 RND_FLAG_COLLECT_TIME); 381 382 config_initialized = true; 383 } 384 385 /* 386 * Init or fini drivers and attachments. Either all or none 387 * are processed (via rollback). It would be nice if this were 388 * atomic to outside consumers, but with the current state of 389 * locking ... 390 */ 391 int 392 config_init_component(struct cfdriver * const *cfdriverv, 393 const struct cfattachinit *cfattachv, struct cfdata *cfdatav) 394 { 395 int error; 396 397 KERNEL_LOCK(1, NULL); 398 399 if ((error = frob_cfdrivervec(cfdriverv, 400 config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0) 401 goto out; 402 if ((error = frob_cfattachvec(cfattachv, 403 config_cfattach_attach, config_cfattach_detach, 404 "init", false)) != 0) { 405 frob_cfdrivervec(cfdriverv, 406 config_cfdriver_detach, NULL, "init rollback", true); 407 goto out; 408 } 409 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) { 410 frob_cfattachvec(cfattachv, 411 config_cfattach_detach, NULL, "init rollback", true); 412 frob_cfdrivervec(cfdriverv, 413 config_cfdriver_detach, NULL, "init rollback", true); 414 goto out; 415 } 416 417 /* Success! 
*/ 418 error = 0; 419 420 out: KERNEL_UNLOCK_ONE(NULL); 421 return error; 422 } 423 424 int 425 config_fini_component(struct cfdriver * const *cfdriverv, 426 const struct cfattachinit *cfattachv, struct cfdata *cfdatav) 427 { 428 int error; 429 430 KERNEL_LOCK(1, NULL); 431 432 if ((error = config_cfdata_detach(cfdatav)) != 0) 433 goto out; 434 if ((error = frob_cfattachvec(cfattachv, 435 config_cfattach_detach, config_cfattach_attach, 436 "fini", false)) != 0) { 437 if (config_cfdata_attach(cfdatav, 0) != 0) 438 panic("config_cfdata fini rollback failed"); 439 goto out; 440 } 441 if ((error = frob_cfdrivervec(cfdriverv, 442 config_cfdriver_detach, config_cfdriver_attach, 443 "fini", false)) != 0) { 444 frob_cfattachvec(cfattachv, 445 config_cfattach_attach, NULL, "fini rollback", true); 446 if (config_cfdata_attach(cfdatav, 0) != 0) 447 panic("config_cfdata fini rollback failed"); 448 goto out; 449 } 450 451 /* Success! */ 452 error = 0; 453 454 out: KERNEL_UNLOCK_ONE(NULL); 455 return error; 456 } 457 458 void 459 config_init_mi(void) 460 { 461 462 if (!config_initialized) 463 config_init(); 464 465 sysctl_detach_setup(NULL); 466 } 467 468 void 469 config_deferred(device_t dev) 470 { 471 472 KASSERT(KERNEL_LOCKED_P()); 473 474 config_process_deferred(&deferred_config_queue, dev); 475 config_process_deferred(&interrupt_config_queue, dev); 476 config_process_deferred(&mountroot_config_queue, dev); 477 } 478 479 static void 480 config_interrupts_thread(void *cookie) 481 { 482 struct deferred_config *dc; 483 device_t dev; 484 485 mutex_enter(&config_misc_lock); 486 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) { 487 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue); 488 mutex_exit(&config_misc_lock); 489 490 dev = dc->dc_dev; 491 (*dc->dc_func)(dev); 492 if (!device_pmf_is_registered(dev)) 493 aprint_debug_dev(dev, 494 "WARNING: power management not supported\n"); 495 config_pending_decr(dev); 496 kmem_free(dc, sizeof(*dc)); 497 498 
mutex_enter(&config_misc_lock); 499 } 500 mutex_exit(&config_misc_lock); 501 502 kthread_exit(0); 503 } 504 505 void 506 config_create_interruptthreads(void) 507 { 508 int i; 509 510 for (i = 0; i < interrupt_config_threads; i++) { 511 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL, 512 config_interrupts_thread, NULL, NULL, "configintr"); 513 } 514 } 515 516 static void 517 config_mountroot_thread(void *cookie) 518 { 519 struct deferred_config *dc; 520 521 mutex_enter(&config_misc_lock); 522 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) { 523 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue); 524 mutex_exit(&config_misc_lock); 525 526 (*dc->dc_func)(dc->dc_dev); 527 kmem_free(dc, sizeof(*dc)); 528 529 mutex_enter(&config_misc_lock); 530 } 531 mutex_exit(&config_misc_lock); 532 533 kthread_exit(0); 534 } 535 536 void 537 config_create_mountrootthreads(void) 538 { 539 int i; 540 541 if (!root_is_mounted) 542 root_is_mounted = true; 543 544 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) * 545 mountroot_config_threads; 546 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size, 547 KM_NOSLEEP); 548 KASSERT(mountroot_config_lwpids); 549 for (i = 0; i < mountroot_config_threads; i++) { 550 mountroot_config_lwpids[i] = 0; 551 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */, 552 NULL, config_mountroot_thread, NULL, 553 &mountroot_config_lwpids[i], 554 "configroot"); 555 } 556 } 557 558 void 559 config_finalize_mountroot(void) 560 { 561 int i, error; 562 563 for (i = 0; i < mountroot_config_threads; i++) { 564 if (mountroot_config_lwpids[i] == 0) 565 continue; 566 567 error = kthread_join(mountroot_config_lwpids[i]); 568 if (error) 569 printf("%s: thread %x joined with error %d\n", 570 __func__, i, error); 571 } 572 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size); 573 } 574 575 /* 576 * Announce device attach/detach to userland listeners. 
 */

/* Stub devmon hook used until drvctl(4) installs a real one. */
int
no_devmon_insert(const char *name, prop_dictionary_t p)
{

	return ENODEV;
}

/*
 * Build and post a device-attach or device-detach event dictionary
 * to the devmon (drvctl) consumer, if one is present.
 */
static void
devmon_report_device(device_t dev, bool isattach)
{
	prop_dictionary_t ev, dict = device_properties(dev);
	const char *parent;
	const char *what;
	const char *where;
	device_t pdev = device_parent(dev);

	/* If currently no drvctl device, just return */
	if (devmon_insert_vec == no_devmon_insert)
		return;

	ev = prop_dictionary_create();
	if (ev == NULL)
		return;

	what = (isattach ? "device-attach" : "device-detach");
	parent = (pdev == NULL ? "root" : device_xname(pdev));
	if (prop_dictionary_get_string(dict, "location", &where)) {
		prop_dictionary_set_string(ev, "location", where);
		aprint_debug("ev: %s %s at %s in [%s]\n",
		    what, device_xname(dev), parent, where);
	}
	if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
	    !prop_dictionary_set_string(ev, "parent", parent)) {
		prop_object_release(ev);
		return;
	}

	/* On success, the consumer takes ownership of `ev'. */
	if ((*devmon_insert_vec)(what, ev) != 0)
		prop_object_release(ev);
}

/*
 * Add a cfdriver to the system.
 */
int
config_cfdriver_attach(struct cfdriver *cd)
{
	struct cfdriver *lcd;

	/* Make sure this driver isn't already in the system. */
	LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
		if (STREQ(lcd->cd_name, cd->cd_name))
			return EEXIST;
	}

	LIST_INIT(&cd->cd_attach);
	LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);

	return 0;
}

/*
 * Remove a cfdriver from the system.  Fails with EBUSY while any
 * device instance or attachment still references it.
 */
int
config_cfdriver_detach(struct cfdriver *cd)
{
	struct alldevs_foray af;
	int i, rc = 0;

	config_alldevs_enter(&af);
	/* Make sure there are no active instances. */
	for (i = 0; i < cd->cd_ndevs; i++) {
		if (cd->cd_devs[i] != NULL) {
			rc = EBUSY;
			break;
		}
	}
	config_alldevs_exit(&af);

	if (rc != 0)
		return rc;

	/* ...and no attachments loaded. */
	if (LIST_EMPTY(&cd->cd_attach) == 0)
		return EBUSY;

	LIST_REMOVE(cd, cd_list);

	KASSERT(cd->cd_devs == NULL);

	return 0;
}

/*
 * Look up a cfdriver by name.
 */
struct cfdriver *
config_cfdriver_lookup(const char *name)
{
	struct cfdriver *cd;

	LIST_FOREACH(cd, &allcfdrivers, cd_list) {
		if (STREQ(cd->cd_name, name))
			return cd;
	}

	return NULL;
}

/*
 * Add a cfattach to the specified driver.
 */
int
config_cfattach_attach(const char *driver, struct cfattach *ca)
{
	struct cfattach *lca;
	struct cfdriver *cd;

	cd = config_cfdriver_lookup(driver);
	if (cd == NULL)
		return ESRCH;

	/* Make sure this attachment isn't already on this driver. */
	LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
		if (STREQ(lca->ca_name, ca->ca_name))
			return EEXIST;
	}

	LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);

	return 0;
}

/*
 * Remove a cfattach from the specified driver.  Fails with EBUSY
 * while any device instance still uses this attachment.
 */
int
config_cfattach_detach(const char *driver, struct cfattach *ca)
{
	struct alldevs_foray af;
	struct cfdriver *cd;
	device_t dev;
	int i, rc = 0;

	cd = config_cfdriver_lookup(driver);
	if (cd == NULL)
		return ESRCH;

	config_alldevs_enter(&af);
	/* Make sure there are no active instances. */
	for (i = 0; i < cd->cd_ndevs; i++) {
		if ((dev = cd->cd_devs[i]) == NULL)
			continue;
		if (dev->dv_cfattach == ca) {
			rc = EBUSY;
			break;
		}
	}
	config_alldevs_exit(&af);

	if (rc != 0)
		return rc;

	LIST_REMOVE(ca, ca_list);

	return 0;
}

/*
 * Look up a cfattach by name.
750 */ 751 static struct cfattach * 752 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname) 753 { 754 struct cfattach *ca; 755 756 LIST_FOREACH(ca, &cd->cd_attach, ca_list) { 757 if (STREQ(ca->ca_name, atname)) 758 return ca; 759 } 760 761 return NULL; 762 } 763 764 /* 765 * Look up a cfattach by driver/attachment name. 766 */ 767 struct cfattach * 768 config_cfattach_lookup(const char *name, const char *atname) 769 { 770 struct cfdriver *cd; 771 772 cd = config_cfdriver_lookup(name); 773 if (cd == NULL) 774 return NULL; 775 776 return config_cfattach_lookup_cd(cd, atname); 777 } 778 779 /* 780 * Apply the matching function and choose the best. This is used 781 * a few times and we want to keep the code small. 782 */ 783 static void 784 mapply(struct matchinfo *m, cfdata_t cf) 785 { 786 int pri; 787 788 if (m->fn != NULL) { 789 pri = (*m->fn)(m->parent, cf, m->locs, m->aux); 790 } else { 791 pri = config_match(m->parent, cf, m->aux); 792 } 793 if (pri > m->pri) { 794 m->match = cf; 795 m->pri = pri; 796 } 797 } 798 799 int 800 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux) 801 { 802 const struct cfiattrdata *ci; 803 const struct cflocdesc *cl; 804 int nlocs, i; 805 806 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver); 807 KASSERT(ci); 808 nlocs = ci->ci_loclen; 809 KASSERT(!nlocs || locs); 810 for (i = 0; i < nlocs; i++) { 811 cl = &ci->ci_locdesc[i]; 812 if (cl->cld_defaultstr != NULL && 813 cf->cf_loc[i] == cl->cld_default) 814 continue; 815 if (cf->cf_loc[i] == locs[i]) 816 continue; 817 return 0; 818 } 819 820 return config_match(parent, cf, aux); 821 } 822 823 /* 824 * Helper function: check whether the driver supports the interface attribute 825 * and return its descriptor structure. 
 */
static const struct cfiattrdata *
cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
{
	const struct cfiattrdata * const *cpp;

	if (cd->cd_attrs == NULL)
		return 0;

	for (cpp = cd->cd_attrs; *cpp; cpp++) {
		if (STREQ((*cpp)->ci_name, ia)) {
			/* Match. */
			return *cpp;
		}
	}
	return 0;
}

/* Count the interface attributes a driver declares (DIAGNOSTIC use). */
static int __diagused
cfdriver_iattr_count(const struct cfdriver *cd)
{
	const struct cfiattrdata * const *cpp;
	int i;

	if (cd->cd_attrs == NULL)
		return 0;

	for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
		i++;
	}
	return i;
}

/*
 * Lookup an interface attribute description by name.
 * If the driver is given, consider only its supported attributes.
 */
const struct cfiattrdata *
cfiattr_lookup(const char *name, const struct cfdriver *cd)
{
	const struct cfdriver *d;
	const struct cfiattrdata *ia;

	if (cd)
		return cfdriver_get_iattr(cd, name);

	LIST_FOREACH(d, &allcfdrivers, cd_list) {
		ia = cfdriver_get_iattr(d, name);
		if (ia)
			return ia;
	}
	return 0;
}

/*
 * Determine if `parent' is a potential parent for a device spec based
 * on `cfp'.  Returns nonzero on a match.
 */
static int
cfparent_match(const device_t parent, const struct cfparent *cfp)
{
	struct cfdriver *pcd;

	/* We don't match root nodes here. */
	if (cfp == NULL)
		return 0;

	pcd = parent->dv_cfdriver;
	KASSERT(pcd != NULL);

	/*
	 * First, ensure this parent has the correct interface
	 * attribute.
	 */
	if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
		return 0;

	/*
	 * If no specific parent device instance was specified (i.e.
	 * we're attaching to the attribute only), we're done!
	 */
	if (cfp->cfp_parent == NULL)
		return 1;

	/*
	 * Check the parent device's name.
	 */
	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
		return 0;	/* not the same parent */

	/*
	 * Make sure the unit number matches.
	 */
	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
	    cfp->cfp_unit == parent->dv_unit)
		return 1;

	/* Unit numbers don't match. */
	return 0;
}

/*
 * Helper for config_cfdata_attach(): check all devices whether it could be
 * parent any attachment in the config data table passed, and rescan.
 */
static void
rescan_with_cfdata(const struct cfdata *cf)
{
	device_t d;
	const struct cfdata *cf1;
	deviter_t di;

	KASSERT(KERNEL_LOCKED_P());

	/*
	 * "alldevs" is likely longer than a modules's cfdata, so make it
	 * the outer loop.
	 */
	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {

		/* Only devices whose driver supports rescanning. */
		if (!(d->dv_cfattach->ca_rescan))
			continue;

		for (cf1 = cf; cf1->cf_name; cf1++) {

			if (!cfparent_match(d, cf1->cf_pspec))
				continue;

			(*d->dv_cfattach->ca_rescan)(d,
			    cfdata_ifattr(cf1), cf1->cf_loc);

			config_deferred(d);
		}
	}
	deviter_release(&di);
}

/*
 * Attach a supplemental config data table and rescan potential
 * parent devices if required.
 */
int
config_cfdata_attach(cfdata_t cf, int scannow)
{
	struct cftable *ct;

	KERNEL_LOCK(1, NULL);

	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
	ct->ct_cfdata = cf;
	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);

	if (scannow)
		rescan_with_cfdata(cf);

	KERNEL_UNLOCK_ONE(NULL);

	return 0;
}

/*
 * Helper for config_cfdata_detach: check whether a device is
 * found through any attachment in the config data table.
 */
static int
dev_in_cfdata(device_t d, cfdata_t cf)
{
	const struct cfdata *cf1;

	for (cf1 = cf; cf1->cf_name; cf1++)
		if (d->dv_cfdata == cf1)
			return 1;

	return 0;
}

/*
 * Detach a supplemental config data table.  Detach all devices found
 * through that table (and thus keeping references to it) before.
 */
int
config_cfdata_detach(cfdata_t cf)
{
	device_t d;
	int error = 0;
	struct cftable *ct;
	deviter_t di;

	KERNEL_LOCK(1, NULL);

	/* First detach every device instance rooted in this table. */
	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
	     d = deviter_next(&di)) {
		if (!dev_in_cfdata(d, cf))
			continue;
		if ((error = config_detach(d, 0)) != 0)
			break;
	}
	deviter_release(&di);
	if (error) {
		aprint_error_dev(d, "unable to detach instance\n");
		goto out;
	}

	/* Then unlink and free the table itself. */
	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		if (ct->ct_cfdata == cf) {
			TAILQ_REMOVE(&allcftables, ct, ct_list);
			kmem_free(ct, sizeof(*ct));
			error = 0;
			goto out;
		}
	}

	/* not found -- shouldn't happen */
	error = EINVAL;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

/*
 * Invoke the "match" routine for a cfdata entry on behalf of
 * an external caller, usually a direct config "submatch" routine.
 */
int
config_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cfattach *ca;

	KASSERT(KERNEL_LOCKED_P());

	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
	if (ca == NULL) {
		/* No attachment for this entry, oh well. */
		return 0;
	}

	return (*ca->ca_match)(parent, cf, aux);
}

/*
 * Invoke the "probe" routine for a cfdata entry on behalf of
 * an external caller, usually an indirect config "search" routine.
 */
int
config_probe(device_t parent, cfdata_t cf, void *aux)
{
	/*
	 * This is currently a synonym for config_match(), but this
	 * is an implementation detail; "match" and "probe" routines
	 * have different behaviors.
	 *
	 * XXX config_probe() should return a bool, because there is
	 * XXX no match score for probe -- it's either there or it's
	 * XXX not, but some ports abuse the return value as a way
	 * XXX to attach "critical" devices before "non-critical"
	 * XXX devices.
	 */
	return config_match(parent, cf, aux);
}

/*
 * Canonicalize a caller-supplied (possibly NULL) cfargs into the
 * internal representation in `store', validating version and the
 * submatch/search exclusivity.
 */
static struct cfargs_internal *
cfargs_canonicalize(const struct cfargs * const cfargs,
	struct cfargs_internal * const store)
{
	struct cfargs_internal *args = store;

	memset(args, 0, sizeof(*args));

	/* If none specified, are all-NULL pointers are good. */
	if (cfargs == NULL) {
		return args;
	}

	/*
	 * Only one arguments version is recognized at this time.
	 */
	if (cfargs->cfargs_version != CFARGS_VERSION) {
		panic("cfargs_canonicalize: unknown version %lu\n",
		    (unsigned long)cfargs->cfargs_version);
	}

	/*
	 * submatch and search are mutually-exclusive.
	 */
	if (cfargs->submatch != NULL && cfargs->search != NULL) {
		panic("cfargs_canonicalize: submatch and search are "
		      "mutually-exclusive");
	}
	if (cfargs->submatch != NULL) {
		args->submatch = cfargs->submatch;
	} else if (cfargs->search != NULL) {
		args->search = cfargs->search;
	}

	args->iattr = cfargs->iattr;
	args->locators = cfargs->locators;
	args->devhandle = cfargs->devhandle;

	return args;
}

/*
 * Iterate over all potential children of some device, calling the given
 * function (default being the child's match function) for each one.
 * Nonzero returns are matches; the highest value returned is considered
 * the best match.
Return the `found child' if we got a match, or NULL
 * otherwise.  The `aux' pointer is simply passed on through.
 *
 * Note that this function is designed so that it can be used to apply
 * an arbitrary function to all potential children (its return value
 * can be ignored).
 */
static cfdata_t
config_search_internal(device_t parent, void *aux,
	const struct cfargs_internal * const args)
{
	struct cftable *ct;
	cfdata_t cf;
	struct matchinfo m;

	KASSERT(config_initialized);
	KASSERTMSG((!args->iattr ||
		cfdriver_get_iattr(parent->dv_cfdriver, args->iattr)),
	    "%s searched for child at interface attribute %s,"
	    " but device %s(4) has no such interface attribute in config(5)",
	    device_xname(parent), args->iattr,
	    parent->dv_cfdriver->cd_name);
	KASSERTMSG((args->iattr ||
		cfdriver_iattr_count(parent->dv_cfdriver) < 2),
	    "%s searched for child without interface attribute,"
	    " needed to disambiguate among the %d declared for in %s(4)"
	    " in config(5)",
	    device_xname(parent),
	    cfdriver_iattr_count(parent->dv_cfdriver),
	    parent->dv_cfdriver->cd_name);

	m.fn = args->submatch;		/* N.B. union */
	m.parent = parent;
	m.locs = args->locators;
	m.aux = aux;
	m.match = NULL;
	m.pri = 0;

	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {

			/* We don't match root nodes here. */
			if (!cf->cf_pspec)
				continue;

			/*
			 * Skip cf if no longer eligible, otherwise scan
			 * through parents for one matching `parent', and
			 * try match function.
			 */
			if (cf->cf_fstate == FSTATE_FOUND)
				continue;
			if (cf->cf_fstate == FSTATE_DNOTFOUND ||
			    cf->cf_fstate == FSTATE_DSTAR)
				continue;

			/*
			 * If an interface attribute was specified,
			 * consider only children which attach to
			 * that attribute.
			 */
			if (args->iattr != NULL &&
			    !STREQ(args->iattr, cfdata_ifattr(cf)))
				continue;

			if (cfparent_match(parent, cf->cf_pspec))
				mapply(&m, cf);
		}
	}
	/* Mix autoconfiguration timing into the entropy pool. */
	rnd_add_uint32(&rnd_autoconf_source, 0);
	return m.match;
}

cfdata_t
config_search(device_t parent, void *aux, const struct cfargs *cfargs)
{
	cfdata_t cf;
	struct cfargs_internal store;

	cf = config_search_internal(parent, aux,
	    cfargs_canonicalize(cfargs, &store));

	return cf;
}

/*
 * Find the given root device.
 * This is much like config_search, but there is no parent.
 * Don't bother with multiple cfdata tables; the root node
 * must always be in the initial table.
 */
cfdata_t
config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
{
	cfdata_t cf;
	const short *p;
	struct matchinfo m;

	m.fn = fn;
	m.parent = ROOT;
	m.aux = aux;
	m.match = NULL;
	m.pri = 0;
	m.locs = 0;

	/*
	 * Look at root entries for matching name.  We do not bother
	 * with found-state here since only one root should ever be
	 * searched (and it must be done first).
	 */
	for (p = cfroots; *p >= 0; p++) {
		cf = &cfdata[*p];
		if (strcmp(cf->cf_name, rootname) == 0)
			mapply(&m, cf);
	}
	return m.match;
}

/* Messages printed for the cfprint_t return codes. */
static const char * const msgs[] = {
	[QUIET]		=	"",
	[UNCONF]	=	" not configured\n",
	[UNSUPP]	=	" unsupported\n",
};

/*
 * The given `aux' argument describes a device that has been found
 * on the given parent, but not necessarily configured.  Locate the
 * configuration data for that device (using the submatch function
 * provided, or using candidates' cd_match configuration driver
 * functions) and attach it, and return its device_t.  If the device was
 * not configured, call the given `print' function and return NULL.
 */
device_t
config_found_acquire(device_t parent, void *aux, cfprint_t print,
    const struct cfargs * const cfargs)
{
	cfdata_t cf;
	struct cfargs_internal store;
	const struct cfargs_internal * const args =
	    cfargs_canonicalize(cfargs, &store);
	device_t dev;

	KERNEL_LOCK(1, NULL);

	cf = config_search_internal(parent, aux, args);
	if (cf != NULL) {
		dev = config_attach_internal(parent, cf, aux, print, args);
		goto out;
	}

	/* No match: report it via `print', unless we are twiddling. */
	if (print) {
		if (config_do_twiddle && cold)
			twiddle();

		const int pret = (*print)(aux, device_xname(parent));
		KASSERT(pret >= 0);
		KASSERT(pret < __arraycount(msgs));
		KASSERT(msgs[pret] != NULL);
		aprint_normal("%s", msgs[pret]);
	}

	dev = NULL;

out:	KERNEL_UNLOCK_ONE(NULL);
	return dev;
}

/*
 * config_found(parent, aux, print, cfargs)
 *
 *	Legacy entry point for callers whose use of the returned
 *	device_t is not delimited by device_release.
 *
 *	The caller is required to hold the kernel lock as a fragile
 *	defence against races.
 *
 *	Callers should ignore the return value or be converted to
 *	config_found_acquire with a matching device_release once they
 *	have finished with the returned device_t.
 */
device_t
config_found(device_t parent, void *aux, cfprint_t print,
    const struct cfargs * const cfargs)
{
	device_t dev;

	KASSERT(KERNEL_LOCKED_P());

	dev = config_found_acquire(parent, aux, print, cfargs);
	if (dev == NULL)
		return NULL;
	device_release(dev);

	return dev;
}

/*
 * As above, but for root devices.
 */
device_t
config_rootfound(const char *rootname, void *aux)
{
	cfdata_t cf;
	device_t dev = NULL;

	KERNEL_LOCK(1, NULL);
	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
	else
		aprint_error("root device %s not configured\n", rootname);
	KERNEL_UNLOCK_ONE(NULL);
	return dev;
}

/* just like sprintf(buf, "%d") except that it works from the end */
static char *
number(char *ep, int n)
{

	*--ep = 0;
	while (n >= 10) {
		*--ep = (n % 10) + '0';
		n /= 10;
	}
	*--ep = n + '0';
	return ep;
}

/*
 * Expand the size of the cd_devs array if necessary.
 *
 * The caller must hold alldevs_lock. config_makeroom() may release and
 * re-acquire alldevs_lock, so callers should re-check conditions such
 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
 * returns.
 */
static void
config_makeroom(int n, struct cfdriver *cd)
{
	int ondevs, nndevs;
	device_t *osp, *nsp;

	KASSERT(mutex_owned(&alldevs_lock));
	alldevs_nwrite++;

	/* Double the array size until it covers unit `n'. */
	/* XXX arithmetic overflow */
	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
		;

	while (n >= cd->cd_ndevs) {
		/*
		 * Need to expand the array.
		 */
		ondevs = cd->cd_ndevs;
		osp = cd->cd_devs;

		/*
		 * Release alldevs_lock around allocation, which may
		 * sleep.
		 */
		mutex_exit(&alldevs_lock);
		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
		mutex_enter(&alldevs_lock);

		/*
		 * If another thread moved the array while we did
		 * not hold alldevs_lock, try again.
		 */
		if (cd->cd_devs != osp || cd->cd_ndevs != ondevs) {
			mutex_exit(&alldevs_lock);
			kmem_free(nsp, sizeof(device_t) * nndevs);
			mutex_enter(&alldevs_lock);
			continue;
		}

		memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
		if (ondevs != 0)
			memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);

		cd->cd_ndevs = nndevs;
		cd->cd_devs = nsp;
		if (ondevs != 0) {
			/* Free the old array; again, may sleep. */
			mutex_exit(&alldevs_lock);
			kmem_free(osp, sizeof(device_t) * ondevs);
			mutex_enter(&alldevs_lock);
		}
	}
	KASSERT(mutex_owned(&alldevs_lock));
	alldevs_nwrite--;
}

/*
 * Put dev into the devices list.
 */
static void
config_devlink(device_t dev)
{

	mutex_enter(&alldevs_lock);

	KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);

	dev->dv_add_gen = alldevs_gen;
	/* It is safe to add a device to the tail of the list while
	 * readers and writers are in the list.
	 */
	TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
	mutex_exit(&alldevs_lock);
}

/*
 * config_devfree:
 *
 *	Release the memory for a device_t and its private softc
 *	storage.  The device must have no pending deferred actions.
 */
static void
config_devfree(device_t dev)
{

	KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);

	if (dev->dv_cfattach->ca_devsize > 0)
		kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
	kmem_free(dev, sizeof(*dev));
}

/*
 * config_devunlink:
 *
 *	Move dev from the alldevs list onto the caller's garbage list
 *	and clear its slot in the cfdriver's cd_devs array; if that
 *	leaves the driver with no units, stash the array in the
 *	device's garbage record for later deallocation.
 *
 * Caller must hold alldevs_lock.
 */
static void
config_devunlink(device_t dev, struct devicelist *garbage)
{
	struct device_garbage *dg = &dev->dv_garbage;
	cfdriver_t cd = device_cfdriver(dev);
	int i;

	KASSERT(mutex_owned(&alldevs_lock));
	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);

	/* Unlink from device list.  Link to garbage list. */
	TAILQ_REMOVE(&alldevs, dev, dv_list);
	TAILQ_INSERT_TAIL(garbage, dev, dv_list);

	/* Remove from cfdriver's array. */
	cd->cd_devs[dev->dv_unit] = NULL;

	/*
	 * If the device now has no units in use, unlink its softc array.
	 */
	for (i = 0; i < cd->cd_ndevs; i++) {
		if (cd->cd_devs[i] != NULL)
			break;
	}
	/* Nothing found.  Unlink, now.  Deallocate, later. */
	if (i == cd->cd_ndevs) {
		dg->dg_ndevs = cd->cd_ndevs;
		dg->dg_devs = cd->cd_devs;
		cd->cd_devs = NULL;
		cd->cd_ndevs = 0;
	}
}

/*
 * config_devdelete:
 *
 *	Tear down all resources attached to a device_t that has been
 *	unlinked: the stashed cd_devs array (if any), localcount,
 *	pmf lock/condvar, properties dictionary, and locators, then
 *	free the device itself via config_devfree().
 */
static void
config_devdelete(device_t dev)
{
	struct device_garbage *dg = &dev->dv_garbage;
	device_lock_t dvl = device_getlock(dev);

	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);

	if (dg->dg_devs != NULL)
		kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);

	localcount_fini(dev->dv_localcount);
	kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));

	cv_destroy(&dvl->dvl_cv);
	mutex_destroy(&dvl->dvl_mtx);

	KASSERT(dev->dv_properties != NULL);
	prop_object_release(dev->dv_properties);

	if (dev->dv_activity_handlers)
		panic("%s with registered handlers", __func__);

	if (dev->dv_locators) {
		/* Allocation size was stored just before the locators. */
		size_t amount = *--dev->dv_locators;
		kmem_free(dev->dv_locators, amount);
	}

	config_devfree(dev);
}

/*
 * config_unit_nextfree:
 *
 *	Compute the next free unit number for cfdata entry `cf' in
 *	driver `cd'.  For wildcard (FSTATE_STAR) entries this is the
 *	first unused slot at or after cf->cf_unit; for fixed-unit
 *	entries it is cf->cf_unit itself if unoccupied.  Returns -1
 *	if no unit is available.
 */
static int
config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
{
	int unit = cf->cf_unit;

	KASSERT(mutex_owned(&alldevs_lock));

	if (unit < 0)
		return -1;
	if (cf->cf_fstate == FSTATE_STAR) {
		for (; unit < cd->cd_ndevs; unit++)
			if (cd->cd_devs[unit] == NULL)
				break;
		/*
		 * unit is now the unit of the first NULL device pointer,
		 * or max(cd->cd_ndevs,cf->cf_unit).
		 */
	} else {
		if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
			unit = -1;
	}
	return unit;
}

/*
 * config_unit_alloc:
 *
 *	Claim a unit number for `dev', growing the driver's cd_devs
 *	array as needed (config_makeroom may drop alldevs_lock, hence
 *	the retry loop).  Returns the unit, or -1 if none available.
 */
static int
config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
{
	struct alldevs_foray af;
	int unit;

	config_alldevs_enter(&af);
	for (;;) {
		unit = config_unit_nextfree(cd, cf);
		if (unit == -1)
			break;
		if (unit < cd->cd_ndevs) {
			cd->cd_devs[unit] = dev;
			dev->dv_unit = unit;
			break;
		}
		config_makeroom(unit, cd);
	}
	config_alldevs_exit(&af);

	return unit;
}

/*
 * config_devalloc:
 *
 *	Allocate and initialize a new device_t (and its driver softc)
 *	for the given cfdata entry: assign a unit number, build the
 *	"name+unit" xname, set up the pmf lock, locators, properties
 *	dictionary, and localcount.  Returns NULL if the driver or
 *	attachment cannot be looked up or no unit is free.
 */
static device_t
config_devalloc(const device_t parent, const cfdata_t cf,
    const struct cfargs_internal * const args)
{
	cfdriver_t cd;
	cfattach_t ca;
	size_t lname, lunit;
	const char *xunit;
	int myunit;
	char num[10];
	device_t dev;
	void *dev_private;
	const struct cfiattrdata *ia;
	device_lock_t dvl;

	cd = config_cfdriver_lookup(cf->cf_name);
	if (cd == NULL)
		return NULL;

	ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
	if (ca == NULL)
		return NULL;

	/* get memory for all device vars */
	KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
	if (ca->ca_devsize > 0) {
		dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
	} else {
		dev_private = NULL;
	}
	dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);

	dev->dv_handle = args->devhandle;

	dev->dv_class = cd->cd_class;
	dev->dv_cfdata = cf;
	dev->dv_cfdriver = cd;
	dev->dv_cfattach = ca;
	dev->dv_activity_count = 0;
	dev->dv_activity_handlers = NULL;
	dev->dv_private = dev_private;
	dev->dv_flags = ca->ca_flags;	/* inherit flags from class */
	dev->dv_attaching = curlwp;

	myunit = config_unit_alloc(dev, cd, cf);
	if (myunit == -1) {
		config_devfree(dev);
		return NULL;
	}

	/* compute length of name and decimal expansion of unit number */
	lname = strlen(cd->cd_name);
	xunit = number(&num[sizeof(num)], myunit);
	lunit = &num[sizeof(num)] - xunit;
	if (lname + lunit > sizeof(dev->dv_xname))
		panic("config_devalloc: device name too long");

	dvl = device_getlock(dev);

	mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&dvl->dvl_cv, "pmfsusp");

	memcpy(dev->dv_xname, cd->cd_name, lname);
	memcpy(dev->dv_xname + lname, xunit, lunit);
	dev->dv_parent = parent;
	if (parent != NULL)
		dev->dv_depth = parent->dv_depth + 1;
	else
		dev->dv_depth = 0;
	dev->dv_flags |= DVF_ACTIVE;	/* always initially active */
	if (args->locators) {
		KASSERT(parent);	/* no locators at root */
		ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
		/*
		 * Store the allocation size in the slot before the
		 * locators themselves; config_devdelete reads it back.
		 */
		dev->dv_locators =
		    kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
		*dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
		memcpy(dev->dv_locators, args->locators,
		    sizeof(int) * ia->ci_loclen);
	}
	dev->dv_properties = prop_dictionary_create();
	KASSERT(dev->dv_properties != NULL);

	prop_dictionary_set_string_nocopy(dev->dv_properties,
	    "device-driver", dev->dv_cfdriver->cd_name);
	prop_dictionary_set_uint16(dev->dv_properties,
	    "device-unit", dev->dv_unit);
	if (parent != NULL) {
		prop_dictionary_set_string(dev->dv_properties,
		    "device-parent", device_xname(parent));
	}

	dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
	    KM_SLEEP);
	localcount_init(dev->dv_localcount);

	if (dev->dv_cfdriver->cd_attrs != NULL)
		config_add_attrib_dict(dev);

	return dev;
}

/*
 * Create an array of device attach attributes and add it
 * to the device's dv_properties dictionary.
 *
 * <key>interface-attributes</key>
 *	<array>
 *		<dict>
 *			<key>attribute-name</key>
 *			<string>foo</string>
 *			<key>locators</key>
 *			<array>
 *				<dict>
 *					<key>loc-name</key>
 *					<string>foo-loc1</string>
 *				</dict>
 *				<dict>
 *					<key>loc-name</key>
 *					<string>foo-loc2</string>
 *					<key>default</key>
 *					<string>foo-loc2-default</string>
 *				</dict>
 *				...
 *			</array>
 *		</dict>
 *		...
 *	</array>
 */

static void
config_add_attrib_dict(device_t dev)
{
	int i, j;
	const struct cfiattrdata *ci;
	prop_dictionary_t attr_dict, loc_dict;
	prop_array_t attr_array, loc_array;

	if ((attr_array = prop_array_create()) == NULL)
		return;

	/* One dictionary per interface attribute of the driver. */
	for (i = 0; ; i++) {
		if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
			break;
		if ((attr_dict = prop_dictionary_create()) == NULL)
			break;
		prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
		    ci->ci_name);

		/* Create an array of the locator names and defaults */

		if (ci->ci_loclen != 0 &&
		    (loc_array = prop_array_create()) != NULL) {
			for (j = 0; j < ci->ci_loclen; j++) {
				loc_dict = prop_dictionary_create();
				if (loc_dict == NULL)
					continue;
				prop_dictionary_set_string_nocopy(loc_dict,
				    "loc-name", ci->ci_locdesc[j].cld_name);
				if (ci->ci_locdesc[j].cld_defaultstr != NULL)
					prop_dictionary_set_string_nocopy(
					    loc_dict, "default",
					    ci->ci_locdesc[j].cld_defaultstr);
				prop_array_set(loc_array, j, loc_dict);
				prop_object_release(loc_dict);
			}
			prop_dictionary_set_and_rel(attr_dict, "locators",
			    loc_array);
		}
		prop_array_add(attr_array, attr_dict);
		prop_object_release(attr_dict);
	}
	/* Drop the array if empty, otherwise hand it to the device. */
	if (i == 0)
		prop_object_release(attr_array);
	else
		prop_dictionary_set_and_rel(dev->dv_properties,
		    "interface-attributes", attr_array);

	return;
}

/*
 * config_device_register:
 *
 *	Notify interested parties of a newly attached device: first
 *	via the devhandle DEVICE_REGISTER call, then via the MD
 *	device_register() hook.
 */
static void
config_device_register(device_t dev, void *aux)
{
	struct device_register_args args = {
		.aux = aux,
	};

	/* We don't really care if this fails. */
	device_call(dev, DEVICE_REGISTER(&args));

	device_register(dev, aux);
}

/*
 * Attach a found device.
 *
 * Returns the device referenced, to be released with device_release.
 */
static device_t
config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
    const struct cfargs_internal * const args)
{
	device_t dev;
	struct cftable *ct;
	const char *drvname;
	bool deferred;

	KASSERT(KERNEL_LOCKED_P());

	dev = config_devalloc(parent, cf, args);
	if (!dev)
		panic("config_attach: allocation of device softc failed");

	/* XXX redundant - see below? */
	if (cf->cf_fstate != FSTATE_STAR) {
		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
		cf->cf_fstate = FSTATE_FOUND;
	}

	config_devlink(dev);

	if (config_do_twiddle && cold)
		twiddle();
	else
		aprint_naive("Found ");
	/*
	 * We want the next two printfs for normal, verbose, and quiet,
	 * but not silent (in which case, we're twiddling, instead).
	 */
	if (parent == ROOT) {
		aprint_naive("%s (root)", device_xname(dev));
		aprint_normal("%s (root)", device_xname(dev));
	} else {
		aprint_naive("%s at %s", device_xname(dev),
		    device_xname(parent));
		aprint_normal("%s at %s", device_xname(dev),
		    device_xname(parent));
		if (print)
			(void) (*print)(aux, NULL);
	}

	/*
	 * Before attaching, clobber any unfound devices that are
	 * otherwise identical.
	 * XXX code above is redundant?
	 */
	drvname = dev->dv_cfdriver->cd_name;
	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
			if (STREQ(cf->cf_name, drvname) &&
			    cf->cf_unit == dev->dv_unit) {
				if (cf->cf_fstate == FSTATE_NOTFOUND)
					cf->cf_fstate = FSTATE_FOUND;
			}
		}
	}
	config_device_register(dev, aux);

	/* Let userland know */
	devmon_report_device(dev, true);

	/*
	 * Prevent detach until the driver's attach function, and all
	 * deferred actions, have finished.
	 */
	config_pending_incr(dev);

	/*
	 * Prevent concurrent detach from destroying the device_t until
	 * the caller has released the device.
	 */
	device_acquire(dev);

	/* Call the driver's attach function.  */
	(*dev->dv_cfattach->ca_attach)(parent, dev, aux);

	/*
	 * Allow other threads to acquire references to the device now
	 * that the driver's attach function is done.
	 */
	mutex_enter(&config_misc_lock);
	KASSERT(dev->dv_attaching == curlwp);
	dev->dv_attaching = NULL;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);

	/*
	 * Synchronous parts of attach are done.  Allow detach, unless
	 * the driver's attach function scheduled deferred actions.
	 */
	config_pending_decr(dev);

	mutex_enter(&config_misc_lock);
	deferred = (dev->dv_pending != 0);
	mutex_exit(&config_misc_lock);

	if (!deferred && !device_pmf_is_registered(dev))
		aprint_debug_dev(dev,
		    "WARNING: power management not supported\n");

	config_process_deferred(&deferred_config_queue, dev);

	device_register_post_config(dev, aux);
	/* Fold autoconfiguration activity into the entropy pool. */
	rnd_add_uint32(&rnd_autoconf_source, 0);
	return dev;
}

/*
 * config_attach_acquire:
 *
 *	Attach the device described by `cf'.  Returns the device
 *	referenced; the caller must release it with device_release.
 */
device_t
config_attach_acquire(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
    const struct cfargs *cfargs)
{
	struct cfargs_internal store;
	device_t dev;

	KERNEL_LOCK(1, NULL);
	dev = config_attach_internal(parent, cf, aux, print,
	    cfargs_canonicalize(cfargs, &store));
	KERNEL_UNLOCK_ONE(NULL);

	return dev;
}

/*
 * config_attach(parent, cf, aux, print, cfargs)
 *
 *	Legacy entry point for callers whose use of the returned
 *	device_t is not delimited by device_release.
 *
 *	The caller is required to hold the kernel lock as a fragile
 *	defence against races.
 *
 *	Callers should ignore the return value or be converted to
 *	config_attach_acquire with a matching device_release once they
 *	have finished with the returned device_t.
 */
device_t
config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
    const struct cfargs *cfargs)
{
	device_t dev;

	KASSERT(KERNEL_LOCKED_P());

	dev = config_attach_acquire(parent, cf, aux, print, cfargs);
	if (dev == NULL)
		return NULL;
	device_release(dev);

	return dev;
}

/*
 * As above, but for pseudo-devices.  Pseudo-devices attached in this
 * way are silently inserted into the device tree, and their children
 * attached.
 *
 * Note that because pseudo-devices are attached silently, any information
 * the attach routine wishes to print should be prefixed with the device
 * name by the attach routine.
 */
device_t
config_attach_pseudo_acquire(cfdata_t cf, void *aux)
{
	device_t dev;

	KERNEL_LOCK(1, NULL);

	struct cfargs_internal args = { };
	dev = config_devalloc(ROOT, cf, &args);
	if (!dev)
		goto out;

	/* XXX mark busy in cfdata */

	if (cf->cf_fstate != FSTATE_STAR) {
		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
		cf->cf_fstate = FSTATE_FOUND;
	}

	config_devlink(dev);

#if 0	/* XXXJRT not yet */
	config_device_register(dev, NULL);	/* like a root node */
#endif

	/* Let userland know */
	devmon_report_device(dev, true);

	/*
	 * Prevent detach until the driver's attach function, and all
	 * deferred actions, have finished.
	 */
	config_pending_incr(dev);

	/*
	 * Prevent concurrent detach from destroying the device_t until
	 * the caller has released the device.
	 */
	device_acquire(dev);

	/* Call the driver's attach function.  */
	(*dev->dv_cfattach->ca_attach)(ROOT, dev, aux);

	/*
	 * Allow other threads to acquire references to the device now
	 * that the driver's attach function is done.
	 */
	mutex_enter(&config_misc_lock);
	KASSERT(dev->dv_attaching == curlwp);
	dev->dv_attaching = NULL;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);

	/*
	 * Synchronous parts of attach are done.  Allow detach, unless
	 * the driver's attach function scheduled deferred actions.
	 */
	config_pending_decr(dev);

	config_process_deferred(&deferred_config_queue, dev);

out:	KERNEL_UNLOCK_ONE(NULL);
	return dev;
}

/*
 * config_attach_pseudo(cf)
 *
 *	Legacy entry point for callers whose use of the returned
 *	device_t is not delimited by device_release.
 *
 *	The caller is required to hold the kernel lock as a fragile
 *	defence against races.
 *
 *	Callers should ignore the return value or be converted to
 *	config_attach_pseudo_acquire with a matching device_release
 *	once they have finished with the returned device_t.  As a
 *	bonus, config_attach_pseudo_acquire can pass a non-null aux
 *	argument into the driver's attach routine.
 */
device_t
config_attach_pseudo(cfdata_t cf)
{
	device_t dev;

	dev = config_attach_pseudo_acquire(cf, NULL);
	if (dev == NULL)
		return dev;
	device_release(dev);

	return dev;
}

/*
 * Caller must hold alldevs_lock.
 */
static void
config_collect_garbage(struct devicelist *garbage)
{
	device_t dv;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(mutex_owned(&alldevs_lock));

	/*
	 * While no readers or writers are active, move every device
	 * marked deleted (dv_del_gen != 0) onto the garbage list.
	 */
	while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (dv->dv_del_gen != 0)
				break;
		}
		if (dv == NULL) {
			alldevs_garbage = false;
			break;
		}
		config_devunlink(dv, garbage);
	}
	KASSERT(mutex_owned(&alldevs_lock));
}

/*
 * config_dump_garbage:
 *
 *	Destroy every device on the garbage list.  Called without
 *	alldevs_lock, since config_devdelete may sleep.
 */
static void
config_dump_garbage(struct devicelist *garbage)
{
	device_t dv;

	while ((dv = TAILQ_FIRST(garbage)) != NULL) {
		TAILQ_REMOVE(garbage, dv, dv_list);
		config_devdelete(dv);
	}
}

/*
 * config_detach_enter:
 *
 *	Claim `dev' for detaching by the current lwp, waiting for any
 *	in-progress attach or concurrent detach to finish first.
 *	Returns 0 on success, or an error if the (interruptible) wait
 *	was broken by a signal.
 */
static int
config_detach_enter(device_t dev)
{
	struct lwp *l __diagused;
	int error = 0;

	mutex_enter(&config_misc_lock);

	/*
	 * Wait until attach has fully completed, and until any
	 * concurrent detach (e.g., drvctl racing with USB event
	 * thread) has completed.
	 *
	 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
	 * deviter) to ensure the winner of the race doesn't free the
	 * device leading the loser of the race into use-after-free.
	 *
	 * XXX Not all callers do this!
	 */
	while (dev->dv_pending || dev->dv_detaching) {
		KASSERTMSG(dev->dv_detaching != curlwp,
		    "recursively detaching %s", device_xname(dev));
		error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
		if (error)
			goto out;
	}

	/*
	 * Attach has completed, and no other concurrent detach is
	 * running.  Claim the device for detaching.  This will cause
	 * all new attempts to acquire references to block.
	 */
	KASSERTMSG((l = dev->dv_attaching) == NULL,
	    "lwp %ld [%s] @ %p attaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	KASSERTMSG((l = dev->dv_detaching) == NULL,
	    "lwp %ld [%s] @ %p detaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	dev->dv_detaching = curlwp;

out:	mutex_exit(&config_misc_lock);
	return error;
}

/*
 * config_detach_exit:
 *
 *	Release the claim on `dev' taken by config_detach_enter and
 *	wake any waiters.
 */
static void
config_detach_exit(device_t dev)
{
	struct lwp *l __diagused;

	mutex_enter(&config_misc_lock);
	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
	    device_xname(dev));
	KASSERTMSG((l = dev->dv_detaching) == curlwp,
	    "lwp %ld [%s] @ %p detaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	dev->dv_detaching = NULL;
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);
}

/*
 * Detach a device.  Optionally forced (e.g. because of hardware
 * removal) and quiet.  Returns zero if successful, non-zero
 * (an error code) otherwise.
 *
 * Note that this code wants to be run from a process context, so
 * that the detach can sleep to allow processes which have a device
 * open to run and unwind their stacks.
 *
 * Caller must hold a reference with device_acquire or
 * device_lookup_acquire.
 */
int
config_detach_release(device_t dev, int flags)
{
	struct alldevs_foray af;
	struct cftable *ct;
	cfdata_t cf;
	const struct cfattach *ca;
	struct cfdriver *cd;
	device_t d __diagused;
	int rv = 0;

	KERNEL_LOCK(1, NULL);

	cf = dev->dv_cfdata;
	KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
		cf->cf_fstate == FSTATE_STAR),
	    "config_detach: %s: bad device fstate: %d",
	    device_xname(dev), cf ? cf->cf_fstate : -1);

	cd = dev->dv_cfdriver;
	KASSERT(cd != NULL);

	ca = dev->dv_cfattach;
	KASSERT(ca != NULL);

	/*
	 * Only one detach at a time, please -- and not until fully
	 * attached.
	 */
	rv = config_detach_enter(dev);
	device_release(dev);
	if (rv) {
		KERNEL_UNLOCK_ONE(NULL);
		return rv;
	}

	mutex_enter(&alldevs_lock);
	if (dev->dv_del_gen != 0) {
		mutex_exit(&alldevs_lock);
#ifdef DIAGNOSTIC
		printf("%s: %s is already detached\n", __func__,
		    device_xname(dev));
#endif /* DIAGNOSTIC */
		config_detach_exit(dev);
		KERNEL_UNLOCK_ONE(NULL);
		return ENOENT;
	}
	alldevs_nwrite++;
	mutex_exit(&alldevs_lock);

	/*
	 * Call the driver's .ca_detach function, unless it has none or
	 * we are skipping it because it's unforced shutdown time and
	 * the driver didn't ask to detach on shutdown.
	 */
	if (!detachall &&
	    (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
	    (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
		rv = EOPNOTSUPP;
	} else if (ca->ca_detach != NULL) {
		rv = (*ca->ca_detach)(dev, flags);
	} else
		rv = EOPNOTSUPP;

	KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
	    device_xname(dev), rv);

	/*
	 * If it was not possible to detach the device, then we either
	 * panic() (for the forced but failed case), or return an error.
	 */
	if (rv) {
		/*
		 * Detach failed -- likely EOPNOTSUPP or EBUSY.  Driver
		 * must not have called config_detach_commit.
		 */
		KASSERTMSG(!dev->dv_detach_committed,
		    "%s committed to detaching and then backed out, error=%d",
		    device_xname(dev), rv);
		if (flags & DETACH_FORCE) {
			panic("config_detach: forced detach of %s failed (%d)",
			    device_xname(dev), rv);
		}
		goto out;
	}

	/*
	 * The device has now been successfully detached.
	 */
	dev->dv_detach_done = true;

	/*
	 * If .ca_detach didn't commit to detach, then do that for it.
	 * This wakes any pending device_lookup_acquire calls so they
	 * will fail.
	 */
	config_detach_commit(dev);

	/*
	 * If it was possible to detach the device, ensure that the
	 * device is deactivated.
	 */
	dev->dv_flags &= ~DVF_ACTIVE;	/* XXXSMP */

	/*
	 * Wait for all device_lookup_acquire references -- mostly, for
	 * all attempts to open the device -- to drain.  It is the
	 * responsibility of .ca_detach to ensure anything with open
	 * references will be interrupted and release them promptly,
	 * not block indefinitely.  All new attempts to acquire
	 * references will fail, as config_detach_commit has arranged
	 * by now.
	 */
	mutex_enter(&config_misc_lock);
	localcount_drain(dev->dv_localcount,
	    &config_misc_cv, &config_misc_lock);
	mutex_exit(&config_misc_lock);

	/* Let userland know */
	devmon_report_device(dev, false);

#ifdef DIAGNOSTIC
	/*
	 * Sanity: If you're successfully detached, you should have no
	 * children.  (Note that because children must be attached
	 * after parents, we only need to search the latter part of
	 * the list.)
	 */
	mutex_enter(&alldevs_lock);
	for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
	    d = TAILQ_NEXT(d, dv_list)) {
		if (d->dv_parent == dev && d->dv_del_gen == 0) {
			printf("config_detach: detached device %s"
			    " has children %s\n", device_xname(dev),
			    device_xname(d));
			panic("config_detach");
		}
	}
	mutex_exit(&alldevs_lock);
#endif

	/* notify the parent that the child is gone */
	if (dev->dv_parent) {
		device_t p = dev->dv_parent;
		if (p->dv_cfattach->ca_childdetached)
			(*p->dv_cfattach->ca_childdetached)(p, dev);
	}

	/*
	 * Mark cfdata to show that the unit can be reused, if possible.
	 */
	TAILQ_FOREACH(ct, &allcftables, ct_list) {
		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
			if (STREQ(cf->cf_name, cd->cd_name)) {
				if (cf->cf_fstate == FSTATE_FOUND &&
				    cf->cf_unit == dev->dv_unit)
					cf->cf_fstate = FSTATE_NOTFOUND;
			}
		}
	}

	if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
		aprint_normal_dev(dev, "detached\n");

out:
	config_detach_exit(dev);

	/*
	 * Unlink the device now if no readers or writers remain;
	 * otherwise mark it deleted for later garbage collection.
	 */
	config_alldevs_enter(&af);
	KASSERT(alldevs_nwrite != 0);
	--alldevs_nwrite;
	if (rv == 0 && dev->dv_del_gen == 0) {
		if (alldevs_nwrite == 0 && alldevs_nread == 0)
			config_devunlink(dev, &af.af_garbage);
		else {
			dev->dv_del_gen = alldevs_gen;
			alldevs_garbage = true;
		}
	}
	config_alldevs_exit(&af);

	KERNEL_UNLOCK_ONE(NULL);

	return rv;
}

/*
 * config_detach(dev, flags)
 *
 *	Legacy entry point for callers that have not acquired a
 *	reference to dev.
 *
 *	The caller is required to hold the kernel lock as a fragile
 *	defence against races.
 *
 *	Callers should be converted to use device_acquire under a lock
 *	taken also by .ca_childdetached to synchronize access to the
 *	device_t, and then config_detach_release outside the lock.
 *	Alternatively, most drivers detach children only in their own
 *	detach routines, which can be done with config_detach_children
 *	instead.
 */
int
config_detach(device_t dev, int flags)
{

	device_acquire(dev);
	return config_detach_release(dev, flags);
}

/*
 * config_detach_commit(dev)
 *
 *	Issued by a driver's .ca_detach routine to notify anyone
 *	waiting in device_lookup_acquire that the driver is committed
 *	to detaching the device, which allows device_lookup_acquire to
 *	wake up and fail immediately.
 *
 *	Safe to call multiple times -- idempotent.
 *	Must be called
 *	during config_detach_enter/exit.  Safe to use with
 *	device_lookup because the device is not actually removed from
 *	the table until after config_detach_exit.
 */
void
config_detach_commit(device_t dev)
{
	struct lwp *l __diagused;

	mutex_enter(&config_misc_lock);
	/* Only the LWP that entered config_detach_enter may commit. */
	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
	    device_xname(dev));
	KASSERTMSG((l = dev->dv_detaching) == curlwp,
	    "lwp %ld [%s] @ %p detaching %s",
	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
	    device_xname(dev));
	dev->dv_detach_committed = true;
	/* Wake any waiters in device_lookup_acquire so they can fail. */
	cv_broadcast(&config_misc_cv);
	mutex_exit(&config_misc_lock);
}

/*
 * config_detach_children(parent, flags)
 *
 *	Detach every attached device whose direct parent is `parent'.
 *	Stops and returns the error of the first child that fails to
 *	detach; returns 0 if all children detached.  Caller must hold
 *	the kernel lock.
 */
int
config_detach_children(device_t parent, int flags)
{
	device_t dv;
	deviter_t di;
	int error = 0;

	KASSERT(KERNEL_LOCKED_P());

	for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
	    dv = deviter_next(&di)) {
		if (device_parent(dv) != parent)
			continue;
		if ((error = config_detach(dv, flags)) != 0)
			break;
	}
	deviter_release(&di);
	return error;
}

/*
 * shutdown_first(s)
 *
 *	Begin a shutdown-ordered (leaves-first) iteration over all
 *	devices and return the first active one.  State is kept in *s
 *	so the iteration can be resumed with shutdown_next.
 */
device_t
shutdown_first(struct shutdown_state *s)
{
	if (!s->initialized) {
		deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
		s->initialized = true;
	}
	return shutdown_next(s);
}

/*
 * shutdown_next(s)
 *
 *	Return the next active device in a shutdown iteration started
 *	by shutdown_first, skipping inactive devices.  Returns NULL
 *	and resets the state when the iteration is exhausted.
 */
device_t
shutdown_next(struct shutdown_state *s)
{
	device_t dv;

	/* Skip devices that are no longer active. */
	while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
		;

	if (dv == NULL)
		s->initialized = false;

	return dv;
}

/*
 * config_detach_all(how)
 *
 *	Detach all devices for system shutdown, leaves first.  `how'
 *	is the reboot(2) howto flags: skipped entirely for RB_NOSYNC
 *	or RB_DUMP; DETACH_POWEROFF is added for RB_POWERDOWN.
 *	Returns true if at least one device was detached.
 */
bool
config_detach_all(int how)
{
	static struct shutdown_state s;
	device_t curdev;
	bool progress = false;
	int flags;

	KERNEL_LOCK(1, NULL);

	if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
		goto out;

	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
		flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
	else
		flags = DETACH_SHUTDOWN;

	for (curdev = shutdown_first(&s); curdev != NULL;
	    curdev = shutdown_next(&s)) {
		aprint_debug(" detaching %s, ", device_xname(curdev));
		if (config_detach(curdev, flags) == 0) {
			progress = true;
			aprint_debug("success.");
		} else
			aprint_debug("failed.");
	}

out:	KERNEL_UNLOCK_ONE(NULL);
	return progress;
}

/*
 * device_is_ancestor_of(ancestor, descendant)
 *
 *	True iff `ancestor' appears on the parent chain of
 *	`descendant' (a device is not its own ancestor).
 */
static bool
device_is_ancestor_of(device_t ancestor, device_t descendant)
{
	device_t dv;

	for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
		if (device_parent(dv) == ancestor)
			return true;
	}
	return false;
}

/*
 * config_deactivate(dev)
 *
 *	Deactivate `dev' and all of its descendants, root-first: clear
 *	DVF_ACTIVE and invoke each driver's .ca_activate with
 *	DVACT_DEACTIVATE (at splhigh).  If a driver's activate hook
 *	fails, that device's flags are restored; the last nonzero
 *	error is returned.
 */
int
config_deactivate(device_t dev)
{
	deviter_t di;
	const struct cfattach *ca;
	device_t descendant;
	int s, rv = 0, oflags;

	for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
	    descendant != NULL;
	    descendant = deviter_next(&di)) {
		if (dev != descendant &&
		    !device_is_ancestor_of(dev, descendant))
			continue;

		/* Already deactivated -- nothing to do. */
		if ((descendant->dv_flags & DVF_ACTIVE) == 0)
			continue;

		ca = descendant->dv_cfattach;
		oflags = descendant->dv_flags;

		descendant->dv_flags &= ~DVF_ACTIVE;
		if (ca->ca_activate == NULL)
			continue;
		s = splhigh();
		rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
		splx(s);
		if (rv != 0)
			descendant->dv_flags = oflags;
	}
	deviter_release(&di);
	return rv;
}

/*
 * Defer the configuration of the specified device until all
 * of its parent's devices have been attached.
 */
void
config_defer(device_t dev, void (*func)(device_t))
{
	struct deferred_config *dc;

	if (dev->dv_parent == NULL)
		panic("config_defer: can't defer config of a root device");

	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);

	/* Hold up config_finalize until func has run. */
	config_pending_incr(dev);

	mutex_enter(&config_misc_lock);
#ifdef DIAGNOSTIC
	struct deferred_config *odc;
	TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
		if (odc->dc_dev == dev)
			panic("config_defer: deferred twice");
	}
#endif
	dc->dc_dev = dev;
	dc->dc_func = func;
	TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
	mutex_exit(&config_misc_lock);
}

/*
 * Defer some autoconfiguration for a device until after interrupts
 * are enabled.
 */
void
config_interrupts(device_t dev, void (*func)(device_t))
{
	struct deferred_config *dc;

	/*
	 * If interrupts are enabled, callback now.
	 */
	if (cold == 0) {
		(*func)(dev);
		return;
	}

	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);

	/* Hold up config_finalize until func has run. */
	config_pending_incr(dev);

	mutex_enter(&config_misc_lock);
#ifdef DIAGNOSTIC
	struct deferred_config *odc;
	TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
		if (odc->dc_dev == dev)
			panic("config_interrupts: deferred twice");
	}
#endif
	dc->dc_dev = dev;
	dc->dc_func = func;
	TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
	mutex_exit(&config_misc_lock);
}

/*
 * Defer some autoconfiguration for a device until after root file system
 * is mounted (to load firmware etc).
 */
void
config_mountroot(device_t dev, void (*func)(device_t))
{
	struct deferred_config *dc;

	/*
	 * If root file system is mounted, callback now.
	 */
	if (root_is_mounted) {
		(*func)(dev);
		return;
	}

	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);

	/*
	 * Note: unlike config_defer/config_interrupts, no
	 * config_pending_incr here -- mountroot hooks run after
	 * config_finalize's pending-drain, so they do not gate it.
	 */
	mutex_enter(&config_misc_lock);
#ifdef DIAGNOSTIC
	struct deferred_config *odc;
	TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
		if (odc->dc_dev == dev)
			panic("%s: deferred twice", __func__);
	}
#endif

	dc->dc_dev = dev;
	dc->dc_func = func;
	TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
	mutex_exit(&config_misc_lock);
}

/*
 * Process a deferred configuration queue.
 *
 *	Run (and free) every entry in `queue' whose device's parent is
 *	`parent', or every entry if parent is NULL.  The lock is
 *	dropped around each callback, so the scan restarts from the
 *	head after each one in case the queue changed.
 */
static void
config_process_deferred(struct deferred_config_head *queue, device_t parent)
{
	struct deferred_config *dc;

	KASSERT(KERNEL_LOCKED_P());

	mutex_enter(&config_misc_lock);
	dc = TAILQ_FIRST(queue);
	while (dc) {
		if (parent == NULL || dc->dc_dev->dv_parent == parent) {
			TAILQ_REMOVE(queue, dc, dc_queue);
			mutex_exit(&config_misc_lock);

			(*dc->dc_func)(dc->dc_dev);
			config_pending_decr(dc->dc_dev);
			kmem_free(dc, sizeof(*dc));

			mutex_enter(&config_misc_lock);
			/* Restart, queue might have changed */
			dc = TAILQ_FIRST(queue);
		} else {
			dc = TAILQ_NEXT(dc, dc_queue);
		}
	}
	mutex_exit(&config_misc_lock);
}

/*
 * Manipulate the config_pending semaphore.
 */
void
config_pending_incr(device_t dev)
{

	mutex_enter(&config_misc_lock);
	KASSERTMSG(dev->dv_pending < INT_MAX,
	    "%s: excess config_pending_incr", device_xname(dev));
	/* First reference: put the device on the pending list. */
	if (dev->dv_pending++ == 0)
		TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
#ifdef DEBUG_AUTOCONF
	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
#endif
	mutex_exit(&config_misc_lock);
}

void
config_pending_decr(device_t dev)
{

	mutex_enter(&config_misc_lock);
	KASSERTMSG(dev->dv_pending > 0,
	    "%s: excess config_pending_decr", device_xname(dev));
	/* Last reference: remove from the list and wake config_finalize. */
	if (--dev->dv_pending == 0) {
		TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
		cv_broadcast(&config_misc_cv);
	}
#ifdef DEBUG_AUTOCONF
	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
#endif
	mutex_exit(&config_misc_lock);
}

/*
 * Register a "finalization" routine.  Finalization routines are
 * called iteratively once all real devices have been found during
 * autoconfiguration, for as long as any one finalizer has done
 * any work.
 *
 * Returns 0 on success, EEXIST if (dev, fn) is already registered.
 * If finalization has already happened, fn is invoked immediately
 * (until it reports no work) and 0 is returned.
 */
int
config_finalize_register(device_t dev, int (*fn)(device_t))
{
	struct finalize_hook *f;
	int error = 0;

	KERNEL_LOCK(1, NULL);

	/*
	 * If finalization has already been done, invoke the
	 * callback function now.
	 */
	if (config_finalize_done) {
		while ((*fn)(dev) != 0)
			/* loop */ ;
		goto out;
	}

	/* Ensure this isn't already on the list. */
	TAILQ_FOREACH(f, &config_finalize_list, f_list) {
		if (f->f_func == fn && f->f_dev == dev) {
			error = EEXIST;
			goto out;
		}
	}

	f = kmem_alloc(sizeof(*f), KM_SLEEP);
	f->f_func = fn;
	f->f_dev = dev;
	TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);

	/* Success! */
	error = 0;

out:	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

/*
 * config_finalize()
 *
 *	Final stage of autoconfiguration: wait for deferred
 *	configuration to drain, attach pseudo-devices, run finalize
 *	hooks to fixpoint, and report accumulated attach errors.
 */
void
config_finalize(void)
{
	struct finalize_hook *f;
	struct pdevinit *pdev;
	extern struct pdevinit pdevinit[];
	unsigned t0 = getticks();
	int errcnt, rv;

	/*
	 * Now that device driver threads have been created, wait for
	 * them to finish any deferred autoconfiguration.
	 */
	mutex_enter(&config_misc_lock);
	while (!TAILQ_EMPTY(&config_pending)) {
		const unsigned t1 = getticks();

		/*
		 * Complain (quietly after 1s, loudly once a minute)
		 * about devices still holding up configuration.
		 */
		if (t1 - t0 >= hz) {
			void (*pr)(const char *, ...) __printflike(1,2);
			device_t dev;

			if (t1 - t0 >= 60*hz) {
				pr = aprint_normal;
				t0 = t1;
			} else {
				pr = aprint_debug;
			}

			(*pr)("waiting for devices:");
			TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
				(*pr)(" %s", device_xname(dev));
			(*pr)("\n");
		}

		(void)cv_timedwait(&config_misc_cv, &config_misc_lock,
		    mstohz(1000));
	}
	mutex_exit(&config_misc_lock);

	KERNEL_LOCK(1, NULL);

	/* Attach pseudo-devices. */
	for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
		(*pdev->pdev_attach)(pdev->pdev_count);

	/* Run the hooks until none of them does any work. */
	do {
		rv = 0;
		TAILQ_FOREACH(f, &config_finalize_list, f_list)
			rv |= (*f->f_func)(f->f_dev);
	} while (rv != 0);

	config_finalize_done = 1;

	/* Now free all the hooks.
 */
	while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
		TAILQ_REMOVE(&config_finalize_list, f, f_list);
		kmem_free(f, sizeof(*f));
	}

	KERNEL_UNLOCK_ONE(NULL);

	errcnt = aprint_get_error_count();
	if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
	    (boothowto & AB_VERBOSE) == 0) {
		mutex_enter(&config_misc_lock);
		if (config_do_twiddle) {
			config_do_twiddle = 0;
			printf_nolog(" done.\n");
		}
		mutex_exit(&config_misc_lock);
	}
	if (errcnt != 0) {
		printf("WARNING: %d error%s while detecting hardware; "
		    "check system log.\n", errcnt,
		    errcnt == 1 ? "" : "s");
	}
}

/*
 * config_twiddle_init()
 *
 *	Enable the boot-time "twiddle" progress indicator when booting
 *	silently (AB_SILENT set, AB_VERBOSE clear) and arm its callout.
 */
void
config_twiddle_init(void)
{

	if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
		config_do_twiddle = 1;
	}
	callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
}

/*
 * config_twiddle_fn(cookie)
 *
 *	Callout handler: spin the console twiddle and reschedule every
 *	100ms while config_do_twiddle remains set.
 */
void
config_twiddle_fn(void *cookie)
{

	mutex_enter(&config_misc_lock);
	if (config_do_twiddle) {
		twiddle();
		callout_schedule(&config_twiddle_ch, mstohz(100));
	}
	mutex_exit(&config_misc_lock);
}

/*
 * config_alldevs_enter(af)
 *
 *	Take alldevs_lock, collecting any deferred-deleted devices
 *	into af->af_garbage first; pair with config_alldevs_exit.
 */
static void
config_alldevs_enter(struct alldevs_foray *af)
{
	TAILQ_INIT(&af->af_garbage);
	mutex_enter(&alldevs_lock);
	config_collect_garbage(&af->af_garbage);
}

/*
 * config_alldevs_exit(af)
 *
 *	Drop alldevs_lock and dispose of the garbage collected by
 *	config_alldevs_enter (done unlocked, as it may sleep).
 */
static void
config_alldevs_exit(struct alldevs_foray *af)
{
	mutex_exit(&alldevs_lock);
	config_dump_garbage(&af->af_garbage);
}

/*
 * device_lookup:
 *
 *	Look up a device instance for a given driver.
 *
 *	Caller is responsible for ensuring the device's state is
 *	stable, either by holding a reference already obtained with
 *	device_lookup_acquire or by otherwise ensuring the device is
 *	attached and can't be detached (e.g., holding an open device
 *	node and ensuring *_detach calls vdevgone).
 *
 *	XXX Find a way to assert this.
 *
 *	Safe for use up to and including interrupt context at IPL_VM.
 *	Never sleeps.
 */
device_t
device_lookup(cfdriver_t cd, int unit)
{
	device_t dv;

	mutex_enter(&alldevs_lock);
	if (unit < 0 || unit >= cd->cd_ndevs)
		dv = NULL;
	else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
		dv = NULL;	/* deleted, pending garbage collection */
	mutex_exit(&alldevs_lock);

	return dv;
}

/*
 * device_lookup_private:
 *
 *	Look up a softc instance for a given driver.
 */
void *
device_lookup_private(cfdriver_t cd, int unit)
{

	return device_private(device_lookup(cd, unit));
}

/*
 * device_lookup_acquire:
 *
 *	Look up a device instance for a given driver, and return a
 *	reference to it that must be released by device_release.
 *
 *	=> If the device is still attaching, blocks until *_attach has
 *	   returned.
 *
 *	=> If the device is detaching, blocks until *_detach has
 *	   returned.  May succeed or fail in that case, depending on
 *	   whether *_detach has backed out (EBUSY) or committed to
 *	   detaching.
 *
 *	May sleep.
 */
device_t
device_lookup_acquire(cfdriver_t cd, int unit)
{
	device_t dv;

	ASSERT_SLEEPABLE();

	/* XXX This should have a pserialized fast path -- TBD.  */
	mutex_enter(&config_misc_lock);
	mutex_enter(&alldevs_lock);
retry:	if (unit < 0 || unit >= cd->cd_ndevs ||
	    (dv = cd->cd_devs[unit]) == NULL ||
	    dv->dv_del_gen != 0 ||
	    dv->dv_detach_committed) {
		dv = NULL;
	} else {
		/*
		 * Wait for the device to stabilize, if attaching or
		 * detaching.  Either way we must wait for *_attach or
		 * *_detach to complete, and either way we must retry:
		 * even if detaching, *_detach might fail (EBUSY) so
		 * the device may still be there.
		 */
		if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
		    dv->dv_detaching != NULL) {
			mutex_exit(&alldevs_lock);
			cv_wait(&config_misc_cv, &config_misc_lock);
			mutex_enter(&alldevs_lock);
			goto retry;
		}
		device_acquire(dv);
	}
	mutex_exit(&alldevs_lock);
	mutex_exit(&config_misc_lock);

	return dv;
}

/*
 * device_acquire:
 *
 *	Acquire a reference to a device.  It is the caller's
 *	responsibility to ensure that the device's .ca_detach routine
 *	cannot return before calling this.  Caller must release the
 *	reference with device_release or config_detach_release.
 */
void
device_acquire(device_t dv)
{

	/*
	 * No lock because the caller has promised that this can't
	 * change concurrently with device_acquire.
	 *
	 * NOTE(review): the assert expression dereferences dv before
	 * the message's dv == NULL guard can help -- a NULL dv still
	 * faults here rather than printing "(null)".
	 */
	KASSERTMSG(!dv->dv_detach_done, "%s",
	    dv == NULL ? "(null)" : device_xname(dv));
	localcount_acquire(dv->dv_localcount);
}

/*
 * device_release:
 *
 *	Release a reference to a device acquired with device_acquire or
 *	device_lookup_acquire.
 */
void
device_release(device_t dv)
{

	localcount_release(dv->dv_localcount,
	    &config_misc_cv, &config_misc_lock);
}

/*
 * device_find_by_xname:
 *
 *	Returns the device of the given name or NULL if it doesn't exist.
 */
device_t
device_find_by_xname(const char *name)
{
	device_t dv;
	deviter_t di;

	for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
		if (strcmp(device_xname(dv), name) == 0)
			break;
	}
	deviter_release(&di);

	return dv;
}

/*
 * device_find_by_driver_unit:
 *
 *	Returns the device of the given driver name and unit or
 *	NULL if it doesn't exist.
 */
device_t
device_find_by_driver_unit(const char *name, int unit)
{
	struct cfdriver *cd;

	if ((cd = config_cfdriver_lookup(name)) == NULL)
		return NULL;
	return device_lookup(cd, unit);
}

/* Exact string equality predicate for strarray_match_internal. */
static bool
match_strcmp(const char * const s1, const char * const s2)
{
	return strcmp(s1, s2) == 0;
}

/* pmatch(9) pattern predicate; 2 means an exact pattern match. */
static bool
match_pmatch(const char * const s1, const char * const s2)
{
	return pmatch(s1, s2, NULL) == 2;
}

/*
 * strarray_match_internal --
 *	Scan `strings' (nstrings entries) for the first entry matching
 *	`str' under `match_fn'.  On success stores the index in
 *	*indexp and returns true; returns false if no match or if the
 *	array is empty/NULL.
 */
static bool
strarray_match_internal(const char ** const strings,
    unsigned int const nstrings, const char * const str,
    unsigned int * const indexp,
    bool (*match_fn)(const char *, const char *))
{
	unsigned int i;

	if (strings == NULL || nstrings == 0) {
		return false;
	}

	for (i = 0; i < nstrings; i++) {
		if ((*match_fn)(strings[i], str)) {
			*indexp = i;
			return true;
		}
	}

	return false;
}

/*
 * strarray_match --
 *	Exact-match wrapper; returns a weight (nstrings - index, so
 *	earlier entries score higher) or 0 for no match.
 */
static int
strarray_match(const char ** const strings, unsigned int const nstrings,
    const char * const str)
{
	unsigned int idx;

	if (strarray_match_internal(strings, nstrings, str, &idx,
				    match_strcmp)) {
		return (int)(nstrings - idx);
	}
	return 0;
}

/*
 * strarray_pmatch --
 *	pmatch(9)-pattern wrapper; same weighting as strarray_match.
 */
static int
strarray_pmatch(const char ** const strings, unsigned int const nstrings,
    const char * const pattern)
{
	unsigned int idx;

	if (strarray_match_internal(strings, nstrings, pattern, &idx,
				    match_pmatch)) {
		return (int)(nstrings - idx);
	}
	return 0;
}

/*
 * device_compatible_match_strarray_internal --
 *	Walk the driver's compat table (terminated by a NULL compat)
 *	and return the first nonzero match weight against the device's
 *	compat string array, optionally reporting the matching entry
 *	via *matching_entryp.  Returns 0 for no match or empty input.
 */
static int
device_compatible_match_strarray_internal(
    const char **device_compats, int ndevice_compats,
    const struct device_compatible_entry *driver_compats,
    const struct device_compatible_entry **matching_entryp,
    int (*match_fn)(const char **, unsigned int, const char *))
{
	const struct device_compatible_entry *dce = NULL;
	int rv;

	if (ndevice_compats == 0 || device_compats == NULL ||
	    driver_compats == NULL)
		return 0;

	for (dce = driver_compats; dce->compat != NULL; dce++) {
		rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
		if (rv != 0) {
			if (matching_entryp != NULL) {
				*matching_entryp = dce;
			}
			return rv;
		}
	}
	return 0;
}

/*
 * device_compatible_match:
 *
 *	Match a driver's "compatible" data against a device's
 *	"compatible" strings.  Returns result weighted by
 *	which device "compatible" string was matched.
 */
int
device_compatible_match(const char **device_compats, int ndevice_compats,
    const struct device_compatible_entry *driver_compats)
{
	return device_compatible_match_strarray_internal(device_compats,
	    ndevice_compats, driver_compats, NULL, strarray_match);
}

/*
 * device_compatible_pmatch:
 *
 *	Like device_compatible_match(), but uses pmatch(9) to compare
 *	the device "compatible" strings against patterns in the
 *	driver's "compatible" data.
 */
int
device_compatible_pmatch(const char **device_compats, int ndevice_compats,
    const struct device_compatible_entry *driver_compats)
{
	return device_compatible_match_strarray_internal(device_compats,
	    ndevice_compats, driver_compats, NULL, strarray_pmatch);
}

/*
 * device_compatible_match_strlist_internal --
 *	Like the strarray variant, but the device's "compatible"
 *	strings are an OpenFirmware-style NUL-separated string list of
 *	device_compatsize bytes.  Returns the first nonzero match
 *	weight, optionally reporting the entry via *matching_entryp.
 */
static int
device_compatible_match_strlist_internal(
    const char * const device_compats, size_t const device_compatsize,
    const struct device_compatible_entry *driver_compats,
    const struct device_compatible_entry **matching_entryp,
    int (*match_fn)(const char *, size_t, const char *))
{
	const struct device_compatible_entry *dce = NULL;
	int rv;

	if (device_compats == NULL || device_compatsize == 0 ||
	    driver_compats == NULL)
		return 0;

	for (dce = driver_compats; dce->compat != NULL; dce++) {
		rv = (*match_fn)(device_compats, device_compatsize,
		    dce->compat);
		if (rv != 0) {
			if (matching_entryp != NULL) {
				*matching_entryp = dce;
			}
			return rv;
		}
	}
	return 0;
}

/*
 * device_compatible_match_strlist:
 *
 *	Like device_compatible_match(), but take the device
 *	"compatible" strings as an OpenFirmware-style string
 *	list.
 */
int
device_compatible_match_strlist(
    const char * const device_compats, size_t const device_compatsize,
    const struct device_compatible_entry *driver_compats)
{
	return device_compatible_match_strlist_internal(device_compats,
	    device_compatsize, driver_compats, NULL, strlist_match);
}

/*
 * device_compatible_pmatch_strlist:
 *
 *	Like device_compatible_pmatch(), but take the device
 *	"compatible" strings as an OpenFirmware-style string
 *	list.
 */
int
device_compatible_pmatch_strlist(
    const char * const device_compats, size_t const device_compatsize,
    const struct device_compatible_entry *driver_compats)
{
	return device_compatible_match_strlist_internal(device_compats,
	    device_compatsize, driver_compats, NULL, strlist_pmatch);
}

/*
 * device_compatible_match_id_internal --
 *	Match a numeric device ID against a driver compat table
 *	terminated by `sentinel_id', comparing (id & mask) to each
 *	entry's id.  Returns 1 on match (optionally reporting the
 *	entry), 0 otherwise; a zero mask never matches.
 */
static int
device_compatible_match_id_internal(
    uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
    const struct device_compatible_entry *driver_compats,
    const struct device_compatible_entry **matching_entryp)
{
	const struct device_compatible_entry *dce = NULL;

	if (mask == 0)
		return 0;

	for (dce = driver_compats; dce->id != sentinel_id; dce++) {
		if ((id & mask) == dce->id) {
			if (matching_entryp != NULL) {
				*matching_entryp = dce;
			}
			return 1;
		}
	}
	return 0;
}

/*
 * device_compatible_match_id:
 *
 *	Like device_compatible_match(), but takes a single
 *	unsigned integer device ID.
 */
int
device_compatible_match_id(
    uintptr_t const id, uintptr_t const sentinel_id,
    const struct device_compatible_entry *driver_compats)
{
	return device_compatible_match_id_internal(id, (uintptr_t)-1,
	    sentinel_id, driver_compats, NULL);
}

/*
 * device_compatible_lookup:
 *
 *	Look up and return the device_compatible_entry, using the
 *	same matching criteria used by device_compatible_match().
 */
const struct device_compatible_entry *
device_compatible_lookup(const char **device_compats, int ndevice_compats,
    const struct device_compatible_entry *driver_compats)
{
	const struct device_compatible_entry *dce;

	if (device_compatible_match_strarray_internal(device_compats,
	    ndevice_compats, driver_compats, &dce, strarray_match)) {
		return dce;
	}
	return NULL;
}

/*
 * device_compatible_plookup:
 *
 *	Look up and return the device_compatible_entry, using the
 *	same matching criteria used by device_compatible_pmatch().
 */
const struct device_compatible_entry *
device_compatible_plookup(const char **device_compats, int ndevice_compats,
    const struct device_compatible_entry *driver_compats)
{
	const struct device_compatible_entry *dce;

	if (device_compatible_match_strarray_internal(device_compats,
	    ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
		return dce;
	}
	return NULL;
}

/*
 * device_compatible_lookup_strlist:
 *
 *	Like device_compatible_lookup(), but take the device
 *	"compatible" strings as an OpenFirmware-style string
 *	list.
 */
const struct device_compatible_entry *
device_compatible_lookup_strlist(
    const char * const device_compats, size_t const device_compatsize,
    const struct device_compatible_entry *driver_compats)
{
	const struct device_compatible_entry *dce;

	if (device_compatible_match_strlist_internal(device_compats,
	    device_compatsize, driver_compats, &dce, strlist_match)) {
		return dce;
	}
	return NULL;
}

/*
 * device_compatible_plookup_strlist:
 *
 *	Like device_compatible_plookup(), but take the device
 *	"compatible" strings as an OpenFirmware-style string
 *	list.
 */
const struct device_compatible_entry *
device_compatible_plookup_strlist(
    const char * const device_compats, size_t const device_compatsize,
    const struct device_compatible_entry *driver_compats)
{
	const struct device_compatible_entry *dce;

	if (device_compatible_match_strlist_internal(device_compats,
	    device_compatsize, driver_compats, &dce, strlist_pmatch)) {
		return dce;
	}
	return NULL;
}

/*
 * device_compatible_lookup_id:
 *
 *	Like device_compatible_lookup(), but takes a single
 *	unsigned integer device ID.
 */
const struct device_compatible_entry *
device_compatible_lookup_id(
    uintptr_t const id, uintptr_t const sentinel_id,
    const struct device_compatible_entry *driver_compats)
{
	const struct device_compatible_entry *dce;

	if (device_compatible_match_id_internal(id, (uintptr_t)-1,
	    sentinel_id, driver_compats, &dce)) {
		return dce;
	}
	return NULL;
}

/*
 * Power management related functions.
 */

/* True iff PMF driver-level handlers are registered on dev. */
bool
device_pmf_is_registered(device_t dev)
{
	return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
}

/*
 * device_pmf_driver_suspend --
 *	Suspend dev at the driver level.  Idempotent; requires the
 *	class level to be suspended first.  Calls the driver's suspend
 *	hook (if the qualifier reaches driver depth) before marking
 *	DVF_DRIVER_SUSPENDED.
 */
bool
device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
{
	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
		return true;
	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
		return false;
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
	    dev->dv_driver_suspend != NULL &&
	    !(*dev->dv_driver_suspend)(dev, qual))
		return false;

	dev->dv_flags |= DVF_DRIVER_SUSPENDED;
	return true;
}

/*
 * device_pmf_driver_resume --
 *	Resume dev at the driver level.  Idempotent; fails while the
 *	bus level is still suspended.  Calls the driver's resume hook
 *	before clearing DVF_DRIVER_SUSPENDED.
 */
bool
device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
{
	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
		return true;
	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
		return false;
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
	    dev->dv_driver_resume != NULL &&
	    !(*dev->dv_driver_resume)(dev, qual))
		return false;

	dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
	return true;
}

/*
 * device_pmf_driver_shutdown --
 *	Run the driver's shutdown hook, if any; returns false only if
 *	the hook exists and fails.  (The leading `*' on the NULL test
 *	is a no-op on a function pointer -- equivalent to testing the
 *	pointer itself.)
 */
bool
device_pmf_driver_shutdown(device_t dev, int how)
{

	if (*dev->dv_driver_shutdown != NULL &&
	    !(*dev->dv_driver_shutdown)(dev, how))
		return false;
	return true;
}

/*
 * device_pmf_driver_register --
 *	Install driver-level suspend/resume/shutdown handlers and mark
 *	the device as PMF-registered.
 */
void
device_pmf_driver_register(device_t dev,
    bool (*suspend)(device_t, const pmf_qual_t *),
    bool (*resume)(device_t, const pmf_qual_t *),
    bool (*shutdown)(device_t, int))
{

	dev->dv_driver_suspend = suspend;
	dev->dv_driver_resume = resume;
	dev->dv_driver_shutdown = shutdown;
	dev->dv_flags |= DVF_POWER_HANDLERS;
}

/*
 * device_pmf_driver_deregister --
 *	Remove driver-level handlers, then wait until every holder and
 *	waiter of the PMF device lock has drained before returning.
 */
void
device_pmf_driver_deregister(device_t dev)
{
	device_lock_t dvl = device_getlock(dev);

	dev->dv_driver_suspend = NULL;
	dev->dv_driver_resume = NULL;

	mutex_enter(&dvl->dvl_mtx);
	dev->dv_flags &= ~DVF_POWER_HANDLERS;
	while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
		/* Wake a thread that waits for the lock.  That
		 * thread will fail to acquire the lock, and then
		 * it will wake the next thread that waits for the
		 * lock, or else it will wake us.
		 */
		cv_signal(&dvl->dvl_cv);
		pmflock_debug(dev, __func__, __LINE__);
		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
		pmflock_debug(dev, __func__, __LINE__);
	}
	mutex_exit(&dvl->dvl_mtx);
}

/*
 * device_pmf_driver_child_register --
 *	Notify the parent (if it registered a child-register hook)
 *	that a new child has attached.
 */
void
device_pmf_driver_child_register(device_t dev)
{
	device_t parent = device_parent(dev);

	if (parent == NULL || parent->dv_driver_child_register == NULL)
		return;
	(*parent->dv_driver_child_register)(dev);
}

/* Set the hook invoked when a child of dev registers with PMF. */
void
device_pmf_driver_set_child_register(device_t dev,
    void (*child_register)(device_t))
{
	dev->dv_driver_child_register = child_register;
}

/*
 * pmflock_debug --
 *	Debug trace of the PMF lock state at (func, line); compiled to
 *	nothing unless PMFLOCK_DEBUG is defined.
 */
static void
pmflock_debug(device_t dev, const char *func, int line)
{
#ifdef PMFLOCK_DEBUG
	device_lock_t dvl = device_getlock(dev);
	const char *curlwp_name;

	if (curlwp->l_name != NULL)
		curlwp_name = curlwp->l_name;
	else
		curlwp_name = curlwp->l_proc->p_comm;

	aprint_debug_dev(dev,
	    "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
	    curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
#endif	/* PMFLOCK_DEBUG */
}

/*
 * device_pmf_lock1 --
 *	Internal: acquire the (recursive) PMF device lock with
 *	dvl_mtx held.  Sleeps while another LWP holds the lock; fails
 *	(returns false) if the device's PMF registration disappears
 *	while waiting.
 */
static bool
device_pmf_lock1(device_t dev)
{
	device_lock_t dvl = device_getlock(dev);

	while (device_pmf_is_registered(dev) &&
	    dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
		dvl->dvl_nwait++;
		pmflock_debug(dev, __func__, __LINE__);
		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
		pmflock_debug(dev, __func__, __LINE__);
		dvl->dvl_nwait--;
	}
	if (!device_pmf_is_registered(dev)) {
		pmflock_debug(dev, __func__, __LINE__);
		/* We could not acquire the lock, but some other thread may
		 * wait for it, also.  Wake that thread.
 */
		cv_signal(&dvl->dvl_cv);
		return false;
	}
	dvl->dvl_nlock++;
	dvl->dvl_holder = curlwp;
	pmflock_debug(dev, __func__, __LINE__);
	return true;
}

/*
 * device_pmf_lock --
 *	Acquire the PMF device lock (recursive for the holding LWP).
 *	Returns false if the device is no longer PMF-registered.
 */
bool
device_pmf_lock(device_t dev)
{
	bool rc;
	device_lock_t dvl = device_getlock(dev);

	mutex_enter(&dvl->dvl_mtx);
	rc = device_pmf_lock1(dev);
	mutex_exit(&dvl->dvl_mtx);

	return rc;
}

/*
 * device_pmf_unlock --
 *	Release one recursion level of the PMF device lock and wake
 *	one waiter.
 *
 *	NOTE(review): the KASSERT reads dvl_nlock before dvl_mtx is
 *	taken -- apparently relying on the caller being the holder;
 *	confirm intent.
 */
void
device_pmf_unlock(device_t dev)
{
	device_lock_t dvl = device_getlock(dev);

	KASSERT(dvl->dvl_nlock > 0);
	mutex_enter(&dvl->dvl_mtx);
	if (--dvl->dvl_nlock == 0)
		dvl->dvl_holder = NULL;
	cv_signal(&dvl->dvl_cv);
	pmflock_debug(dev, __func__, __LINE__);
	mutex_exit(&dvl->dvl_mtx);
}

/* Return the per-device PMF lock structure. */
device_lock_t
device_getlock(device_t dev)
{
	return &dev->dv_lock;
}

/* Return the bus-level PMF private data registered on dev. */
void *
device_pmf_bus_private(device_t dev)
{
	return dev->dv_bus_private;
}

/*
 * device_pmf_bus_suspend --
 *	Suspend dev at the bus level.  Idempotent; requires class and
 *	driver levels to be suspended first.
 */
bool
device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
{
	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
		return true;
	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
		return false;
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
	    dev->dv_bus_suspend != NULL &&
	    !(*dev->dv_bus_suspend)(dev, qual))
		return false;

	dev->dv_flags |= DVF_BUS_SUSPENDED;
	return true;
}

/*
 * device_pmf_bus_resume --
 *	Resume dev at the bus level.  Idempotent.
 */
bool
device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
{
	if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
		return true;
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
	    dev->dv_bus_resume != NULL &&
	    !(*dev->dv_bus_resume)(dev, qual))
		return false;

	dev->dv_flags &= ~DVF_BUS_SUSPENDED;
	return true;
}

/*
 * device_pmf_bus_shutdown --
 *	Run the bus shutdown hook, if any; returns false only if the
 *	hook exists and fails.  (As with the driver variant, the `*'
 *	on the NULL test is a no-op on a function pointer.)
 */
bool
device_pmf_bus_shutdown(device_t dev, int how)
{

	if (*dev->dv_bus_shutdown != NULL &&
	    !(*dev->dv_bus_shutdown)(dev, how))
		return false;
	return true;
}

/*
 * device_pmf_bus_register --
 *	Install bus-level PMF handlers and private data on dev.
 */
void
device_pmf_bus_register(device_t dev, void *priv,
    bool (*suspend)(device_t, const pmf_qual_t *),
    bool (*resume)(device_t, const pmf_qual_t *),
    bool (*shutdown)(device_t, int), void (*deregister)(device_t))
{
	dev->dv_bus_private = priv;
	dev->dv_bus_resume = resume;
	dev->dv_bus_suspend = suspend;
	dev->dv_bus_shutdown = shutdown;
	dev->dv_bus_deregister = deregister;
}

/*
 * device_pmf_bus_deregister --
 *	Invoke the bus deregistration hook, then clear all bus-level
 *	PMF state.  No-op if no hook was registered.
 */
void
device_pmf_bus_deregister(device_t dev)
{
	if (dev->dv_bus_deregister == NULL)
		return;
	(*dev->dv_bus_deregister)(dev);
	dev->dv_bus_private = NULL;
	dev->dv_bus_suspend = NULL;
	dev->dv_bus_resume = NULL;
	dev->dv_bus_deregister = NULL;
}

/* Return the class-level PMF private data registered on dev. */
void *
device_pmf_class_private(device_t dev)
{
	return dev->dv_class_private;
}

/*
 * device_pmf_class_suspend --
 *	Suspend dev at the class level (the first level suspended).
 *	Idempotent.
 */
bool
device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
{
	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
		return true;
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
	    dev->dv_class_suspend != NULL &&
	    !(*dev->dv_class_suspend)(dev, qual))
		return false;

	dev->dv_flags |= DVF_CLASS_SUSPENDED;
	return true;
}

/*
 * device_pmf_class_resume --
 *	Resume dev at the class level.  Idempotent; requires bus and
 *	driver levels to be resumed first.
 */
bool
device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
{
	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
		return true;
	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
		return false;
	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
	    dev->dv_class_resume != NULL &&
	    !(*dev->dv_class_resume)(dev, qual))
		return false;

	dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
	return true;
}

/*
 * device_pmf_class_register --
 *	Install class-level PMF handlers and private data on dev.
 */
void
device_pmf_class_register(device_t dev, void *priv,
    bool (*suspend)(device_t, const pmf_qual_t *),
    bool (*resume)(device_t, const pmf_qual_t *),
    void (*deregister)(device_t))
{
	dev->dv_class_private = priv;
	dev->dv_class_suspend = suspend;
	dev->dv_class_resume = resume;
	dev->dv_class_deregister = deregister;
}

/*
 * device_pmf_class_deregister --
 *	Invoke the class deregistration hook, then clear all
 *	class-level PMF state.  No-op if no hook was registered.
 */
void
device_pmf_class_deregister(device_t dev)
{
	if (dev->dv_class_deregister == NULL)
		return;
	(*dev->dv_class_deregister)(dev);
	dev->dv_class_private = NULL;
	dev->dv_class_suspend = NULL;
	dev->dv_class_resume = NULL;
	dev->dv_class_deregister = NULL;
}

/*
 * device_active --
 *	Report activity of type `type' on dev to every registered
 *	activity handler.  Returns false if no handlers are
 *	registered.  (A NULL entry marks the end of the used portion
 *	of the handler array.)
 */
bool
device_active(device_t dev, devactive_t type)
{
	size_t i;

	if (dev->dv_activity_count == 0)
		return false;

	for (i = 0; i < dev->dv_activity_count; ++i) {
		if (dev->dv_activity_handlers[i] == NULL)
			break;
		(*dev->dv_activity_handlers[i])(dev, type);
	}

	return true;
}

/*
 * device_active_register --
 *	Register an activity handler on dev.  Fills the first free
 *	slot if one exists; otherwise grows the handler array by 4
 *	slots and swaps it in atomically (at splhigh) with respect to
 *	device_active.  Always returns true; registering the same
 *	handler twice asserts under DIAGNOSTIC.
 */
bool
device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
{
	void (**new_handlers)(device_t, devactive_t);
	void (**old_handlers)(device_t, devactive_t);
	size_t i, old_size, new_size;
	int s;

	old_handlers = dev->dv_activity_handlers;
	old_size = dev->dv_activity_count;

	KASSERT(old_size == 0 || old_handlers != NULL);

	for (i = 0; i < old_size; ++i) {
		KASSERT(old_handlers[i] != handler);
		if (old_handlers[i] == NULL) {
			old_handlers[i] = handler;
			return true;
		}
	}

	new_size = old_size + 4;
	new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);

	for (i = 0; i < old_size; ++i)
		new_handlers[i] = old_handlers[i];
	new_handlers[old_size] = handler;
	for (i = old_size+1; i < new_size; ++i)
		new_handlers[i] = NULL;

	/* Publish count and array together, blocking interrupts. */
	s = splhigh();
	dev->dv_activity_count = new_size;
	dev->dv_activity_handlers = new_handlers;
	splx(s);

	if (old_size > 0)
		kmem_free(old_handlers, sizeof(void *) * old_size);

	return true;
}
/*
 * device_active_deregister:
 *
 *	Remove `handler' from `dev's table of activity handlers,
 *	shifting the remaining entries down to keep the table
 *	NULL-terminated.  If the table becomes empty it is freed and
 *	the count/pointer pair is cleared with interrupts blocked.
 *	Silently returns if `handler' is not found (XXX panic?).
 */
void
device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
{
	void (**old_handlers)(device_t, devactive_t);
	size_t i, old_size;
	int s;

	old_handlers = dev->dv_activity_handlers;
	old_size = dev->dv_activity_count;

	/* Locate `handler'; a NULL entry terminates the table. */
	for (i = 0; i < old_size; ++i) {
		if (old_handlers[i] == handler)
			break;
		if (old_handlers[i] == NULL)
			return; /* XXX panic? */
	}

	if (i == old_size)
		return; /* XXX panic? */

	/* Shift the remaining handlers down to close the gap. */
	for (; i < old_size - 1; ++i) {
		if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
			continue;

		/*
		 * Copied a NULL, so the shift is complete.  If the
		 * removed handler was the only one (i == 0), the table
		 * is empty: unpublish and free it.
		 */
		if (i == 0) {
			s = splhigh();
			dev->dv_activity_count = 0;
			dev->dv_activity_handlers = NULL;
			splx(s);
			kmem_free(old_handlers, sizeof(void *) * old_size);
		}
		return;
	}
	/* Removed from a completely full table: NUL-terminate it. */
	old_handlers[i] = NULL;
}

/* Return true iff the device_t `dev' exists at generation `gen'. */
static bool
device_exists_at(device_t dv, devgen_t gen)
{
	return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
	    dv->dv_add_gen <= gen;
}

/* Return true iff iterator `di' should visit device `dv', i.e. `dv'
 * existed at the generation `di' was initialized at. */
static bool
deviter_visits(const deviter_t *di, device_t dv)
{
	return device_exists_at(dv, di->di_gen);
}

/*
 * Device Iteration
 *
 * deviter_t: a device iterator.  Holds state for a "walk" visiting
 *     each device_t's in the device tree.
 *
 * deviter_init(di, flags): initialize the device iterator `di'
 *     to "walk" the device tree.  deviter_next(di) will return
 *     the first device_t in the device tree, or NULL if there are
 *     no devices.
3732 * 3733 * `flags' is one or more of DEVITER_F_RW, indicating that the 3734 * caller intends to modify the device tree by calling 3735 * config_detach(9) on devices in the order that the iterator 3736 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices 3737 * nearest the "root" of the device tree to be returned, first; 3738 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from 3739 * the root of the device tree, first; and DEVITER_F_SHUTDOWN, 3740 * indicating both that deviter_init() should not respect any 3741 * locks on the device tree, and that deviter_next(di) may run 3742 * in more than one LWP before the walk has finished. 3743 * 3744 * Only one DEVITER_F_RW iterator may be in the device tree at 3745 * once. 3746 * 3747 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW. 3748 * 3749 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and 3750 * DEVITER_F_LEAVES_FIRST are used in combination. 3751 * 3752 * deviter_first(di, flags): initialize the device iterator `di' 3753 * and return the first device_t in the device tree, or NULL 3754 * if there are no devices. The statement 3755 * 3756 * dv = deviter_first(di); 3757 * 3758 * is shorthand for 3759 * 3760 * deviter_init(di); 3761 * dv = deviter_next(di); 3762 * 3763 * deviter_next(di): return the next device_t in the device tree, 3764 * or NULL if there are no more devices. deviter_next(di) 3765 * is undefined if `di' was not initialized with deviter_init() or 3766 * deviter_first(). 3767 * 3768 * deviter_release(di): stops iteration (subsequent calls to 3769 * deviter_next() will return NULL), releases any locks and 3770 * resources held by the device iterator. 3771 * 3772 * Device iteration does not return device_t's in any particular 3773 * order. An iterator will never return the same device_t twice. 3774 * Device iteration is guaranteed to complete---i.e., if deviter_next(di) 3775 * is called repeatedly on the same `di', it will eventually return 3776 * NULL. 
 * It is ok to attach/detach devices during device iteration.
 */
void
deviter_init(deviter_t *di, deviter_flags_t flags)
{
	device_t dv;

	memset(di, 0, sizeof(*di));

	/* DEVITER_F_SHUTDOWN implies DEVITER_F_RW. */
	if ((flags & DEVITER_F_SHUTDOWN) != 0)
		flags |= DEVITER_F_RW;

	mutex_enter(&alldevs_lock);
	/* Account for this iterator: readers and writers are counted
	 * separately so tree modification can be coordinated. */
	if ((flags & DEVITER_F_RW) != 0)
		alldevs_nwrite++;
	else
		alldevs_nread++;
	/* Snapshot the generation; only devices that exist at this
	 * generation will be visited (see deviter_visits()). */
	di->di_gen = alldevs_gen++;
	di->di_flags = flags;

	/*
	 * For depth-ordered walks, precompute the depth bound over all
	 * devices this iterator will visit: leaves-first starts at the
	 * deepest level and walks toward the root; root-first starts
	 * at depth 0 and walks out to the maximum depth.
	 */
	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
	case DEVITER_F_LEAVES_FIRST:
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (!deviter_visits(di, dv))
				continue;
			di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
		}
		break;
	case DEVITER_F_ROOT_FIRST:
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (!deviter_visits(di, dv))
				continue;
			di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
		}
		break;
	default:
		break;
	}

	deviter_reinit(di);
	mutex_exit(&alldevs_lock);
}

/*
 * deviter_reinit:
 *
 *	Reset the iterator's cursor to the start of the device list.
 *	R/W iterators walk the list tail-to-head so that a device
 *	returned (and possibly detached by the caller) is not
 *	revisited.  Caller must hold alldevs_lock.
 */
static void
deviter_reinit(deviter_t *di)
{

	KASSERT(mutex_owned(&alldevs_lock));
	if ((di->di_flags & DEVITER_F_RW) != 0)
		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
	else
		di->di_prev = TAILQ_FIRST(&alldevs);
}

/*
 * deviter_first:
 *
 *	Shorthand for deviter_init() followed by deviter_next().
 */
device_t
deviter_first(deviter_t *di, deviter_flags_t flags)
{

	deviter_init(di, flags);
	return deviter_next(di);
}

/*
 * deviter_next2:
 *
 *	Take one raw step: return the device at the cursor and advance
 *	the cursor (backwards for R/W iterators, forwards otherwise).
 *	Returns NULL when the walk is exhausted.
 */
static device_t
deviter_next2(deviter_t *di)
{
	device_t dv;

	KASSERT(mutex_owned(&alldevs_lock));

	dv = di->di_prev;

	if (dv == NULL)
		return NULL;

	if ((di->di_flags & DEVITER_F_RW) != 0)
		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
	else
		di->di_prev = TAILQ_NEXT(dv, dv_list);

	return dv;
}

/*
 * deviter_next1:
 *
 *	Like deviter_next2(), but skip devices that did not exist at
 *	the iterator's generation.
 */
static device_t
deviter_next1(deviter_t *di)
{
	device_t dv;

	KASSERT(mutex_owned(&alldevs_lock));

	do {
		dv = deviter_next2(di);
	} while (dv != NULL && !deviter_visits(di, dv));

	return dv;
}

/*
 * deviter_next:
 *
 *	Return the next device_t in the walk, or NULL when the walk is
 *	complete.  For depth-ordered walks, each pass over the list
 *	returns only devices at the current depth; when a pass finds no
 *	more, the depth is stepped (toward the root for leaves-first,
 *	away from it for root-first) and the list is rescanned.
 */
device_t
deviter_next(deviter_t *di)
{
	device_t dv = NULL;

	mutex_enter(&alldevs_lock);
	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
	case 0:
		/* Unordered walk. */
		dv = deviter_next1(di);
		break;
	case DEVITER_F_LEAVES_FIRST:
		while (di->di_curdepth >= 0) {
			if ((dv = deviter_next1(di)) == NULL) {
				/* Pass complete: move one level toward
				 * the root and rescan. */
				di->di_curdepth--;
				deviter_reinit(di);
			} else if (dv->dv_depth == di->di_curdepth)
				break;
		}
		break;
	case DEVITER_F_ROOT_FIRST:
		while (di->di_curdepth <= di->di_maxdepth) {
			if ((dv = deviter_next1(di)) == NULL) {
				/* Pass complete: move one level away
				 * from the root and rescan. */
				di->di_curdepth++;
				deviter_reinit(di);
			} else if (dv->dv_depth == di->di_curdepth)
				break;
		}
		break;
	default:
		break;
	}
	mutex_exit(&alldevs_lock);

	return dv;
}

/*
 * deviter_release:
 *
 *	Finish the walk: drop this iterator's reader/writer count on
 *	the device list.
 */
void
deviter_release(deviter_t *di)
{
	bool rw = (di->di_flags & DEVITER_F_RW) != 0;

	mutex_enter(&alldevs_lock);
	if (rw)
		--alldevs_nwrite;
	else
		--alldevs_nread;
	/* XXX wake a garbage-collection thread */
	mutex_exit(&alldevs_lock);
}

/* Return the interface attribute that config entry `cf' attaches to. */
const char *
cfdata_ifattr(const struct cfdata *cf)
{
	return cf->cf_pspec->cfp_iattr;
}

/* Match interface attribute `snull' against `t'.
 * A NULL `snull' matches anything. */
bool
ifattr_match(const char *snull, const char *t)
{
	return (snull == NULL) || strcmp(snull, t) == 0;
}

/* Default no-op child-detached handler for drivers that need none. */
void
null_childdetached(device_t self, device_t child)
{
	/* do nothing */
}

/* Create the kern.detachall sysctl knob, backed by `detachall'. */
static void
sysctl_detach_setup(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "detachall",
	    SYSCTL_DESCR("Detach all devices at shutdown"),
	    NULL, 0, &detachall, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
}
3951