subr_autoconf.c revision 1.312
      1 /* $NetBSD: subr_autoconf.c,v 1.312 2023/05/23 00:31:42 riastradh Exp $ */
      2 
      3 /*
      4  * Copyright (c) 1996, 2000 Christopher G. Demetriou
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *          This product includes software developed for the
     18  *          NetBSD Project.  See http://www.NetBSD.org/ for
     19  *          information about NetBSD.
     20  * 4. The name of the author may not be used to endorse or promote products
     21  *    derived from this software without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     33  *
     34  * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
     35  */
     36 
     37 /*
     38  * Copyright (c) 1992, 1993
     39  *	The Regents of the University of California.  All rights reserved.
     40  *
     41  * This software was developed by the Computer Systems Engineering group
     42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     43  * contributed to Berkeley.
     44  *
     45  * All advertising materials mentioning features or use of this software
     46  * must display the following acknowledgement:
     47  *	This product includes software developed by the University of
     48  *	California, Lawrence Berkeley Laboratories.
     49  *
     50  * Redistribution and use in source and binary forms, with or without
     51  * modification, are permitted provided that the following conditions
     52  * are met:
     53  * 1. Redistributions of source code must retain the above copyright
     54  *    notice, this list of conditions and the following disclaimer.
     55  * 2. Redistributions in binary form must reproduce the above copyright
     56  *    notice, this list of conditions and the following disclaimer in the
     57  *    documentation and/or other materials provided with the distribution.
     58  * 3. Neither the name of the University nor the names of its contributors
     59  *    may be used to endorse or promote products derived from this software
     60  *    without specific prior written permission.
     61  *
     62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     72  * SUCH DAMAGE.
     73  *
     74  * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp  (LBL)
     75  *
     76  *	@(#)subr_autoconf.c	8.3 (Berkeley) 5/17/94
     77  */
     78 
     79 #include <sys/cdefs.h>
     80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.312 2023/05/23 00:31:42 riastradh Exp $");
     81 
     82 #ifdef _KERNEL_OPT
     83 #include "opt_ddb.h"
     84 #include "drvctl.h"
     85 #endif
     86 
     87 #include <sys/param.h>
     88 #include <sys/device.h>
     89 #include <sys/device_impl.h>
     90 #include <sys/disklabel.h>
     91 #include <sys/conf.h>
     92 #include <sys/kauth.h>
     93 #include <sys/kmem.h>
     94 #include <sys/systm.h>
     95 #include <sys/kernel.h>
     96 #include <sys/errno.h>
     97 #include <sys/proc.h>
     98 #include <sys/reboot.h>
     99 #include <sys/kthread.h>
    100 #include <sys/buf.h>
    101 #include <sys/dirent.h>
    102 #include <sys/mount.h>
    103 #include <sys/namei.h>
    104 #include <sys/unistd.h>
    105 #include <sys/fcntl.h>
    106 #include <sys/lockf.h>
    107 #include <sys/callout.h>
    108 #include <sys/devmon.h>
    109 #include <sys/cpu.h>
    110 #include <sys/sysctl.h>
    111 #include <sys/stdarg.h>
    112 #include <sys/localcount.h>
    113 
    114 #include <sys/disk.h>
    115 
    116 #include <sys/rndsource.h>
    117 
    118 #include <machine/limits.h>
    119 
    120 /*
    121  * Autoconfiguration subroutines.
    122  */
    123 
    124 /*
    125  * Device autoconfiguration timings are mixed into the entropy pool.
    126  */
    127 static krndsource_t rnd_autoconf_source;
    128 
    129 /*
    130  * ioconf.c exports exactly two names: cfdata and cfroots.  All system
    131  * devices and drivers are found via these tables.
    132  */
    133 extern struct cfdata cfdata[];
    134 extern const short cfroots[];
    135 
    136 /*
    137  * List of all cfdriver structures.  We use this to detect duplicates
    138  * when other cfdrivers are loaded.
    139  */
    140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
    141 extern struct cfdriver * const cfdriver_list_initial[];
    142 
    143 /*
    144  * Initial list of cfattach's.
    145  */
    146 extern const struct cfattachinit cfattachinit[];
    147 
    148 /*
     149  * List of cfdata tables.  We always have one such table -- the one
    150  * built statically when the kernel was configured.
    151  */
    152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
    153 static struct cftable initcftable;
    154 
    155 #define	ROOT ((device_t)NULL)
    156 
    157 struct matchinfo {
    158 	cfsubmatch_t fn;
    159 	device_t parent;
    160 	const int *locs;
    161 	void	*aux;
    162 	struct	cfdata *match;
    163 	int	pri;
    164 };
    165 
    166 struct alldevs_foray {
    167 	int			af_s;
    168 	struct devicelist	af_garbage;
    169 };
    170 
    171 /*
    172  * Internal version of the cfargs structure; all versions are
    173  * canonicalized to this.
    174  */
    175 struct cfargs_internal {
    176 	union {
    177 		cfsubmatch_t	submatch;/* submatch function (direct config) */
    178 		cfsearch_t	search;	 /* search function (indirect config) */
    179 	};
    180 	const char *	iattr;		/* interface attribute */
    181 	const int *	locators;	/* locators array */
    182 	devhandle_t	devhandle;	/* devhandle_t (by value) */
    183 };
    184 
    185 static char *number(char *, int);
    186 static void mapply(struct matchinfo *, cfdata_t);
    187 static void config_devdelete(device_t);
    188 static void config_devunlink(device_t, struct devicelist *);
    189 static void config_makeroom(int, struct cfdriver *);
    190 static void config_devlink(device_t);
    191 static void config_alldevs_enter(struct alldevs_foray *);
    192 static void config_alldevs_exit(struct alldevs_foray *);
    193 static void config_add_attrib_dict(device_t);
    194 static device_t	config_attach_internal(device_t, cfdata_t, void *,
    195 		    cfprint_t, const struct cfargs_internal *);
    196 
    197 static void config_collect_garbage(struct devicelist *);
    198 static void config_dump_garbage(struct devicelist *);
    199 
    200 static void pmflock_debug(device_t, const char *, int);
    201 
    202 static device_t deviter_next1(deviter_t *);
    203 static void deviter_reinit(deviter_t *);
    204 
    205 struct deferred_config {
    206 	TAILQ_ENTRY(deferred_config) dc_queue;
    207 	device_t dc_dev;
    208 	void (*dc_func)(device_t);
    209 };
    210 
    211 TAILQ_HEAD(deferred_config_head, deferred_config);
    212 
    213 static struct deferred_config_head deferred_config_queue =
    214 	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
    215 static struct deferred_config_head interrupt_config_queue =
    216 	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
    217 static int interrupt_config_threads = 8;
    218 static struct deferred_config_head mountroot_config_queue =
    219 	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
    220 static int mountroot_config_threads = 2;
    221 static lwp_t **mountroot_config_lwpids;
    222 static size_t mountroot_config_lwpids_size;
    223 bool root_is_mounted = false;
    224 
    225 static void config_process_deferred(struct deferred_config_head *, device_t);
    226 
    227 /* Hooks to finalize configuration once all real devices have been found. */
    228 struct finalize_hook {
    229 	TAILQ_ENTRY(finalize_hook) f_list;
    230 	int (*f_func)(device_t);
    231 	device_t f_dev;
    232 };
    233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
    234 	TAILQ_HEAD_INITIALIZER(config_finalize_list);
    235 static int config_finalize_done;
    236 
    237 /* list of all devices */
    238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
    239 static kmutex_t alldevs_lock __cacheline_aligned;
    240 static devgen_t alldevs_gen = 1;
    241 static int alldevs_nread = 0;
    242 static int alldevs_nwrite = 0;
    243 static bool alldevs_garbage = false;
    244 
    245 static struct devicelist config_pending =
    246     TAILQ_HEAD_INITIALIZER(config_pending);
    247 static kmutex_t config_misc_lock;
    248 static kcondvar_t config_misc_cv;
    249 
    250 static bool detachall = false;
    251 
    252 #define	STREQ(s1, s2)			\
    253 	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
    254 
    255 static bool config_initialized = false;	/* config_init() has been called. */
    256 
    257 static int config_do_twiddle;
    258 static callout_t config_twiddle_ch;
    259 
    260 static void sysctl_detach_setup(struct sysctllog **);
    261 
    262 int no_devmon_insert(const char *, prop_dictionary_t);
    263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
    264 
    265 typedef int (*cfdriver_fn)(struct cfdriver *);
    266 static int
    267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
    268 	cfdriver_fn drv_do, cfdriver_fn drv_undo,
    269 	const char *style, bool dopanic)
    270 {
    271 	void (*pr)(const char *, ...) __printflike(1, 2) =
    272 	    dopanic ? panic : printf;
    273 	int i, error = 0, e2 __diagused;
    274 
    275 	for (i = 0; cfdriverv[i] != NULL; i++) {
    276 		if ((error = drv_do(cfdriverv[i])) != 0) {
    277 			pr("configure: `%s' driver %s failed: %d",
    278 			    cfdriverv[i]->cd_name, style, error);
    279 			goto bad;
    280 		}
    281 	}
    282 
    283 	KASSERT(error == 0);
    284 	return 0;
    285 
    286  bad:
    287 	printf("\n");
    288 	for (i--; i >= 0; i--) {
    289 		e2 = drv_undo(cfdriverv[i]);
    290 		KASSERT(e2 == 0);
    291 	}
    292 
    293 	return error;
    294 }
    295 
    296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
    297 static int
    298 frob_cfattachvec(const struct cfattachinit *cfattachv,
    299 	cfattach_fn att_do, cfattach_fn att_undo,
    300 	const char *style, bool dopanic)
    301 {
    302 	const struct cfattachinit *cfai = NULL;
    303 	void (*pr)(const char *, ...) __printflike(1, 2) =
    304 	    dopanic ? panic : printf;
    305 	int j = 0, error = 0, e2 __diagused;
    306 
    307 	for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
    308 		for (j = 0; cfai->cfai_list[j] != NULL; j++) {
    309 			if ((error = att_do(cfai->cfai_name,
    310 			    cfai->cfai_list[j])) != 0) {
    311 				pr("configure: attachment `%s' "
    312 				    "of `%s' driver %s failed: %d",
    313 				    cfai->cfai_list[j]->ca_name,
    314 				    cfai->cfai_name, style, error);
    315 				goto bad;
    316 			}
    317 		}
    318 	}
    319 
    320 	KASSERT(error == 0);
    321 	return 0;
    322 
    323  bad:
    324 	/*
     325 	 * Roll back in reverse order.  It is not clear that the order
     326 	 * matters, but do it anyway.  (The loop below mirrors the forward
     327 	 * pass above, a little like integration in the math sense.)
    328 	 */
    329 	printf("\n");
    330 	if (cfai) {
    331 		bool last;
    332 
    333 		for (last = false; last == false; ) {
    334 			if (cfai == &cfattachv[0])
    335 				last = true;
    336 			for (j--; j >= 0; j--) {
    337 				e2 = att_undo(cfai->cfai_name,
    338 				    cfai->cfai_list[j]);
    339 				KASSERT(e2 == 0);
    340 			}
    341 			if (!last) {
    342 				cfai--;
    343 				for (j = 0; cfai->cfai_list[j] != NULL; j++)
    344 					;
    345 			}
    346 		}
    347 	}
    348 
    349 	return error;
    350 }
    351 
    352 /*
    353  * Initialize the autoconfiguration data structures.  Normally this
    354  * is done by configure(), but some platforms need to do this very
    355  * early (to e.g. initialize the console).
    356  */
    357 void
    358 config_init(void)
    359 {
    360 
    361 	KASSERT(config_initialized == false);
    362 
    363 	mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
    364 
    365 	mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
    366 	cv_init(&config_misc_cv, "cfgmisc");
    367 
    368 	callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
    369 
    370 	frob_cfdrivervec(cfdriver_list_initial,
    371 	    config_cfdriver_attach, NULL, "bootstrap", true);
    372 	frob_cfattachvec(cfattachinit,
    373 	    config_cfattach_attach, NULL, "bootstrap", true);
    374 
    375 	initcftable.ct_cfdata = cfdata;
    376 	TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
    377 
    378 	rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
    379 	    RND_FLAG_COLLECT_TIME);
    380 
    381 	config_initialized = true;
    382 }
    383 
    384 /*
    385  * Init or fini drivers and attachments.  Either all or none
    386  * are processed (via rollback).  It would be nice if this were
    387  * atomic to outside consumers, but with the current state of
    388  * locking ...
    389  */
    390 int
    391 config_init_component(struct cfdriver * const *cfdriverv,
    392 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
    393 {
    394 	int error;
    395 
    396 	KERNEL_LOCK(1, NULL);
    397 
    398 	if ((error = frob_cfdrivervec(cfdriverv,
    399 	    config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
    400 		goto out;
    401 	if ((error = frob_cfattachvec(cfattachv,
    402 	    config_cfattach_attach, config_cfattach_detach,
    403 	    "init", false)) != 0) {
    404 		frob_cfdrivervec(cfdriverv,
    405 	            config_cfdriver_detach, NULL, "init rollback", true);
    406 		goto out;
    407 	}
    408 	if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
    409 		frob_cfattachvec(cfattachv,
    410 		    config_cfattach_detach, NULL, "init rollback", true);
    411 		frob_cfdrivervec(cfdriverv,
    412 	            config_cfdriver_detach, NULL, "init rollback", true);
    413 		goto out;
    414 	}
    415 
    416 	/* Success!  */
    417 	error = 0;
    418 
    419 out:	KERNEL_UNLOCK_ONE(NULL);
    420 	return error;
    421 }
    422 
    423 int
    424 config_fini_component(struct cfdriver * const *cfdriverv,
    425 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
    426 {
    427 	int error;
    428 
    429 	KERNEL_LOCK(1, NULL);
    430 
    431 	if ((error = config_cfdata_detach(cfdatav)) != 0)
    432 		goto out;
    433 	if ((error = frob_cfattachvec(cfattachv,
    434 	    config_cfattach_detach, config_cfattach_attach,
    435 	    "fini", false)) != 0) {
    436 		if (config_cfdata_attach(cfdatav, 0) != 0)
    437 			panic("config_cfdata fini rollback failed");
    438 		goto out;
    439 	}
    440 	if ((error = frob_cfdrivervec(cfdriverv,
    441 	    config_cfdriver_detach, config_cfdriver_attach,
    442 	    "fini", false)) != 0) {
    443 		frob_cfattachvec(cfattachv,
    444 	            config_cfattach_attach, NULL, "fini rollback", true);
    445 		if (config_cfdata_attach(cfdatav, 0) != 0)
    446 			panic("config_cfdata fini rollback failed");
    447 		goto out;
    448 	}
    449 
    450 	/* Success!  */
    451 	error = 0;
    452 
    453 out:	KERNEL_UNLOCK_ONE(NULL);
    454 	return error;
    455 }
    456 
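/*
 * Illustrative sketch (editor's example, not part of this file): a
 * driver module built with an "ioconf" clause typically calls
 * config_init_component()/config_fini_component() from its module
 * command handler, passing the tables config(1) generates for it --
 * here for a hypothetical module "foo":
 *
 *	MODULE(MODULE_CLASS_DRIVER, foo, NULL);
 *
 *	static int
 *	foo_modcmd(modcmd_t cmd, void *opaque)
 *	{
 *		switch (cmd) {
 *		case MODULE_CMD_INIT:
 *			return config_init_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		case MODULE_CMD_FINI:
 *			return config_fini_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		default:
 *			return ENOTTY;
 *		}
 *	}
 */
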
    457 void
    458 config_init_mi(void)
    459 {
    460 
    461 	if (!config_initialized)
    462 		config_init();
    463 
    464 	sysctl_detach_setup(NULL);
    465 }
    466 
    467 void
    468 config_deferred(device_t dev)
    469 {
    470 
    471 	KASSERT(KERNEL_LOCKED_P());
    472 
    473 	config_process_deferred(&deferred_config_queue, dev);
    474 	config_process_deferred(&interrupt_config_queue, dev);
    475 	config_process_deferred(&mountroot_config_queue, dev);
    476 }
    477 
    478 static void
    479 config_interrupts_thread(void *cookie)
    480 {
    481 	struct deferred_config *dc;
    482 	device_t dev;
    483 
    484 	mutex_enter(&config_misc_lock);
    485 	while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
    486 		TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
    487 		mutex_exit(&config_misc_lock);
    488 
    489 		dev = dc->dc_dev;
    490 		(*dc->dc_func)(dev);
    491 		if (!device_pmf_is_registered(dev))
    492 			aprint_debug_dev(dev,
    493 			    "WARNING: power management not supported\n");
    494 		config_pending_decr(dev);
    495 		kmem_free(dc, sizeof(*dc));
    496 
    497 		mutex_enter(&config_misc_lock);
    498 	}
    499 	mutex_exit(&config_misc_lock);
    500 
    501 	kthread_exit(0);
    502 }
    503 
    504 void
    505 config_create_interruptthreads(void)
    506 {
    507 	int i;
    508 
    509 	for (i = 0; i < interrupt_config_threads; i++) {
    510 		(void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
    511 		    config_interrupts_thread, NULL, NULL, "configintr");
    512 	}
    513 }
    514 
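/*
 * Illustrative sketch (editor's example with a hypothetical "foo"
 * driver, not part of this file): a driver that needs working
 * interrupts to finish attaching can defer that work with
 * config_interrupts(); the callback then runs in one of the
 * "configintr" threads created by config_create_interruptthreads()
 * above, once interrupts are enabled.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *
 *		sc->sc_dev = self;
 *		... set up whatever does not need interrupts ...
 *		config_interrupts(self, foo_attach_deferred);
 *	}
 *
 *	static void
 *	foo_attach_deferred(device_t self)
 *	{
 *		... interrupt-driven part of the attachment ...
 *	}
 */
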
    515 static void
    516 config_mountroot_thread(void *cookie)
    517 {
    518 	struct deferred_config *dc;
    519 
    520 	mutex_enter(&config_misc_lock);
    521 	while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
    522 		TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
    523 		mutex_exit(&config_misc_lock);
    524 
    525 		(*dc->dc_func)(dc->dc_dev);
    526 		kmem_free(dc, sizeof(*dc));
    527 
    528 		mutex_enter(&config_misc_lock);
    529 	}
    530 	mutex_exit(&config_misc_lock);
    531 
    532 	kthread_exit(0);
    533 }
    534 
    535 void
    536 config_create_mountrootthreads(void)
    537 {
    538 	int i;
    539 
    540 	if (!root_is_mounted)
    541 		root_is_mounted = true;
    542 
     543 	mountroot_config_lwpids_size = sizeof(*mountroot_config_lwpids) *
    544 				       mountroot_config_threads;
    545 	mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
    546 					     KM_NOSLEEP);
    547 	KASSERT(mountroot_config_lwpids);
    548 	for (i = 0; i < mountroot_config_threads; i++) {
    549 		mountroot_config_lwpids[i] = 0;
    550 		(void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
    551 				     NULL, config_mountroot_thread, NULL,
    552 				     &mountroot_config_lwpids[i],
    553 				     "configroot");
    554 	}
    555 }
    556 
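/*
 * Illustrative sketch (editor's example with a hypothetical "foo"
 * driver, not part of this file): work that needs the root file system
 * -- loading firmware with firmload(9), for instance -- can be deferred
 * with config_mountroot(); the callback runs in one of the "configroot"
 * threads created by config_create_mountrootthreads() above, after root
 * has been mounted.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_mountroot(self, foo_load_firmware);
 *	}
 *
 *	static void
 *	foo_load_firmware(device_t self)
 *	{
 *		... firmware_open()/firmware_read()/firmware_close() ...
 *	}
 */
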
    557 void
    558 config_finalize_mountroot(void)
    559 {
    560 	int i, error;
    561 
    562 	for (i = 0; i < mountroot_config_threads; i++) {
    563 		if (mountroot_config_lwpids[i] == 0)
    564 			continue;
    565 
    566 		error = kthread_join(mountroot_config_lwpids[i]);
    567 		if (error)
    568 			printf("%s: thread %x joined with error %d\n",
    569 			       __func__, i, error);
    570 	}
    571 	kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
    572 }
    573 
    574 /*
    575  * Announce device attach/detach to userland listeners.
    576  */
    577 
    578 int
    579 no_devmon_insert(const char *name, prop_dictionary_t p)
    580 {
    581 
    582 	return ENODEV;
    583 }
    584 
    585 static void
    586 devmon_report_device(device_t dev, bool isattach)
    587 {
    588 	prop_dictionary_t ev, dict = device_properties(dev);
    589 	const char *parent;
    590 	const char *what;
    591 	const char *where;
    592 	device_t pdev = device_parent(dev);
    593 
    594 	/* If currently no drvctl device, just return */
    595 	if (devmon_insert_vec == no_devmon_insert)
    596 		return;
    597 
    598 	ev = prop_dictionary_create();
    599 	if (ev == NULL)
    600 		return;
    601 
    602 	what = (isattach ? "device-attach" : "device-detach");
    603 	parent = (pdev == NULL ? "root" : device_xname(pdev));
    604 	if (prop_dictionary_get_string(dict, "location", &where)) {
    605 		prop_dictionary_set_string(ev, "location", where);
    606 		aprint_debug("ev: %s %s at %s in [%s]\n",
    607 		    what, device_xname(dev), parent, where);
    608 	}
    609 	if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
    610 	    !prop_dictionary_set_string(ev, "parent", parent)) {
    611 		prop_object_release(ev);
    612 		return;
    613 	}
    614 
    615 	if ((*devmon_insert_vec)(what, ev) != 0)
    616 		prop_object_release(ev);
    617 }
    618 
    619 /*
    620  * Add a cfdriver to the system.
    621  */
    622 int
    623 config_cfdriver_attach(struct cfdriver *cd)
    624 {
    625 	struct cfdriver *lcd;
    626 
    627 	/* Make sure this driver isn't already in the system. */
    628 	LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
    629 		if (STREQ(lcd->cd_name, cd->cd_name))
    630 			return EEXIST;
    631 	}
    632 
    633 	LIST_INIT(&cd->cd_attach);
    634 	LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
    635 
    636 	return 0;
    637 }
    638 
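/*
 * Illustrative sketch (editor's example, not part of this file): the
 * cfdriver structures registered above are normally generated by
 * config(1) into ioconf.c; a hand-written equivalent for a hypothetical
 * "foo" driver with no interface attributes would be
 *
 *	CFDRIVER_DECL(foo, DV_DULL, NULL);
 *
 * which defines "struct cfdriver foo_cd", later passed to
 * config_cfdriver_attach() at bootstrap or from a module.
 */
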
    639 /*
    640  * Remove a cfdriver from the system.
    641  */
    642 int
    643 config_cfdriver_detach(struct cfdriver *cd)
    644 {
    645 	struct alldevs_foray af;
    646 	int i, rc = 0;
    647 
    648 	config_alldevs_enter(&af);
    649 	/* Make sure there are no active instances. */
    650 	for (i = 0; i < cd->cd_ndevs; i++) {
    651 		if (cd->cd_devs[i] != NULL) {
    652 			rc = EBUSY;
    653 			break;
    654 		}
    655 	}
    656 	config_alldevs_exit(&af);
    657 
    658 	if (rc != 0)
    659 		return rc;
    660 
    661 	/* ...and no attachments loaded. */
    662 	if (LIST_EMPTY(&cd->cd_attach) == 0)
    663 		return EBUSY;
    664 
    665 	LIST_REMOVE(cd, cd_list);
    666 
    667 	KASSERT(cd->cd_devs == NULL);
    668 
    669 	return 0;
    670 }
    671 
    672 /*
    673  * Look up a cfdriver by name.
    674  */
    675 struct cfdriver *
    676 config_cfdriver_lookup(const char *name)
    677 {
    678 	struct cfdriver *cd;
    679 
    680 	LIST_FOREACH(cd, &allcfdrivers, cd_list) {
    681 		if (STREQ(cd->cd_name, name))
    682 			return cd;
    683 	}
    684 
    685 	return NULL;
    686 }
    687 
    688 /*
    689  * Add a cfattach to the specified driver.
    690  */
    691 int
    692 config_cfattach_attach(const char *driver, struct cfattach *ca)
    693 {
    694 	struct cfattach *lca;
    695 	struct cfdriver *cd;
    696 
    697 	cd = config_cfdriver_lookup(driver);
    698 	if (cd == NULL)
    699 		return ESRCH;
    700 
    701 	/* Make sure this attachment isn't already on this driver. */
    702 	LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
    703 		if (STREQ(lca->ca_name, ca->ca_name))
    704 			return EEXIST;
    705 	}
    706 
    707 	LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
    708 
    709 	return 0;
    710 }
    711 
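/*
 * Illustrative sketch (editor's example, not part of this file): the
 * cfattach structures registered above are declared by drivers with
 * the CFATTACH_DECL_NEW() macro, e.g. for a hypothetical "foo" driver:
 *
 *	CFATTACH_DECL_NEW(foo, sizeof(struct foo_softc),
 *	    foo_match, foo_attach, foo_detach, NULL);
 *
 * This defines "struct cfattach foo_ca".  Built-in attachments reach
 * config_cfattach_attach() via the cfattachinit[] table at bootstrap;
 * modular ones arrive via config_init_component().
 */
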
    712 /*
    713  * Remove a cfattach from the specified driver.
    714  */
    715 int
    716 config_cfattach_detach(const char *driver, struct cfattach *ca)
    717 {
    718 	struct alldevs_foray af;
    719 	struct cfdriver *cd;
    720 	device_t dev;
    721 	int i, rc = 0;
    722 
    723 	cd = config_cfdriver_lookup(driver);
    724 	if (cd == NULL)
    725 		return ESRCH;
    726 
    727 	config_alldevs_enter(&af);
    728 	/* Make sure there are no active instances. */
    729 	for (i = 0; i < cd->cd_ndevs; i++) {
    730 		if ((dev = cd->cd_devs[i]) == NULL)
    731 			continue;
    732 		if (dev->dv_cfattach == ca) {
    733 			rc = EBUSY;
    734 			break;
    735 		}
    736 	}
    737 	config_alldevs_exit(&af);
    738 
    739 	if (rc != 0)
    740 		return rc;
    741 
    742 	LIST_REMOVE(ca, ca_list);
    743 
    744 	return 0;
    745 }
    746 
    747 /*
    748  * Look up a cfattach by name.
    749  */
    750 static struct cfattach *
    751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
    752 {
    753 	struct cfattach *ca;
    754 
    755 	LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
    756 		if (STREQ(ca->ca_name, atname))
    757 			return ca;
    758 	}
    759 
    760 	return NULL;
    761 }
    762 
    763 /*
    764  * Look up a cfattach by driver/attachment name.
    765  */
    766 struct cfattach *
    767 config_cfattach_lookup(const char *name, const char *atname)
    768 {
    769 	struct cfdriver *cd;
    770 
    771 	cd = config_cfdriver_lookup(name);
    772 	if (cd == NULL)
    773 		return NULL;
    774 
    775 	return config_cfattach_lookup_cd(cd, atname);
    776 }
    777 
    778 /*
    779  * Apply the matching function and choose the best.  This is used
    780  * a few times and we want to keep the code small.
    781  */
    782 static void
    783 mapply(struct matchinfo *m, cfdata_t cf)
    784 {
    785 	int pri;
    786 
    787 	if (m->fn != NULL) {
    788 		pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
    789 	} else {
    790 		pri = config_match(m->parent, cf, m->aux);
    791 	}
    792 	if (pri > m->pri) {
    793 		m->match = cf;
    794 		m->pri = pri;
    795 	}
    796 }
    797 
    798 int
    799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
    800 {
    801 	const struct cfiattrdata *ci;
    802 	const struct cflocdesc *cl;
    803 	int nlocs, i;
    804 
    805 	ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
    806 	KASSERT(ci);
    807 	nlocs = ci->ci_loclen;
    808 	KASSERT(!nlocs || locs);
    809 	for (i = 0; i < nlocs; i++) {
    810 		cl = &ci->ci_locdesc[i];
    811 		if (cl->cld_defaultstr != NULL &&
    812 		    cf->cf_loc[i] == cl->cld_default)
    813 			continue;
    814 		if (cf->cf_loc[i] == locs[i])
    815 			continue;
    816 		return 0;
    817 	}
    818 
    819 	return config_match(parent, cf, aux);
    820 }
    821 
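/*
 * Illustrative sketch (editor's example with a hypothetical "foo" bus,
 * not part of this file): config_stdsubmatch() is typically passed as
 * the submatch function when a bus attaches or rescans children with
 * explicit locators, so that only cfdata entries whose locators match
 * (or are wildcarded) are considered:
 *
 *	static int
 *	foo_rescan(device_t self, const char *ifattr, const int *locators)
 *	{
 *		struct foo_attach_args faa;
 *
 *		... describe the child at these locators in faa ...
 *		config_found(self, &faa, foo_print,
 *		    CFARGS(.submatch = config_stdsubmatch,
 *			   .iattr = ifattr,
 *			   .locators = locators));
 *		return 0;
 *	}
 */
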
    822 /*
    823  * Helper function: check whether the driver supports the interface attribute
    824  * and return its descriptor structure.
    825  */
    826 static const struct cfiattrdata *
    827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
    828 {
    829 	const struct cfiattrdata * const *cpp;
    830 
    831 	if (cd->cd_attrs == NULL)
    832 		return 0;
    833 
    834 	for (cpp = cd->cd_attrs; *cpp; cpp++) {
    835 		if (STREQ((*cpp)->ci_name, ia)) {
    836 			/* Match. */
    837 			return *cpp;
    838 		}
    839 	}
    840 	return 0;
    841 }
    842 
    843 static int __diagused
    844 cfdriver_iattr_count(const struct cfdriver *cd)
    845 {
    846 	const struct cfiattrdata * const *cpp;
    847 	int i;
    848 
    849 	if (cd->cd_attrs == NULL)
    850 		return 0;
    851 
    852 	for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
    853 		i++;
    854 	}
    855 	return i;
    856 }
    857 
    858 /*
    859  * Lookup an interface attribute description by name.
    860  * If the driver is given, consider only its supported attributes.
    861  */
    862 const struct cfiattrdata *
    863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
    864 {
    865 	const struct cfdriver *d;
    866 	const struct cfiattrdata *ia;
    867 
    868 	if (cd)
    869 		return cfdriver_get_iattr(cd, name);
    870 
    871 	LIST_FOREACH(d, &allcfdrivers, cd_list) {
    872 		ia = cfdriver_get_iattr(d, name);
    873 		if (ia)
    874 			return ia;
    875 	}
    876 	return 0;
    877 }
    878 
    879 /*
    880  * Determine if `parent' is a potential parent for a device spec based
    881  * on `cfp'.
    882  */
    883 static int
    884 cfparent_match(const device_t parent, const struct cfparent *cfp)
    885 {
    886 	struct cfdriver *pcd;
    887 
    888 	/* We don't match root nodes here. */
    889 	if (cfp == NULL)
    890 		return 0;
    891 
    892 	pcd = parent->dv_cfdriver;
    893 	KASSERT(pcd != NULL);
    894 
    895 	/*
    896 	 * First, ensure this parent has the correct interface
    897 	 * attribute.
    898 	 */
    899 	if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
    900 		return 0;
    901 
    902 	/*
    903 	 * If no specific parent device instance was specified (i.e.
    904 	 * we're attaching to the attribute only), we're done!
    905 	 */
    906 	if (cfp->cfp_parent == NULL)
    907 		return 1;
    908 
    909 	/*
    910 	 * Check the parent device's name.
    911 	 */
    912 	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
    913 		return 0;	/* not the same parent */
    914 
    915 	/*
    916 	 * Make sure the unit number matches.
    917 	 */
    918 	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
    919 	    cfp->cfp_unit == parent->dv_unit)
    920 		return 1;
    921 
    922 	/* Unit numbers don't match. */
    923 	return 0;
    924 }
    925 
    926 /*
     927  * Helper for config_cfdata_attach(): check whether each device could be
     928  * the parent of any attachment in the passed config data table, and rescan.
    929  */
    930 static void
    931 rescan_with_cfdata(const struct cfdata *cf)
    932 {
    933 	device_t d;
    934 	const struct cfdata *cf1;
    935 	deviter_t di;
    936 
    937 	KASSERT(KERNEL_LOCKED_P());
    938 
    939 	/*
     940 	 * "alldevs" is likely longer than a module's cfdata, so make it
    941 	 * the outer loop.
    942 	 */
    943 	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
    944 
    945 		if (!(d->dv_cfattach->ca_rescan))
    946 			continue;
    947 
    948 		for (cf1 = cf; cf1->cf_name; cf1++) {
    949 
    950 			if (!cfparent_match(d, cf1->cf_pspec))
    951 				continue;
    952 
    953 			(*d->dv_cfattach->ca_rescan)(d,
    954 				cfdata_ifattr(cf1), cf1->cf_loc);
    955 
    956 			config_deferred(d);
    957 		}
    958 	}
    959 	deviter_release(&di);
    960 }
    961 
    962 /*
    963  * Attach a supplemental config data table and rescan potential
    964  * parent devices if required.
    965  */
    966 int
    967 config_cfdata_attach(cfdata_t cf, int scannow)
    968 {
    969 	struct cftable *ct;
    970 
    971 	KERNEL_LOCK(1, NULL);
    972 
    973 	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
    974 	ct->ct_cfdata = cf;
    975 	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
    976 
    977 	if (scannow)
    978 		rescan_with_cfdata(cf);
    979 
    980 	KERNEL_UNLOCK_ONE(NULL);
    981 
    982 	return 0;
    983 }
    984 
    985 /*
    986  * Helper for config_cfdata_detach: check whether a device is
    987  * found through any attachment in the config data table.
    988  */
    989 static int
    990 dev_in_cfdata(device_t d, cfdata_t cf)
    991 {
    992 	const struct cfdata *cf1;
    993 
    994 	for (cf1 = cf; cf1->cf_name; cf1++)
    995 		if (d->dv_cfdata == cf1)
    996 			return 1;
    997 
    998 	return 0;
    999 }
   1000 
   1001 /*
    1002  * Detach a supplemental config data table.  First detach all devices
    1003  * found through that table (which therefore still hold references to it).
   1004  */
   1005 int
   1006 config_cfdata_detach(cfdata_t cf)
   1007 {
   1008 	device_t d;
   1009 	int error = 0;
   1010 	struct cftable *ct;
   1011 	deviter_t di;
   1012 
   1013 	KERNEL_LOCK(1, NULL);
   1014 
   1015 	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
   1016 	     d = deviter_next(&di)) {
   1017 		if (!dev_in_cfdata(d, cf))
   1018 			continue;
   1019 		if ((error = config_detach(d, 0)) != 0)
   1020 			break;
   1021 	}
   1022 	deviter_release(&di);
   1023 	if (error) {
   1024 		aprint_error_dev(d, "unable to detach instance\n");
   1025 		goto out;
   1026 	}
   1027 
   1028 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   1029 		if (ct->ct_cfdata == cf) {
   1030 			TAILQ_REMOVE(&allcftables, ct, ct_list);
   1031 			kmem_free(ct, sizeof(*ct));
   1032 			error = 0;
   1033 			goto out;
   1034 		}
   1035 	}
   1036 
   1037 	/* not found -- shouldn't happen */
   1038 	error = EINVAL;
   1039 
   1040 out:	KERNEL_UNLOCK_ONE(NULL);
   1041 	return error;
   1042 }
   1043 
   1044 /*
   1045  * Invoke the "match" routine for a cfdata entry on behalf of
   1046  * an external caller, usually a direct config "submatch" routine.
   1047  */
   1048 int
   1049 config_match(device_t parent, cfdata_t cf, void *aux)
   1050 {
   1051 	struct cfattach *ca;
   1052 
   1053 	KASSERT(KERNEL_LOCKED_P());
   1054 
   1055 	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
   1056 	if (ca == NULL) {
   1057 		/* No attachment for this entry, oh well. */
   1058 		return 0;
   1059 	}
   1060 
   1061 	return (*ca->ca_match)(parent, cf, aux);
   1062 }
   1063 
   1064 /*
   1065  * Invoke the "probe" routine for a cfdata entry on behalf of
   1066  * an external caller, usually an indirect config "search" routine.
   1067  */
   1068 int
   1069 config_probe(device_t parent, cfdata_t cf, void *aux)
   1070 {
   1071 	/*
   1072 	 * This is currently a synonym for config_match(), but this
   1073 	 * is an implementation detail; "match" and "probe" routines
   1074 	 * have different behaviors.
   1075 	 *
   1076 	 * XXX config_probe() should return a bool, because there is
   1077 	 * XXX no match score for probe -- it's either there or it's
   1078 	 * XXX not, but some ports abuse the return value as a way
   1079 	 * XXX to attach "critical" devices before "non-critical"
   1080 	 * XXX devices.
   1081 	 */
   1082 	return config_match(parent, cf, aux);
   1083 }
   1084 
   1085 static struct cfargs_internal *
   1086 cfargs_canonicalize(const struct cfargs * const cfargs,
   1087     struct cfargs_internal * const store)
   1088 {
   1089 	struct cfargs_internal *args = store;
   1090 
   1091 	memset(args, 0, sizeof(*args));
   1092 
    1093 	/* If none specified, then all-NULL pointers are good. */
   1094 	if (cfargs == NULL) {
   1095 		return args;
   1096 	}
   1097 
   1098 	/*
    1099 	 * Only one cfargs version is recognized at this time.
   1100 	 */
   1101 	if (cfargs->cfargs_version != CFARGS_VERSION) {
   1102 		panic("cfargs_canonicalize: unknown version %lu\n",
   1103 		    (unsigned long)cfargs->cfargs_version);
   1104 	}
   1105 
   1106 	/*
   1107 	 * submatch and search are mutually-exclusive.
   1108 	 */
   1109 	if (cfargs->submatch != NULL && cfargs->search != NULL) {
   1110 		panic("cfargs_canonicalize: submatch and search are "
   1111 		      "mutually-exclusive");
   1112 	}
   1113 	if (cfargs->submatch != NULL) {
   1114 		args->submatch = cfargs->submatch;
   1115 	} else if (cfargs->search != NULL) {
   1116 		args->search = cfargs->search;
   1117 	}
   1118 
   1119 	args->iattr = cfargs->iattr;
   1120 	args->locators = cfargs->locators;
   1121 	args->devhandle = cfargs->devhandle;
   1122 
   1123 	return args;
   1124 }
   1125 
   1126 /*
   1127  * Iterate over all potential children of some device, calling the given
   1128  * function (default being the child's match function) for each one.
   1129  * Nonzero returns are matches; the highest value returned is considered
   1130  * the best match.  Return the `found child' if we got a match, or NULL
   1131  * otherwise.  The `aux' pointer is simply passed on through.
   1132  *
   1133  * Note that this function is designed so that it can be used to apply
   1134  * an arbitrary function to all potential children (its return value
   1135  * can be ignored).
   1136  */
   1137 static cfdata_t
   1138 config_search_internal(device_t parent, void *aux,
   1139     const struct cfargs_internal * const args)
   1140 {
   1141 	struct cftable *ct;
   1142 	cfdata_t cf;
   1143 	struct matchinfo m;
   1144 
   1145 	KASSERT(config_initialized);
   1146 	KASSERTMSG((!args->iattr ||
   1147 		cfdriver_get_iattr(parent->dv_cfdriver, args->iattr)),
   1148 	    "%s searched for child at interface attribute %s,"
   1149 	    " but device %s(4) has no such interface attribute in config(5)",
   1150 	    device_xname(parent), args->iattr,
   1151 	    parent->dv_cfdriver->cd_name);
   1152 	KASSERTMSG((args->iattr ||
   1153 		cfdriver_iattr_count(parent->dv_cfdriver) < 2),
   1154 	    "%s searched for child without interface attribute,"
    1155 	    " needed to disambiguate among the %d declared for it in %s(4)"
   1156 	    " in config(5)",
   1157 	    device_xname(parent),
   1158 	    cfdriver_iattr_count(parent->dv_cfdriver),
   1159 	    parent->dv_cfdriver->cd_name);
   1160 
   1161 	m.fn = args->submatch;		/* N.B. union */
   1162 	m.parent = parent;
   1163 	m.locs = args->locators;
   1164 	m.aux = aux;
   1165 	m.match = NULL;
   1166 	m.pri = 0;
   1167 
   1168 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   1169 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
   1170 
   1171 			/* We don't match root nodes here. */
   1172 			if (!cf->cf_pspec)
   1173 				continue;
   1174 
   1175 			/*
   1176 			 * Skip cf if no longer eligible, otherwise scan
   1177 			 * through parents for one matching `parent', and
   1178 			 * try match function.
   1179 			 */
   1180 			if (cf->cf_fstate == FSTATE_FOUND)
   1181 				continue;
   1182 			if (cf->cf_fstate == FSTATE_DNOTFOUND ||
   1183 			    cf->cf_fstate == FSTATE_DSTAR)
   1184 				continue;
   1185 
   1186 			/*
   1187 			 * If an interface attribute was specified,
   1188 			 * consider only children which attach to
   1189 			 * that attribute.
   1190 			 */
   1191 			if (args->iattr != NULL &&
   1192 			    !STREQ(args->iattr, cfdata_ifattr(cf)))
   1193 				continue;
   1194 
   1195 			if (cfparent_match(parent, cf->cf_pspec))
   1196 				mapply(&m, cf);
   1197 		}
   1198 	}
   1199 	rnd_add_uint32(&rnd_autoconf_source, 0);
   1200 	return m.match;
   1201 }
   1202 
   1203 cfdata_t
   1204 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
   1205 {
   1206 	cfdata_t cf;
   1207 	struct cfargs_internal store;
   1208 
   1209 	cf = config_search_internal(parent, aux,
   1210 	    cfargs_canonicalize(cfargs, &store));
   1211 
   1212 	return cf;
   1213 }
   1214 
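/*
 * Illustrative sketch (editor's example with a hypothetical "foo" bus,
 * not part of this file): an indirectly-configured bus supplies a
 * search function via CFARGS(.search = ...); config_search() then calls
 * it for every eligible cfdata entry, and the search function probes
 * the hardware itself and attaches on success:
 *
 *	static int
 *	foo_search(device_t parent, cfdata_t cf, const int *locs, void *aux)
 *	{
 *		struct foo_attach_args faa;
 *
 *		... fill in faa from cf->cf_loc / locs ...
 *		if (config_probe(parent, cf, &faa))
 *			config_attach(parent, cf, &faa, foo_print,
 *			    CFARGS_NONE);
 *		return 0;
 *	}
 *
 *	(void)config_search(self, NULL, CFARGS(.search = foo_search));
 */
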
   1215 /*
   1216  * Find the given root device.
   1217  * This is much like config_search, but there is no parent.
   1218  * Don't bother with multiple cfdata tables; the root node
   1219  * must always be in the initial table.
   1220  */
   1221 cfdata_t
   1222 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
   1223 {
   1224 	cfdata_t cf;
   1225 	const short *p;
   1226 	struct matchinfo m;
   1227 
   1228 	m.fn = fn;
   1229 	m.parent = ROOT;
   1230 	m.aux = aux;
   1231 	m.match = NULL;
   1232 	m.pri = 0;
   1233 	m.locs = 0;
   1234 	/*
   1235 	 * Look at root entries for matching name.  We do not bother
   1236 	 * with found-state here since only one root should ever be
   1237 	 * searched (and it must be done first).
   1238 	 */
   1239 	for (p = cfroots; *p >= 0; p++) {
   1240 		cf = &cfdata[*p];
   1241 		if (strcmp(cf->cf_name, rootname) == 0)
   1242 			mapply(&m, cf);
   1243 	}
   1244 	return m.match;
   1245 }
   1246 
   1247 static const char * const msgs[] = {
   1248 [QUIET]		=	"",
   1249 [UNCONF]	=	" not configured\n",
   1250 [UNSUPP]	=	" unsupported\n",
   1251 };
   1252 
   1253 /*
   1254  * The given `aux' argument describes a device that has been found
   1255  * on the given parent, but not necessarily configured.  Locate the
   1256  * configuration data for that device (using the submatch function
   1257  * provided, or using candidates' cd_match configuration driver
   1258  * functions) and attach it, and return its device_t.  If the device was
   1259  * not configured, call the given `print' function and return NULL.
   1260  */
   1261 device_t
   1262 config_found_acquire(device_t parent, void *aux, cfprint_t print,
   1263     const struct cfargs * const cfargs)
   1264 {
   1265 	cfdata_t cf;
   1266 	struct cfargs_internal store;
   1267 	const struct cfargs_internal * const args =
   1268 	    cfargs_canonicalize(cfargs, &store);
   1269 	device_t dev;
   1270 
   1271 	KERNEL_LOCK(1, NULL);
   1272 
   1273 	cf = config_search_internal(parent, aux, args);
   1274 	if (cf != NULL) {
   1275 		dev = config_attach_internal(parent, cf, aux, print, args);
   1276 		goto out;
   1277 	}
   1278 
   1279 	if (print) {
   1280 		if (config_do_twiddle && cold)
   1281 			twiddle();
   1282 
   1283 		const int pret = (*print)(aux, device_xname(parent));
   1284 		KASSERT(pret >= 0);
   1285 		KASSERT(pret < __arraycount(msgs));
   1286 		KASSERT(msgs[pret] != NULL);
   1287 		aprint_normal("%s", msgs[pret]);
   1288 	}
   1289 
   1290 	dev = NULL;
   1291 
   1292 out:	KERNEL_UNLOCK_ONE(NULL);
   1293 	return dev;
   1294 }
   1295 
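/*
 * Illustrative sketch (editor's example with a hypothetical "foo" bus,
 * not part of this file): the cfprint_t function passed here prints any
 * locators and returns QUIET, UNCONF or UNSUPP; the return value picks
 * the message from msgs[] above when no driver matched:
 *
 *	static int
 *	foo_print(void *aux, const char *pnp)
 *	{
 *		struct foo_attach_args *faa = aux;
 *
 *		if (pnp != NULL)
 *			aprint_normal("%s at %s", faa->faa_name, pnp);
 *		aprint_normal(" port %d", faa->faa_port);
 *		return UNCONF;
 *	}
 */
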
   1296 /*
   1297  * config_found(parent, aux, print, cfargs)
   1298  *
   1299  *	Legacy entry point for callers whose use of the returned
   1300  *	device_t is not delimited by device_release.
   1301  *
   1302  *	The caller is required to hold the kernel lock as a fragile
   1303  *	defence against races.
   1304  *
   1305  *	Callers should ignore the return value or be converted to
   1306  *	config_found_acquire with a matching device_release once they
   1307  *	have finished with the returned device_t.
   1308  */
   1309 device_t
   1310 config_found(device_t parent, void *aux, cfprint_t print,
   1311     const struct cfargs * const cfargs)
   1312 {
   1313 	device_t dev;
   1314 
   1315 	KASSERT(KERNEL_LOCKED_P());
   1316 
   1317 	dev = config_found_acquire(parent, aux, print, cfargs);
   1318 	if (dev == NULL)
   1319 		return NULL;
   1320 	device_release(dev);
   1321 
   1322 	return dev;
   1323 }
   1324 
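/*
 * Illustrative sketch (editor's example with a hypothetical "foo" bus,
 * not part of this file): a directly-configured bus enumerates its
 * children in its attach routine and calls config_found() once per
 * child, naming the interface attribute the children attach to:
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct foo_attach_args faa;
 *
 *		for (... each child discovered on the bus ...) {
 *			... describe the child in faa ...
 *			config_found(self, &faa, foo_print,
 *			    CFARGS(.iattr = "foobus"));
 *		}
 *	}
 */
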
   1325 /*
   1326  * As above, but for root devices.
   1327  */
   1328 device_t
   1329 config_rootfound(const char *rootname, void *aux)
   1330 {
   1331 	cfdata_t cf;
   1332 	device_t dev = NULL;
   1333 
   1334 	KERNEL_LOCK(1, NULL);
   1335 	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
   1336 		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
   1337 	else
   1338 		aprint_error("root device %s not configured\n", rootname);
   1339 	KERNEL_UNLOCK_ONE(NULL);
   1340 	return dev;
   1341 }
   1342 
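/*
 * Illustrative sketch (editor's example, not part of this file):
 * machine-dependent code conventionally starts autoconfiguration with
 * something resembling
 *
 *	if (config_rootfound("mainbus", NULL) == NULL)
 *		panic("configure: mainbus not configured");
 */
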
   1343 /* just like sprintf(buf, "%d") except that it works from the end */
   1344 static char *
   1345 number(char *ep, int n)
   1346 {
   1347 
   1348 	*--ep = 0;
   1349 	while (n >= 10) {
   1350 		*--ep = (n % 10) + '0';
   1351 		n /= 10;
   1352 	}
   1353 	*--ep = n + '0';
   1354 	return ep;
   1355 }
   1356 
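/*
 * Worked example (editor's note): with "char num[10]",
 * number(&num[sizeof(num)], 42) stores '4', '2', '\0' in num[7..9] and
 * returns &num[7]; the caller's length, &num[sizeof(num)] - ep == 3,
 * therefore includes the terminating NUL.
 */
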
   1357 /*
   1358  * Expand the size of the cd_devs array if necessary.
   1359  *
   1360  * The caller must hold alldevs_lock. config_makeroom() may release and
   1361  * re-acquire alldevs_lock, so callers should re-check conditions such
   1362  * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
   1363  * returns.
   1364  */
   1365 static void
   1366 config_makeroom(int n, struct cfdriver *cd)
   1367 {
   1368 	int ondevs, nndevs;
   1369 	device_t *osp, *nsp;
   1370 
   1371 	KASSERT(mutex_owned(&alldevs_lock));
   1372 	alldevs_nwrite++;
   1373 
   1374 	/* XXX arithmetic overflow */
   1375 	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
   1376 		;
   1377 
   1378 	while (n >= cd->cd_ndevs) {
   1379 		/*
   1380 		 * Need to expand the array.
   1381 		 */
   1382 		ondevs = cd->cd_ndevs;
   1383 		osp = cd->cd_devs;
   1384 
   1385 		/*
   1386 		 * Release alldevs_lock around allocation, which may
   1387 		 * sleep.
   1388 		 */
   1389 		mutex_exit(&alldevs_lock);
   1390 		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
   1391 		mutex_enter(&alldevs_lock);
   1392 
   1393 		/*
   1394 		 * If another thread moved the array while we did
   1395 		 * not hold alldevs_lock, try again.
   1396 		 */
   1397 		if (cd->cd_devs != osp || cd->cd_ndevs != ondevs) {
   1398 			mutex_exit(&alldevs_lock);
   1399 			kmem_free(nsp, sizeof(device_t) * nndevs);
   1400 			mutex_enter(&alldevs_lock);
   1401 			continue;
   1402 		}
   1403 
   1404 		memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
   1405 		if (ondevs != 0)
   1406 			memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
   1407 
   1408 		cd->cd_ndevs = nndevs;
   1409 		cd->cd_devs = nsp;
   1410 		if (ondevs != 0) {
   1411 			mutex_exit(&alldevs_lock);
   1412 			kmem_free(osp, sizeof(device_t) * ondevs);
   1413 			mutex_enter(&alldevs_lock);
   1414 		}
   1415 	}
   1416 	KASSERT(mutex_owned(&alldevs_lock));
   1417 	alldevs_nwrite--;
   1418 }
   1419 
   1420 /*
   1421  * Put dev into the devices list.
   1422  */
   1423 static void
   1424 config_devlink(device_t dev)
   1425 {
   1426 
   1427 	mutex_enter(&alldevs_lock);
   1428 
   1429 	KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
   1430 
   1431 	dev->dv_add_gen = alldevs_gen;
   1432 	/* It is safe to add a device to the tail of the list while
   1433 	 * readers and writers are in the list.
   1434 	 */
   1435 	TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
   1436 	mutex_exit(&alldevs_lock);
   1437 }
   1438 
   1439 static void
   1440 config_devfree(device_t dev)
   1441 {
   1442 
   1443 	KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
   1444 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
   1445 
   1446 	if (dev->dv_cfattach->ca_devsize > 0)
   1447 		kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
   1448 	kmem_free(dev, sizeof(*dev));
   1449 }
   1450 
   1451 /*
   1452  * Caller must hold alldevs_lock.
   1453  */
   1454 static void
   1455 config_devunlink(device_t dev, struct devicelist *garbage)
   1456 {
   1457 	struct device_garbage *dg = &dev->dv_garbage;
   1458 	cfdriver_t cd = device_cfdriver(dev);
   1459 	int i;
   1460 
   1461 	KASSERT(mutex_owned(&alldevs_lock));
   1462 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
   1463 
   1464  	/* Unlink from device list.  Link to garbage list. */
   1465 	TAILQ_REMOVE(&alldevs, dev, dv_list);
   1466 	TAILQ_INSERT_TAIL(garbage, dev, dv_list);
   1467 
   1468 	/* Remove from cfdriver's array. */
   1469 	cd->cd_devs[dev->dv_unit] = NULL;
   1470 
   1471 	/*
   1472 	 * If the device now has no units in use, unlink its softc array.
   1473 	 */
   1474 	for (i = 0; i < cd->cd_ndevs; i++) {
   1475 		if (cd->cd_devs[i] != NULL)
   1476 			break;
   1477 	}
   1478 	/* Nothing found.  Unlink, now.  Deallocate, later. */
   1479 	if (i == cd->cd_ndevs) {
   1480 		dg->dg_ndevs = cd->cd_ndevs;
   1481 		dg->dg_devs = cd->cd_devs;
   1482 		cd->cd_devs = NULL;
   1483 		cd->cd_ndevs = 0;
   1484 	}
   1485 }
   1486 
   1487 static void
   1488 config_devdelete(device_t dev)
   1489 {
   1490 	struct device_garbage *dg = &dev->dv_garbage;
   1491 	device_lock_t dvl = device_getlock(dev);
   1492 
   1493 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
   1494 
   1495 	if (dg->dg_devs != NULL)
   1496 		kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
   1497 
   1498 	localcount_fini(dev->dv_localcount);
   1499 	kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
   1500 
   1501 	cv_destroy(&dvl->dvl_cv);
   1502 	mutex_destroy(&dvl->dvl_mtx);
   1503 
   1504 	KASSERT(dev->dv_properties != NULL);
   1505 	prop_object_release(dev->dv_properties);
   1506 
   1507 	if (dev->dv_activity_handlers)
   1508 		panic("%s with registered handlers", __func__);
   1509 
   1510 	if (dev->dv_locators) {
   1511 		size_t amount = *--dev->dv_locators;
   1512 		kmem_free(dev->dv_locators, amount);
   1513 	}
   1514 
   1515 	config_devfree(dev);
   1516 }
   1517 
   1518 static int
   1519 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
   1520 {
   1521 	int unit = cf->cf_unit;
   1522 
   1523 	KASSERT(mutex_owned(&alldevs_lock));
   1524 
   1525 	if (unit < 0)
   1526 		return -1;
   1527 	if (cf->cf_fstate == FSTATE_STAR) {
   1528 		for (; unit < cd->cd_ndevs; unit++)
   1529 			if (cd->cd_devs[unit] == NULL)
   1530 				break;
   1531 		/*
   1532 		 * unit is now the unit of the first NULL device pointer,
   1533 		 * or max(cd->cd_ndevs,cf->cf_unit).
   1534 		 */
   1535 	} else {
   1536 		if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
   1537 			unit = -1;
   1538 	}
   1539 	return unit;
   1540 }
   1541 
   1542 static int
   1543 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
   1544 {
   1545 	struct alldevs_foray af;
   1546 	int unit;
   1547 
   1548 	config_alldevs_enter(&af);
   1549 	for (;;) {
   1550 		unit = config_unit_nextfree(cd, cf);
   1551 		if (unit == -1)
   1552 			break;
   1553 		if (unit < cd->cd_ndevs) {
   1554 			cd->cd_devs[unit] = dev;
   1555 			dev->dv_unit = unit;
   1556 			break;
   1557 		}
   1558 		config_makeroom(unit, cd);
   1559 	}
   1560 	config_alldevs_exit(&af);
   1561 
   1562 	return unit;
   1563 }
   1564 
   1565 static device_t
   1566 config_devalloc(const device_t parent, const cfdata_t cf,
   1567     const struct cfargs_internal * const args)
   1568 {
   1569 	cfdriver_t cd;
   1570 	cfattach_t ca;
   1571 	size_t lname, lunit;
   1572 	const char *xunit;
   1573 	int myunit;
   1574 	char num[10];
   1575 	device_t dev;
   1576 	void *dev_private;
   1577 	const struct cfiattrdata *ia;
   1578 	device_lock_t dvl;
   1579 
   1580 	cd = config_cfdriver_lookup(cf->cf_name);
   1581 	if (cd == NULL)
   1582 		return NULL;
   1583 
   1584 	ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
   1585 	if (ca == NULL)
   1586 		return NULL;
   1587 
   1588 	/* get memory for all device vars */
   1589 	KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
   1590 	if (ca->ca_devsize > 0) {
   1591 		dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
   1592 	} else {
   1593 		dev_private = NULL;
   1594 	}
   1595 	dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
   1596 
   1597 	dev->dv_handle = args->devhandle;
   1598 
   1599 	dev->dv_class = cd->cd_class;
   1600 	dev->dv_cfdata = cf;
   1601 	dev->dv_cfdriver = cd;
   1602 	dev->dv_cfattach = ca;
   1603 	dev->dv_activity_count = 0;
   1604 	dev->dv_activity_handlers = NULL;
   1605 	dev->dv_private = dev_private;
   1606 	dev->dv_flags = ca->ca_flags;	/* inherit flags from class */
   1607 	dev->dv_attaching = curlwp;
   1608 
   1609 	myunit = config_unit_alloc(dev, cd, cf);
   1610 	if (myunit == -1) {
   1611 		config_devfree(dev);
   1612 		return NULL;
   1613 	}
   1614 
   1615 	/* compute length of name and decimal expansion of unit number */
   1616 	lname = strlen(cd->cd_name);
   1617 	xunit = number(&num[sizeof(num)], myunit);
   1618 	lunit = &num[sizeof(num)] - xunit;
   1619 	if (lname + lunit > sizeof(dev->dv_xname))
   1620 		panic("config_devalloc: device name too long");
   1621 
   1622 	dvl = device_getlock(dev);
   1623 
   1624 	mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
   1625 	cv_init(&dvl->dvl_cv, "pmfsusp");
   1626 
   1627 	memcpy(dev->dv_xname, cd->cd_name, lname);
   1628 	memcpy(dev->dv_xname + lname, xunit, lunit);
   1629 	dev->dv_parent = parent;
   1630 	if (parent != NULL)
   1631 		dev->dv_depth = parent->dv_depth + 1;
   1632 	else
   1633 		dev->dv_depth = 0;
   1634 	dev->dv_flags |= DVF_ACTIVE;	/* always initially active */
   1635 	if (args->locators) {
   1636 		KASSERT(parent); /* no locators at root */
   1637 		ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
   1638 		dev->dv_locators =
   1639 		    kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
   1640 		*dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
   1641 		memcpy(dev->dv_locators, args->locators,
   1642 		    sizeof(int) * ia->ci_loclen);
   1643 	}
   1644 	dev->dv_properties = prop_dictionary_create();
   1645 	KASSERT(dev->dv_properties != NULL);
   1646 
   1647 	prop_dictionary_set_string_nocopy(dev->dv_properties,
   1648 	    "device-driver", dev->dv_cfdriver->cd_name);
   1649 	prop_dictionary_set_uint16(dev->dv_properties,
   1650 	    "device-unit", dev->dv_unit);
   1651 	if (parent != NULL) {
   1652 		prop_dictionary_set_string(dev->dv_properties,
   1653 		    "device-parent", device_xname(parent));
   1654 	}
   1655 
   1656 	dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
   1657 	    KM_SLEEP);
   1658 	localcount_init(dev->dv_localcount);
   1659 
   1660 	if (dev->dv_cfdriver->cd_attrs != NULL)
   1661 		config_add_attrib_dict(dev);
   1662 
   1663 	return dev;
   1664 }
   1665 
   1666 /*
   1667  * Create an array of device attach attributes and add it
   1668  * to the device's dv_properties dictionary.
   1669  *
   1670  * <key>interface-attributes</key>
   1671  * <array>
   1672  *    <dict>
   1673  *       <key>attribute-name</key>
   1674  *       <string>foo</string>
   1675  *       <key>locators</key>
   1676  *       <array>
   1677  *          <dict>
   1678  *             <key>loc-name</key>
   1679  *             <string>foo-loc1</string>
   1680  *          </dict>
   1681  *          <dict>
   1682  *             <key>loc-name</key>
   1683  *             <string>foo-loc2</string>
   1684  *             <key>default</key>
   1685  *             <string>foo-loc2-default</string>
   1686  *          </dict>
   1687  *          ...
   1688  *       </array>
   1689  *    </dict>
   1690  *    ...
   1691  * </array>
   1692  */
   1693 
   1694 static void
   1695 config_add_attrib_dict(device_t dev)
   1696 {
   1697 	int i, j;
   1698 	const struct cfiattrdata *ci;
   1699 	prop_dictionary_t attr_dict, loc_dict;
   1700 	prop_array_t attr_array, loc_array;
   1701 
   1702 	if ((attr_array = prop_array_create()) == NULL)
   1703 		return;
   1704 
   1705 	for (i = 0; ; i++) {
   1706 		if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
   1707 			break;
   1708 		if ((attr_dict = prop_dictionary_create()) == NULL)
   1709 			break;
   1710 		prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
   1711 		    ci->ci_name);
   1712 
   1713 		/* Create an array of the locator names and defaults */
   1714 
   1715 		if (ci->ci_loclen != 0 &&
   1716 		    (loc_array = prop_array_create()) != NULL) {
   1717 			for (j = 0; j < ci->ci_loclen; j++) {
   1718 				loc_dict = prop_dictionary_create();
   1719 				if (loc_dict == NULL)
   1720 					continue;
   1721 				prop_dictionary_set_string_nocopy(loc_dict,
   1722 				    "loc-name", ci->ci_locdesc[j].cld_name);
   1723 				if (ci->ci_locdesc[j].cld_defaultstr != NULL)
   1724 					prop_dictionary_set_string_nocopy(
   1725 					    loc_dict, "default",
   1726 					    ci->ci_locdesc[j].cld_defaultstr);
   1727 				prop_array_set(loc_array, j, loc_dict);
   1728 				prop_object_release(loc_dict);
   1729 			}
   1730 			prop_dictionary_set_and_rel(attr_dict, "locators",
   1731 			    loc_array);
   1732 		}
   1733 		prop_array_add(attr_array, attr_dict);
   1734 		prop_object_release(attr_dict);
   1735 	}
   1736 	if (i == 0)
   1737 		prop_object_release(attr_array);
   1738 	else
   1739 		prop_dictionary_set_and_rel(dev->dv_properties,
   1740 		    "interface-attributes", attr_array);
   1741 
   1742 	return;
   1743 }
   1744 
   1745 /*
   1746  * Attach a found device.
   1747  *
   1748  * Returns the device referenced, to be released with device_release.
   1749  */
   1750 static device_t
   1751 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
   1752     const struct cfargs_internal * const args)
   1753 {
   1754 	device_t dev;
   1755 	struct cftable *ct;
   1756 	const char *drvname;
   1757 	bool deferred;
   1758 
   1759 	KASSERT(KERNEL_LOCKED_P());
   1760 
   1761 	dev = config_devalloc(parent, cf, args);
   1762 	if (!dev)
   1763 		panic("config_attach: allocation of device softc failed");
   1764 
   1765 	/* XXX redundant - see below? */
   1766 	if (cf->cf_fstate != FSTATE_STAR) {
   1767 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
   1768 		cf->cf_fstate = FSTATE_FOUND;
   1769 	}
   1770 
   1771 	config_devlink(dev);
   1772 
   1773 	if (config_do_twiddle && cold)
   1774 		twiddle();
   1775 	else
   1776 		aprint_naive("Found ");
   1777 	/*
   1778 	 * We want the next two printfs for normal, verbose, and quiet,
   1779 	 * but not silent (in which case, we're twiddling, instead).
   1780 	 */
   1781 	if (parent == ROOT) {
   1782 		aprint_naive("%s (root)", device_xname(dev));
   1783 		aprint_normal("%s (root)", device_xname(dev));
   1784 	} else {
   1785 		aprint_naive("%s at %s", device_xname(dev),
   1786 		    device_xname(parent));
   1787 		aprint_normal("%s at %s", device_xname(dev),
   1788 		    device_xname(parent));
   1789 		if (print)
   1790 			(void) (*print)(aux, NULL);
   1791 	}
   1792 
   1793 	/*
   1794 	 * Before attaching, clobber any unfound devices that are
   1795 	 * otherwise identical.
   1796 	 * XXX code above is redundant?
   1797 	 */
   1798 	drvname = dev->dv_cfdriver->cd_name;
   1799 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   1800 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
   1801 			if (STREQ(cf->cf_name, drvname) &&
   1802 			    cf->cf_unit == dev->dv_unit) {
   1803 				if (cf->cf_fstate == FSTATE_NOTFOUND)
   1804 					cf->cf_fstate = FSTATE_FOUND;
   1805 			}
   1806 		}
   1807 	}
   1808 	device_register(dev, aux);
   1809 
   1810 	/* Let userland know */
   1811 	devmon_report_device(dev, true);
   1812 
   1813 	/*
   1814 	 * Prevent detach until the driver's attach function, and all
   1815 	 * deferred actions, have finished.
   1816 	 */
   1817 	config_pending_incr(dev);
   1818 
   1819 	/*
   1820 	 * Prevent concurrent detach from destroying the device_t until
   1821 	 * the caller has released the device.
   1822 	 */
   1823 	device_acquire(dev);
   1824 
   1825 	/* Call the driver's attach function.  */
   1826 	(*dev->dv_cfattach->ca_attach)(parent, dev, aux);
   1827 
   1828 	/*
   1829 	 * Allow other threads to acquire references to the device now
   1830 	 * that the driver's attach function is done.
   1831 	 */
   1832 	mutex_enter(&config_misc_lock);
   1833 	KASSERT(dev->dv_attaching == curlwp);
   1834 	dev->dv_attaching = NULL;
   1835 	cv_broadcast(&config_misc_cv);
   1836 	mutex_exit(&config_misc_lock);
   1837 
   1838 	/*
   1839 	 * Synchronous parts of attach are done.  Allow detach, unless
   1840 	 * the driver's attach function scheduled deferred actions.
   1841 	 */
   1842 	config_pending_decr(dev);
   1843 
   1844 	mutex_enter(&config_misc_lock);
   1845 	deferred = (dev->dv_pending != 0);
   1846 	mutex_exit(&config_misc_lock);
   1847 
   1848 	if (!deferred && !device_pmf_is_registered(dev))
   1849 		aprint_debug_dev(dev,
   1850 		    "WARNING: power management not supported\n");
   1851 
   1852 	config_process_deferred(&deferred_config_queue, dev);
   1853 
   1854 	device_register_post_config(dev, aux);
   1855 	rnd_add_uint32(&rnd_autoconf_source, 0);
   1856 	return dev;
   1857 }
   1858 
   1859 device_t
   1860 config_attach_acquire(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
   1861     const struct cfargs *cfargs)
   1862 {
   1863 	struct cfargs_internal store;
   1864 	device_t dev;
   1865 
   1866 	KERNEL_LOCK(1, NULL);
   1867 	dev = config_attach_internal(parent, cf, aux, print,
   1868 	    cfargs_canonicalize(cfargs, &store));
   1869 	KERNEL_UNLOCK_ONE(NULL);
   1870 
   1871 	return dev;
   1872 }
   1873 
   1874 /*
   1875  * config_attach(parent, cf, aux, print, cfargs)
   1876  *
   1877  *	Legacy entry point for callers whose use of the returned
   1878  *	device_t is not delimited by device_release.
   1879  *
   1880  *	The caller is required to hold the kernel lock as a fragile
   1881  *	defence against races.
   1882  *
   1883  *	Callers should ignore the return value or be converted to
   1884  *	config_attach_acquire with a matching device_release once they
   1885  *	have finished with the returned device_t.
   1886  */
   1887 device_t
   1888 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
   1889     const struct cfargs *cfargs)
   1890 {
   1891 	device_t dev;
   1892 
   1893 	KASSERT(KERNEL_LOCKED_P());
   1894 
   1895 	dev = config_attach_acquire(parent, cf, aux, print, cfargs);
   1896 	if (dev == NULL)
   1897 		return NULL;
   1898 	device_release(dev);
   1899 
   1900 	return dev;
   1901 }
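
/*
 * A minimal usage sketch: attaching a child with a reference and
 * releasing it when done.  It assumes the bus has already chosen a
 * cfdata entry `cf' (e.g. via config_search) and supplies its own
 * print routine `fooprint'; both names are placeholders.
 *
 *	device_t child;
 *
 *	child = config_attach_acquire(self, cf, aux, fooprint, CFARGS_NONE);
 *	if (child != NULL) {
 *		aprint_normal_dev(child, "attached\n");
 *		device_release(child);
 *	}
 */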
   1902 
   1903 /*
   1904  * As above, but for pseudo-devices.  Pseudo-devices attached in this
   1905  * way are silently inserted into the device tree, and their children
   1906  * attached.
   1907  *
    1908  * Note that because pseudo-devices are attached silently, the attach
    1909  * routine should prefix any information it wishes to print with the
    1910  * device name.
   1911  */
   1912 device_t
   1913 config_attach_pseudo_acquire(cfdata_t cf, void *aux)
   1914 {
   1915 	device_t dev;
   1916 
   1917 	KERNEL_LOCK(1, NULL);
   1918 
   1919 	struct cfargs_internal args = { };
   1920 	dev = config_devalloc(ROOT, cf, &args);
   1921 	if (!dev)
   1922 		goto out;
   1923 
   1924 	/* XXX mark busy in cfdata */
   1925 
   1926 	if (cf->cf_fstate != FSTATE_STAR) {
   1927 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
   1928 		cf->cf_fstate = FSTATE_FOUND;
   1929 	}
   1930 
   1931 	config_devlink(dev);
   1932 
   1933 #if 0	/* XXXJRT not yet */
   1934 	device_register(dev, NULL);	/* like a root node */
   1935 #endif
   1936 
   1937 	/* Let userland know */
   1938 	devmon_report_device(dev, true);
   1939 
   1940 	/*
   1941 	 * Prevent detach until the driver's attach function, and all
   1942 	 * deferred actions, have finished.
   1943 	 */
   1944 	config_pending_incr(dev);
   1945 
   1946 	/*
   1947 	 * Prevent concurrent detach from destroying the device_t until
   1948 	 * the caller has released the device.
   1949 	 */
   1950 	device_acquire(dev);
   1951 
   1952 	/* Call the driver's attach function.  */
   1953 	(*dev->dv_cfattach->ca_attach)(ROOT, dev, aux);
   1954 
   1955 	/*
   1956 	 * Allow other threads to acquire references to the device now
   1957 	 * that the driver's attach function is done.
   1958 	 */
   1959 	mutex_enter(&config_misc_lock);
   1960 	KASSERT(dev->dv_attaching == curlwp);
   1961 	dev->dv_attaching = NULL;
   1962 	cv_broadcast(&config_misc_cv);
   1963 	mutex_exit(&config_misc_lock);
   1964 
   1965 	/*
   1966 	 * Synchronous parts of attach are done.  Allow detach, unless
   1967 	 * the driver's attach function scheduled deferred actions.
   1968 	 */
   1969 	config_pending_decr(dev);
   1970 
   1971 	config_process_deferred(&deferred_config_queue, dev);
   1972 
   1973 out:	KERNEL_UNLOCK_ONE(NULL);
   1974 	return dev;
   1975 }
   1976 
   1977 /*
   1978  * config_attach_pseudo(cf)
   1979  *
   1980  *	Legacy entry point for callers whose use of the returned
   1981  *	device_t is not delimited by device_release.
   1982  *
   1983  *	The caller is required to hold the kernel lock as a fragile
   1984  *	defence against races.
   1985  *
   1986  *	Callers should ignore the return value or be converted to
   1987  *	config_attach_pseudo_acquire with a matching device_release
   1988  *	once they have finished with the returned device_t.  As a
   1989  *	bonus, config_attach_pseudo_acquire can pass a non-null aux
   1990  *	argument into the driver's attach routine.
   1991  */
   1992 device_t
   1993 config_attach_pseudo(cfdata_t cf)
   1994 {
   1995 	device_t dev;
   1996 
   1997 	dev = config_attach_pseudo_acquire(cf, NULL);
   1998 	if (dev == NULL)
   1999 		return dev;
   2000 	device_release(dev);
   2001 
   2002 	return dev;
   2003 }
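
/*
 * A minimal usage sketch for the pseudo-device variants.  It assumes
 * `foo_cfdata' is a cfdata_t describing the pseudo-device (e.g. set up
 * via config_cfdata_attach) and `faa' is a driver-specific aux
 * structure; both are placeholders.
 *
 *	device_t dev;
 *
 *	dev = config_attach_pseudo_acquire(foo_cfdata, &faa);
 *	if (dev == NULL)
 *		return ENXIO;
 *	...
 *	device_release(dev);
 */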
   2004 
   2005 /*
   2006  * Caller must hold alldevs_lock.
   2007  */
   2008 static void
   2009 config_collect_garbage(struct devicelist *garbage)
   2010 {
   2011 	device_t dv;
   2012 
   2013 	KASSERT(!cpu_intr_p());
   2014 	KASSERT(!cpu_softintr_p());
   2015 	KASSERT(mutex_owned(&alldevs_lock));
   2016 
   2017 	while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
   2018 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
   2019 			if (dv->dv_del_gen != 0)
   2020 				break;
   2021 		}
   2022 		if (dv == NULL) {
   2023 			alldevs_garbage = false;
   2024 			break;
   2025 		}
   2026 		config_devunlink(dv, garbage);
   2027 	}
   2028 	KASSERT(mutex_owned(&alldevs_lock));
   2029 }
   2030 
   2031 static void
   2032 config_dump_garbage(struct devicelist *garbage)
   2033 {
   2034 	device_t dv;
   2035 
   2036 	while ((dv = TAILQ_FIRST(garbage)) != NULL) {
   2037 		TAILQ_REMOVE(garbage, dv, dv_list);
   2038 		config_devdelete(dv);
   2039 	}
   2040 }
   2041 
   2042 static int
   2043 config_detach_enter(device_t dev)
   2044 {
   2045 	struct lwp *l __diagused;
   2046 	int error = 0;
   2047 
   2048 	mutex_enter(&config_misc_lock);
   2049 
   2050 	/*
   2051 	 * Wait until attach has fully completed, and until any
   2052 	 * concurrent detach (e.g., drvctl racing with USB event
   2053 	 * thread) has completed.
   2054 	 *
   2055 	 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
   2056 	 * deviter) to ensure the winner of the race doesn't free the
    2057 	 * device, leading the loser of the race into a use-after-free.
   2058 	 *
   2059 	 * XXX Not all callers do this!
   2060 	 */
   2061 	while (dev->dv_pending || dev->dv_detaching) {
   2062 		KASSERTMSG(dev->dv_detaching != curlwp,
   2063 		    "recursively detaching %s", device_xname(dev));
   2064 		error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
   2065 		if (error)
   2066 			goto out;
   2067 	}
   2068 
   2069 	/*
   2070 	 * Attach has completed, and no other concurrent detach is
   2071 	 * running.  Claim the device for detaching.  This will cause
   2072 	 * all new attempts to acquire references to block.
   2073 	 */
   2074 	KASSERTMSG((l = dev->dv_attaching) == NULL,
   2075 	    "lwp %ld [%s] @ %p attaching %s",
   2076 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2077 	    device_xname(dev));
   2078 	KASSERTMSG((l = dev->dv_detaching) == NULL,
   2079 	    "lwp %ld [%s] @ %p detaching %s",
   2080 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2081 	    device_xname(dev));
   2082 	dev->dv_detaching = curlwp;
   2083 
   2084 out:	mutex_exit(&config_misc_lock);
   2085 	return error;
   2086 }
   2087 
   2088 static void
   2089 config_detach_exit(device_t dev)
   2090 {
   2091 	struct lwp *l __diagused;
   2092 
   2093 	mutex_enter(&config_misc_lock);
   2094 	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
   2095 	    device_xname(dev));
   2096 	KASSERTMSG((l = dev->dv_detaching) == curlwp,
   2097 	    "lwp %ld [%s] @ %p detaching %s",
   2098 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2099 	    device_xname(dev));
   2100 	dev->dv_detaching = NULL;
   2101 	cv_broadcast(&config_misc_cv);
   2102 	mutex_exit(&config_misc_lock);
   2103 }
   2104 
   2105 /*
   2106  * Detach a device.  Optionally forced (e.g. because of hardware
   2107  * removal) and quiet.  Returns zero if successful, non-zero
   2108  * (an error code) otherwise.
   2109  *
   2110  * Note that this code wants to be run from a process context, so
   2111  * that the detach can sleep to allow processes which have a device
   2112  * open to run and unwind their stacks.
   2113  *
   2114  * Caller must hold a reference with device_acquire or
   2115  * device_lookup_acquire.
   2116  */
   2117 int
   2118 config_detach_release(device_t dev, int flags)
   2119 {
   2120 	struct alldevs_foray af;
   2121 	struct cftable *ct;
   2122 	cfdata_t cf;
   2123 	const struct cfattach *ca;
   2124 	struct cfdriver *cd;
   2125 	device_t d __diagused;
   2126 	int rv = 0;
   2127 
   2128 	KERNEL_LOCK(1, NULL);
   2129 
   2130 	cf = dev->dv_cfdata;
   2131 	KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
   2132 		cf->cf_fstate == FSTATE_STAR),
   2133 	    "config_detach: %s: bad device fstate: %d",
   2134 	    device_xname(dev), cf ? cf->cf_fstate : -1);
   2135 
   2136 	cd = dev->dv_cfdriver;
   2137 	KASSERT(cd != NULL);
   2138 
   2139 	ca = dev->dv_cfattach;
   2140 	KASSERT(ca != NULL);
   2141 
   2142 	/*
   2143 	 * Only one detach at a time, please -- and not until fully
   2144 	 * attached.
   2145 	 */
   2146 	rv = config_detach_enter(dev);
   2147 	device_release(dev);
   2148 	if (rv) {
   2149 		KERNEL_UNLOCK_ONE(NULL);
   2150 		return rv;
   2151 	}
   2152 
   2153 	mutex_enter(&alldevs_lock);
   2154 	if (dev->dv_del_gen != 0) {
   2155 		mutex_exit(&alldevs_lock);
   2156 #ifdef DIAGNOSTIC
   2157 		printf("%s: %s is already detached\n", __func__,
   2158 		    device_xname(dev));
   2159 #endif /* DIAGNOSTIC */
   2160 		config_detach_exit(dev);
   2161 		KERNEL_UNLOCK_ONE(NULL);
   2162 		return ENOENT;
   2163 	}
   2164 	alldevs_nwrite++;
   2165 	mutex_exit(&alldevs_lock);
   2166 
   2167 	/*
   2168 	 * Call the driver's .ca_detach function, unless it has none or
   2169 	 * we are skipping it because it's unforced shutdown time and
   2170 	 * the driver didn't ask to detach on shutdown.
   2171 	 */
   2172 	if (!detachall &&
   2173 	    (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
   2174 	    (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
   2175 		rv = EOPNOTSUPP;
   2176 	} else if (ca->ca_detach != NULL) {
   2177 		rv = (*ca->ca_detach)(dev, flags);
   2178 	} else
   2179 		rv = EOPNOTSUPP;
   2180 
   2181 	KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
   2182 	    device_xname(dev), rv);
   2183 
   2184 	/*
   2185 	 * If it was not possible to detach the device, then we either
   2186 	 * panic() (for the forced but failed case), or return an error.
   2187 	 */
   2188 	if (rv) {
   2189 		/*
   2190 		 * Detach failed -- likely EOPNOTSUPP or EBUSY.  Driver
   2191 		 * must not have called config_detach_commit.
   2192 		 */
   2193 		KASSERTMSG(!dev->dv_detach_committed,
   2194 		    "%s committed to detaching and then backed out, error=%d",
   2195 		    device_xname(dev), rv);
   2196 		if (flags & DETACH_FORCE) {
   2197 			panic("config_detach: forced detach of %s failed (%d)",
   2198 			    device_xname(dev), rv);
   2199 		}
   2200 		goto out;
   2201 	}
   2202 
   2203 	/*
   2204 	 * The device has now been successfully detached.
   2205 	 */
   2206 	dev->dv_detach_done = true;
   2207 
   2208 	/*
   2209 	 * If .ca_detach didn't commit to detach, then do that for it.
   2210 	 * This wakes any pending device_lookup_acquire calls so they
   2211 	 * will fail.
   2212 	 */
   2213 	config_detach_commit(dev);
   2214 
   2215 	/*
   2216 	 * If it was possible to detach the device, ensure that the
   2217 	 * device is deactivated.
   2218 	 */
   2219 	dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
   2220 
   2221 	/*
   2222 	 * Wait for all device_lookup_acquire references -- mostly, for
   2223 	 * all attempts to open the device -- to drain.  It is the
   2224 	 * responsibility of .ca_detach to ensure anything with open
   2225 	 * references will be interrupted and release them promptly,
   2226 	 * not block indefinitely.  All new attempts to acquire
   2227 	 * references will fail, as config_detach_commit has arranged
   2228 	 * by now.
   2229 	 */
   2230 	mutex_enter(&config_misc_lock);
   2231 	localcount_drain(dev->dv_localcount,
   2232 	    &config_misc_cv, &config_misc_lock);
   2233 	mutex_exit(&config_misc_lock);
   2234 
   2235 	/* Let userland know */
   2236 	devmon_report_device(dev, false);
   2237 
   2238 #ifdef DIAGNOSTIC
   2239 	/*
   2240 	 * Sanity: If you're successfully detached, you should have no
   2241 	 * children.  (Note that because children must be attached
   2242 	 * after parents, we only need to search the latter part of
   2243 	 * the list.)
   2244 	 */
   2245 	mutex_enter(&alldevs_lock);
   2246 	for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
   2247 	    d = TAILQ_NEXT(d, dv_list)) {
   2248 		if (d->dv_parent == dev && d->dv_del_gen == 0) {
   2249 			printf("config_detach: detached device %s"
   2250 			    " has children %s\n", device_xname(dev),
   2251 			    device_xname(d));
   2252 			panic("config_detach");
   2253 		}
   2254 	}
   2255 	mutex_exit(&alldevs_lock);
   2256 #endif
   2257 
   2258 	/* notify the parent that the child is gone */
   2259 	if (dev->dv_parent) {
   2260 		device_t p = dev->dv_parent;
   2261 		if (p->dv_cfattach->ca_childdetached)
   2262 			(*p->dv_cfattach->ca_childdetached)(p, dev);
   2263 	}
   2264 
   2265 	/*
   2266 	 * Mark cfdata to show that the unit can be reused, if possible.
   2267 	 */
   2268 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   2269 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
   2270 			if (STREQ(cf->cf_name, cd->cd_name)) {
   2271 				if (cf->cf_fstate == FSTATE_FOUND &&
   2272 				    cf->cf_unit == dev->dv_unit)
   2273 					cf->cf_fstate = FSTATE_NOTFOUND;
   2274 			}
   2275 		}
   2276 	}
   2277 
   2278 	if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
   2279 		aprint_normal_dev(dev, "detached\n");
   2280 
   2281 out:
   2282 	config_detach_exit(dev);
   2283 
   2284 	config_alldevs_enter(&af);
   2285 	KASSERT(alldevs_nwrite != 0);
   2286 	--alldevs_nwrite;
   2287 	if (rv == 0 && dev->dv_del_gen == 0) {
   2288 		if (alldevs_nwrite == 0 && alldevs_nread == 0)
   2289 			config_devunlink(dev, &af.af_garbage);
   2290 		else {
   2291 			dev->dv_del_gen = alldevs_gen;
   2292 			alldevs_garbage = true;
   2293 		}
   2294 	}
   2295 	config_alldevs_exit(&af);
   2296 
   2297 	KERNEL_UNLOCK_ONE(NULL);
   2298 
   2299 	return rv;
   2300 }
   2301 
   2302 /*
   2303  * config_detach(dev, flags)
   2304  *
   2305  *	Legacy entry point for callers that have not acquired a
   2306  *	reference to dev.
   2307  *
   2308  *	The caller is required to hold the kernel lock as a fragile
   2309  *	defence against races.
   2310  *
   2311  *	Callers should be converted to use device_acquire under a lock
   2312  *	taken also by .ca_childdetached to synchronize access to the
    2313  *	device_t, and then config_detach_release outside the lock.
   2314  *	Alternatively, most drivers detach children only in their own
   2315  *	detach routines, which can be done with config_detach_children
   2316  *	instead.
   2317  */
   2318 int
   2319 config_detach(device_t dev, int flags)
   2320 {
   2321 
   2322 	KASSERT(KERNEL_LOCKED_P());
   2323 
   2324 	device_acquire(dev);
   2325 	return config_detach_release(dev, flags);
   2326 }
   2327 
   2328 /*
   2329  * config_detach_commit(dev)
   2330  *
   2331  *	Issued by a driver's .ca_detach routine to notify anyone
   2332  *	waiting in device_lookup_acquire that the driver is committed
   2333  *	to detaching the device, which allows device_lookup_acquire to
   2334  *	wake up and fail immediately.
   2335  *
   2336  *	Safe to call multiple times -- idempotent.  Must be called
   2337  *	during config_detach_enter/exit.  Safe to use with
   2338  *	device_lookup because the device is not actually removed from
   2339  *	the table until after config_detach_exit.
   2340  */
   2341 void
   2342 config_detach_commit(device_t dev)
   2343 {
   2344 	struct lwp *l __diagused;
   2345 
   2346 	mutex_enter(&config_misc_lock);
   2347 	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
   2348 	    device_xname(dev));
   2349 	KASSERTMSG((l = dev->dv_detaching) == curlwp,
   2350 	    "lwp %ld [%s] @ %p detaching %s",
   2351 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2352 	    device_xname(dev));
   2353 	dev->dv_detach_committed = true;
   2354 	cv_broadcast(&config_misc_cv);
   2355 	mutex_exit(&config_misc_lock);
   2356 }
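
/*
 * A minimal sketch of how a driver's .ca_detach routine might use
 * config_detach_commit once it has passed the point of no return, so
 * that threads blocked in device_lookup_acquire fail promptly.  The
 * foo_* names are placeholders.
 *
 *	static int
 *	foo_detach(device_t self, int flags)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *
 *		if (foo_busy(sc) && (flags & DETACH_FORCE) == 0)
 *			return EBUSY;
 *		config_detach_commit(self);
 *		...interrupt openers, release resources...
 *		return 0;
 *	}
 */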
   2357 
   2358 int
   2359 config_detach_children(device_t parent, int flags)
   2360 {
   2361 	device_t dv;
   2362 	deviter_t di;
   2363 	int error = 0;
   2364 
   2365 	KASSERT(KERNEL_LOCKED_P());
   2366 
   2367 	for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
   2368 	     dv = deviter_next(&di)) {
   2369 		if (device_parent(dv) != parent)
   2370 			continue;
   2371 		if ((error = config_detach(dv, flags)) != 0)
   2372 			break;
   2373 	}
   2374 	deviter_release(&di);
   2375 	return error;
   2376 }
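
/*
 * A minimal sketch of the common idiom: a bus driver's detach routine
 * detaches its children first and only then tears down its own state.
 * The foobus_* names are placeholders.
 *
 *	static int
 *	foobus_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		if ((error = config_detach_children(self, flags)) != 0)
 *			return error;
 *		...free bus resources...
 *		return 0;
 *	}
 */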
   2377 
   2378 device_t
   2379 shutdown_first(struct shutdown_state *s)
   2380 {
   2381 	if (!s->initialized) {
   2382 		deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
   2383 		s->initialized = true;
   2384 	}
   2385 	return shutdown_next(s);
   2386 }
   2387 
   2388 device_t
   2389 shutdown_next(struct shutdown_state *s)
   2390 {
   2391 	device_t dv;
   2392 
   2393 	while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
   2394 		;
   2395 
   2396 	if (dv == NULL)
   2397 		s->initialized = false;
   2398 
   2399 	return dv;
   2400 }
   2401 
   2402 bool
   2403 config_detach_all(int how)
   2404 {
   2405 	static struct shutdown_state s;
   2406 	device_t curdev;
   2407 	bool progress = false;
   2408 	int flags;
   2409 
   2410 	KERNEL_LOCK(1, NULL);
   2411 
   2412 	if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
   2413 		goto out;
   2414 
   2415 	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
   2416 		flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
   2417 	else
   2418 		flags = DETACH_SHUTDOWN;
   2419 
   2420 	for (curdev = shutdown_first(&s); curdev != NULL;
   2421 	     curdev = shutdown_next(&s)) {
   2422 		aprint_debug(" detaching %s, ", device_xname(curdev));
   2423 		if (config_detach(curdev, flags) == 0) {
   2424 			progress = true;
   2425 			aprint_debug("success.");
   2426 		} else
   2427 			aprint_debug("failed.");
   2428 	}
   2429 
   2430 out:	KERNEL_UNLOCK_ONE(NULL);
   2431 	return progress;
   2432 }
   2433 
   2434 static bool
   2435 device_is_ancestor_of(device_t ancestor, device_t descendant)
   2436 {
   2437 	device_t dv;
   2438 
   2439 	for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
   2440 		if (device_parent(dv) == ancestor)
   2441 			return true;
   2442 	}
   2443 	return false;
   2444 }
   2445 
   2446 int
   2447 config_deactivate(device_t dev)
   2448 {
   2449 	deviter_t di;
   2450 	const struct cfattach *ca;
   2451 	device_t descendant;
   2452 	int s, rv = 0, oflags;
   2453 
   2454 	for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
   2455 	     descendant != NULL;
   2456 	     descendant = deviter_next(&di)) {
   2457 		if (dev != descendant &&
   2458 		    !device_is_ancestor_of(dev, descendant))
   2459 			continue;
   2460 
   2461 		if ((descendant->dv_flags & DVF_ACTIVE) == 0)
   2462 			continue;
   2463 
   2464 		ca = descendant->dv_cfattach;
   2465 		oflags = descendant->dv_flags;
   2466 
   2467 		descendant->dv_flags &= ~DVF_ACTIVE;
   2468 		if (ca->ca_activate == NULL)
   2469 			continue;
   2470 		s = splhigh();
   2471 		rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
   2472 		splx(s);
   2473 		if (rv != 0)
   2474 			descendant->dv_flags = oflags;
   2475 	}
   2476 	deviter_release(&di);
   2477 	return rv;
   2478 }
   2479 
   2480 /*
   2481  * Defer the configuration of the specified device until all
   2482  * of its parent's devices have been attached.
   2483  */
   2484 void
   2485 config_defer(device_t dev, void (*func)(device_t))
   2486 {
   2487 	struct deferred_config *dc;
   2488 
   2489 	if (dev->dv_parent == NULL)
   2490 		panic("config_defer: can't defer config of a root device");
   2491 
   2492 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
   2493 
   2494 	config_pending_incr(dev);
   2495 
   2496 	mutex_enter(&config_misc_lock);
   2497 #ifdef DIAGNOSTIC
   2498 	struct deferred_config *odc;
   2499 	TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
   2500 		if (odc->dc_dev == dev)
   2501 			panic("config_defer: deferred twice");
   2502 	}
   2503 #endif
   2504 	dc->dc_dev = dev;
   2505 	dc->dc_func = func;
   2506 	TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
   2507 	mutex_exit(&config_misc_lock);
   2508 }
   2509 
   2510 /*
   2511  * Defer some autoconfiguration for a device until after interrupts
   2512  * are enabled.
   2513  */
   2514 void
   2515 config_interrupts(device_t dev, void (*func)(device_t))
   2516 {
   2517 	struct deferred_config *dc;
   2518 
   2519 	/*
   2520 	 * If interrupts are enabled, callback now.
   2521 	 */
   2522 	if (cold == 0) {
   2523 		(*func)(dev);
   2524 		return;
   2525 	}
   2526 
   2527 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
   2528 
   2529 	config_pending_incr(dev);
   2530 
   2531 	mutex_enter(&config_misc_lock);
   2532 #ifdef DIAGNOSTIC
   2533 	struct deferred_config *odc;
   2534 	TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
   2535 		if (odc->dc_dev == dev)
   2536 			panic("config_interrupts: deferred twice");
   2537 	}
   2538 #endif
   2539 	dc->dc_dev = dev;
   2540 	dc->dc_func = func;
   2541 	TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
   2542 	mutex_exit(&config_misc_lock);
   2543 }
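
/*
 * A minimal sketch: splitting attach so that the work which needs
 * working interrupts runs later.  The foo_* names are placeholders.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...map registers and do early, interrupt-free setup...
 *		config_interrupts(self, foo_attach_post);
 *	}
 *
 *	static void
 *	foo_attach_post(device_t self)
 *	{
 *		...setup that needs interrupts; runs once cold is cleared...
 *	}
 */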
   2544 
   2545 /*
   2546  * Defer some autoconfiguration for a device until after root file system
   2547  * is mounted (to load firmware etc).
   2548  */
   2549 void
   2550 config_mountroot(device_t dev, void (*func)(device_t))
   2551 {
   2552 	struct deferred_config *dc;
   2553 
   2554 	/*
   2555 	 * If root file system is mounted, callback now.
   2556 	 */
   2557 	if (root_is_mounted) {
   2558 		(*func)(dev);
   2559 		return;
   2560 	}
   2561 
   2562 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
   2563 
   2564 	mutex_enter(&config_misc_lock);
   2565 #ifdef DIAGNOSTIC
   2566 	struct deferred_config *odc;
   2567 	TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
   2568 		if (odc->dc_dev == dev)
   2569 			panic("%s: deferred twice", __func__);
   2570 	}
   2571 #endif
   2572 
   2573 	dc->dc_dev = dev;
   2574 	dc->dc_func = func;
   2575 	TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
   2576 	mutex_exit(&config_misc_lock);
   2577 }
   2578 
   2579 /*
   2580  * Process a deferred configuration queue.
   2581  */
   2582 static void
   2583 config_process_deferred(struct deferred_config_head *queue, device_t parent)
   2584 {
   2585 	struct deferred_config *dc;
   2586 
   2587 	KASSERT(KERNEL_LOCKED_P());
   2588 
   2589 	mutex_enter(&config_misc_lock);
   2590 	dc = TAILQ_FIRST(queue);
   2591 	while (dc) {
   2592 		if (parent == NULL || dc->dc_dev->dv_parent == parent) {
   2593 			TAILQ_REMOVE(queue, dc, dc_queue);
   2594 			mutex_exit(&config_misc_lock);
   2595 
   2596 			(*dc->dc_func)(dc->dc_dev);
   2597 			config_pending_decr(dc->dc_dev);
   2598 			kmem_free(dc, sizeof(*dc));
   2599 
   2600 			mutex_enter(&config_misc_lock);
   2601 			/* Restart, queue might have changed */
   2602 			dc = TAILQ_FIRST(queue);
   2603 		} else {
   2604 			dc = TAILQ_NEXT(dc, dc_queue);
   2605 		}
   2606 	}
   2607 	mutex_exit(&config_misc_lock);
   2608 }
   2609 
   2610 /*
   2611  * Manipulate the config_pending semaphore.
   2612  */
   2613 void
   2614 config_pending_incr(device_t dev)
   2615 {
   2616 
   2617 	mutex_enter(&config_misc_lock);
   2618 	KASSERTMSG(dev->dv_pending < INT_MAX,
   2619 	    "%s: excess config_pending_incr", device_xname(dev));
   2620 	if (dev->dv_pending++ == 0)
   2621 		TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
   2622 #ifdef DEBUG_AUTOCONF
   2623 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
   2624 #endif
   2625 	mutex_exit(&config_misc_lock);
   2626 }
   2627 
   2628 void
   2629 config_pending_decr(device_t dev)
   2630 {
   2631 
   2632 	mutex_enter(&config_misc_lock);
   2633 	KASSERTMSG(dev->dv_pending > 0,
   2634 	    "%s: excess config_pending_decr", device_xname(dev));
   2635 	if (--dev->dv_pending == 0) {
   2636 		TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
   2637 		cv_broadcast(&config_misc_cv);
   2638 	}
   2639 #ifdef DEBUG_AUTOCONF
   2640 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
   2641 #endif
   2642 	mutex_exit(&config_misc_lock);
   2643 }
   2644 
   2645 /*
   2646  * Register a "finalization" routine.  Finalization routines are
   2647  * called iteratively once all real devices have been found during
   2648  * autoconfiguration, for as long as any one finalizer has done
   2649  * any work.
   2650  */
   2651 int
   2652 config_finalize_register(device_t dev, int (*fn)(device_t))
   2653 {
   2654 	struct finalize_hook *f;
   2655 	int error = 0;
   2656 
   2657 	KERNEL_LOCK(1, NULL);
   2658 
   2659 	/*
   2660 	 * If finalization has already been done, invoke the
   2661 	 * callback function now.
   2662 	 */
   2663 	if (config_finalize_done) {
   2664 		while ((*fn)(dev) != 0)
   2665 			/* loop */ ;
   2666 		goto out;
   2667 	}
   2668 
   2669 	/* Ensure this isn't already on the list. */
   2670 	TAILQ_FOREACH(f, &config_finalize_list, f_list) {
   2671 		if (f->f_func == fn && f->f_dev == dev) {
   2672 			error = EEXIST;
   2673 			goto out;
   2674 		}
   2675 	}
   2676 
   2677 	f = kmem_alloc(sizeof(*f), KM_SLEEP);
   2678 	f->f_func = fn;
   2679 	f->f_dev = dev;
   2680 	TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
   2681 
   2682 	/* Success!  */
   2683 	error = 0;
   2684 
   2685 out:	KERNEL_UNLOCK_ONE(NULL);
   2686 	return error;
   2687 }
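
/*
 * A minimal sketch: a finalizer registered from attach.  The hook must
 * return nonzero only when it actually did some work; otherwise
 * config_finalize() below would keep looping.  The foo_* names are
 * placeholders.
 *
 *	static int
 *	foo_finalize(device_t self)
 *	{
 *		return foo_rescan(self) ? 1 : 0;
 *	}
 *
 *	...in foo_attach(): config_finalize_register(self, foo_finalize);
 */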
   2688 
   2689 void
   2690 config_finalize(void)
   2691 {
   2692 	struct finalize_hook *f;
   2693 	struct pdevinit *pdev;
   2694 	extern struct pdevinit pdevinit[];
   2695 	int errcnt, rv;
   2696 
   2697 	/*
   2698 	 * Now that device driver threads have been created, wait for
   2699 	 * them to finish any deferred autoconfiguration.
   2700 	 */
   2701 	mutex_enter(&config_misc_lock);
   2702 	while (!TAILQ_EMPTY(&config_pending)) {
   2703 		device_t dev;
   2704 		int error;
   2705 
   2706 		error = cv_timedwait(&config_misc_cv, &config_misc_lock,
   2707 		    mstohz(1000));
   2708 		if (error == EWOULDBLOCK) {
   2709 			aprint_debug("waiting for devices:");
   2710 			TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
   2711 				aprint_debug(" %s", device_xname(dev));
   2712 			aprint_debug("\n");
   2713 		}
   2714 	}
   2715 	mutex_exit(&config_misc_lock);
   2716 
   2717 	KERNEL_LOCK(1, NULL);
   2718 
   2719 	/* Attach pseudo-devices. */
   2720 	for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
   2721 		(*pdev->pdev_attach)(pdev->pdev_count);
   2722 
   2723 	/* Run the hooks until none of them does any work. */
   2724 	do {
   2725 		rv = 0;
   2726 		TAILQ_FOREACH(f, &config_finalize_list, f_list)
   2727 			rv |= (*f->f_func)(f->f_dev);
   2728 	} while (rv != 0);
   2729 
   2730 	config_finalize_done = 1;
   2731 
   2732 	/* Now free all the hooks. */
   2733 	while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
   2734 		TAILQ_REMOVE(&config_finalize_list, f, f_list);
   2735 		kmem_free(f, sizeof(*f));
   2736 	}
   2737 
   2738 	KERNEL_UNLOCK_ONE(NULL);
   2739 
   2740 	errcnt = aprint_get_error_count();
   2741 	if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
   2742 	    (boothowto & AB_VERBOSE) == 0) {
   2743 		mutex_enter(&config_misc_lock);
   2744 		if (config_do_twiddle) {
   2745 			config_do_twiddle = 0;
   2746 			printf_nolog(" done.\n");
   2747 		}
   2748 		mutex_exit(&config_misc_lock);
   2749 	}
   2750 	if (errcnt != 0) {
   2751 		printf("WARNING: %d error%s while detecting hardware; "
   2752 		    "check system log.\n", errcnt,
   2753 		    errcnt == 1 ? "" : "s");
   2754 	}
   2755 }
   2756 
   2757 void
   2758 config_twiddle_init(void)
   2759 {
   2760 
   2761 	if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
   2762 		config_do_twiddle = 1;
   2763 	}
   2764 	callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
   2765 }
   2766 
   2767 void
   2768 config_twiddle_fn(void *cookie)
   2769 {
   2770 
   2771 	mutex_enter(&config_misc_lock);
   2772 	if (config_do_twiddle) {
   2773 		twiddle();
   2774 		callout_schedule(&config_twiddle_ch, mstohz(100));
   2775 	}
   2776 	mutex_exit(&config_misc_lock);
   2777 }
   2778 
   2779 static void
   2780 config_alldevs_enter(struct alldevs_foray *af)
   2781 {
   2782 	TAILQ_INIT(&af->af_garbage);
   2783 	mutex_enter(&alldevs_lock);
   2784 	config_collect_garbage(&af->af_garbage);
   2785 }
   2786 
   2787 static void
   2788 config_alldevs_exit(struct alldevs_foray *af)
   2789 {
   2790 	mutex_exit(&alldevs_lock);
   2791 	config_dump_garbage(&af->af_garbage);
   2792 }
   2793 
   2794 /*
   2795  * device_lookup:
   2796  *
   2797  *	Look up a device instance for a given driver.
   2798  *
   2799  *	Caller is responsible for ensuring the device's state is
   2800  *	stable, either by holding a reference already obtained with
   2801  *	device_lookup_acquire or by otherwise ensuring the device is
   2802  *	attached and can't be detached (e.g., holding an open device
   2803  *	node and ensuring *_detach calls vdevgone).
   2804  *
   2805  *	XXX Find a way to assert this.
   2806  *
   2807  *	Safe for use up to and including interrupt context at IPL_VM.
   2808  *	Never sleeps.
   2809  */
   2810 device_t
   2811 device_lookup(cfdriver_t cd, int unit)
   2812 {
   2813 	device_t dv;
   2814 
   2815 	mutex_enter(&alldevs_lock);
   2816 	if (unit < 0 || unit >= cd->cd_ndevs)
   2817 		dv = NULL;
   2818 	else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
   2819 		dv = NULL;
   2820 	mutex_exit(&alldevs_lock);
   2821 
   2822 	return dv;
   2823 }
   2824 
   2825 /*
   2826  * device_lookup_private:
   2827  *
   2828  *	Look up a softc instance for a given driver.
   2829  */
   2830 void *
   2831 device_lookup_private(cfdriver_t cd, int unit)
   2832 {
   2833 
   2834 	return device_private(device_lookup(cd, unit));
   2835 }
   2836 
   2837 /*
   2838  * device_lookup_acquire:
   2839  *
   2840  *	Look up a device instance for a given driver, and return a
   2841  *	reference to it that must be released by device_release.
   2842  *
   2843  *	=> If the device is still attaching, blocks until *_attach has
   2844  *	   returned.
   2845  *
   2846  *	=> If the device is detaching, blocks until *_detach has
   2847  *	   returned.  May succeed or fail in that case, depending on
   2848  *	   whether *_detach has backed out (EBUSY) or committed to
   2849  *	   detaching.
   2850  *
   2851  *	May sleep.
   2852  */
   2853 device_t
   2854 device_lookup_acquire(cfdriver_t cd, int unit)
   2855 {
   2856 	device_t dv;
   2857 
   2858 	ASSERT_SLEEPABLE();
   2859 
   2860 	/* XXX This should have a pserialized fast path -- TBD.  */
   2861 	mutex_enter(&config_misc_lock);
   2862 	mutex_enter(&alldevs_lock);
   2863 retry:	if (unit < 0 || unit >= cd->cd_ndevs ||
   2864 	    (dv = cd->cd_devs[unit]) == NULL ||
   2865 	    dv->dv_del_gen != 0 ||
   2866 	    dv->dv_detach_committed) {
   2867 		dv = NULL;
   2868 	} else {
   2869 		/*
   2870 		 * Wait for the device to stabilize, if attaching or
   2871 		 * detaching.  Either way we must wait for *_attach or
   2872 		 * *_detach to complete, and either way we must retry:
   2873 		 * even if detaching, *_detach might fail (EBUSY) so
   2874 		 * the device may still be there.
   2875 		 */
   2876 		if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
   2877 		    dv->dv_detaching != NULL) {
   2878 			mutex_exit(&alldevs_lock);
   2879 			cv_wait(&config_misc_cv, &config_misc_lock);
   2880 			mutex_enter(&alldevs_lock);
   2881 			goto retry;
   2882 		}
   2883 		device_acquire(dv);
   2884 	}
   2885 	mutex_exit(&alldevs_lock);
   2886 	mutex_exit(&config_misc_lock);
   2887 
   2888 	return dv;
   2889 }
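
/*
 * A minimal sketch of the open-path idiom: hold the reference across
 * use of the softc and release it before returning; do not cache the
 * device_t beyond the call.  The foo_* names are placeholders.
 *
 *	static int
 *	foo_open(dev_t devno, int flag, int mode, struct lwp *l)
 *	{
 *		device_t self;
 *		struct foo_softc *sc;
 *
 *		self = device_lookup_acquire(&foo_cd, minor(devno));
 *		if (self == NULL)
 *			return ENXIO;
 *		sc = device_private(self);
 *		...use sc...
 *		device_release(self);
 *		return 0;
 *	}
 */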
   2890 
   2891 /*
   2892  * device_acquire:
   2893  *
   2894  *	Acquire a reference to a device.  It is the caller's
   2895  *	responsibility to ensure that the device's .ca_detach routine
   2896  *	cannot return before calling this.  Caller must release the
   2897  *	reference with device_release or config_detach_release.
   2898  */
   2899 void
   2900 device_acquire(device_t dv)
   2901 {
   2902 
   2903 	/*
   2904 	 * No lock because the caller has promised that this can't
   2905 	 * change concurrently with device_acquire.
   2906 	 */
   2907 	KASSERTMSG(!dv->dv_detach_done, "%s",
   2908 	    dv == NULL ? "(null)" : device_xname(dv));
   2909 	localcount_acquire(dv->dv_localcount);
   2910 }
   2911 
   2912 /*
   2913  * device_release:
   2914  *
   2915  *	Release a reference to a device acquired with device_acquire or
   2916  *	device_lookup_acquire.
   2917  */
   2918 void
   2919 device_release(device_t dv)
   2920 {
   2921 
   2922 	localcount_release(dv->dv_localcount,
   2923 	    &config_misc_cv, &config_misc_lock);
   2924 }
   2925 
   2926 /*
   2927  * device_find_by_xname:
   2928  *
   2929  *	Returns the device of the given name or NULL if it doesn't exist.
   2930  */
   2931 device_t
   2932 device_find_by_xname(const char *name)
   2933 {
   2934 	device_t dv;
   2935 	deviter_t di;
   2936 
   2937 	for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
   2938 		if (strcmp(device_xname(dv), name) == 0)
   2939 			break;
   2940 	}
   2941 	deviter_release(&di);
   2942 
   2943 	return dv;
   2944 }
   2945 
   2946 /*
   2947  * device_find_by_driver_unit:
   2948  *
   2949  *	Returns the device of the given driver name and unit or
   2950  *	NULL if it doesn't exist.
   2951  */
   2952 device_t
   2953 device_find_by_driver_unit(const char *name, int unit)
   2954 {
   2955 	struct cfdriver *cd;
   2956 
   2957 	if ((cd = config_cfdriver_lookup(name)) == NULL)
   2958 		return NULL;
   2959 	return device_lookup(cd, unit);
   2960 }
   2961 
   2962 static bool
   2963 match_strcmp(const char * const s1, const char * const s2)
   2964 {
   2965 	return strcmp(s1, s2) == 0;
   2966 }
   2967 
   2968 static bool
   2969 match_pmatch(const char * const s1, const char * const s2)
   2970 {
   2971 	return pmatch(s1, s2, NULL) == 2;
   2972 }
   2973 
   2974 static bool
   2975 strarray_match_internal(const char ** const strings,
   2976     unsigned int const nstrings, const char * const str,
   2977     unsigned int * const indexp,
   2978     bool (*match_fn)(const char *, const char *))
   2979 {
   2980 	unsigned int i;
   2981 
   2982 	if (strings == NULL || nstrings == 0) {
   2983 		return false;
   2984 	}
   2985 
   2986 	for (i = 0; i < nstrings; i++) {
   2987 		if ((*match_fn)(strings[i], str)) {
   2988 			*indexp = i;
   2989 			return true;
   2990 		}
   2991 	}
   2992 
   2993 	return false;
   2994 }
   2995 
   2996 static int
   2997 strarray_match(const char ** const strings, unsigned int const nstrings,
   2998     const char * const str)
   2999 {
   3000 	unsigned int idx;
   3001 
   3002 	if (strarray_match_internal(strings, nstrings, str, &idx,
   3003 				    match_strcmp)) {
   3004 		return (int)(nstrings - idx);
   3005 	}
   3006 	return 0;
   3007 }
   3008 
   3009 static int
   3010 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
   3011     const char * const pattern)
   3012 {
   3013 	unsigned int idx;
   3014 
   3015 	if (strarray_match_internal(strings, nstrings, pattern, &idx,
   3016 				    match_pmatch)) {
   3017 		return (int)(nstrings - idx);
   3018 	}
   3019 	return 0;
   3020 }
   3021 
   3022 static int
   3023 device_compatible_match_strarray_internal(
   3024     const char **device_compats, int ndevice_compats,
   3025     const struct device_compatible_entry *driver_compats,
   3026     const struct device_compatible_entry **matching_entryp,
   3027     int (*match_fn)(const char **, unsigned int, const char *))
   3028 {
   3029 	const struct device_compatible_entry *dce = NULL;
   3030 	int rv;
   3031 
   3032 	if (ndevice_compats == 0 || device_compats == NULL ||
   3033 	    driver_compats == NULL)
   3034 		return 0;
   3035 
   3036 	for (dce = driver_compats; dce->compat != NULL; dce++) {
   3037 		rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
   3038 		if (rv != 0) {
   3039 			if (matching_entryp != NULL) {
   3040 				*matching_entryp = dce;
   3041 			}
   3042 			return rv;
   3043 		}
   3044 	}
   3045 	return 0;
   3046 }
   3047 
   3048 /*
   3049  * device_compatible_match:
   3050  *
   3051  *	Match a driver's "compatible" data against a device's
    3052  *	"compatible" strings.  Returns a result weighted by
   3053  *	which device "compatible" string was matched.
   3054  */
   3055 int
   3056 device_compatible_match(const char **device_compats, int ndevice_compats,
   3057     const struct device_compatible_entry *driver_compats)
   3058 {
   3059 	return device_compatible_match_strarray_internal(device_compats,
   3060 	    ndevice_compats, driver_compats, NULL, strarray_match);
   3061 }
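
/*
 * A minimal sketch of a weighted match routine.  The compat_data[]
 * table and the faa_* fields of the aux structure are placeholders;
 * real buses (e.g. FDT) supply the device "compatible" array.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "vendor,foo-v2" },
 *		{ .compat = "vendor,foo" },
 *		{ .compat = NULL }
 *	};
 *
 *	static int
 *	foo_match(device_t parent, cfdata_t cf, void *aux)
 *	{
 *		struct foo_attach_args *faa = aux;
 *
 *		return device_compatible_match(faa->faa_compats,
 *		    faa->faa_ncompats, compat_data);
 *	}
 */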
   3062 
   3063 /*
   3064  * device_compatible_pmatch:
   3065  *
   3066  *	Like device_compatible_match(), but uses pmatch(9) to compare
   3067  *	the device "compatible" strings against patterns in the
   3068  *	driver's "compatible" data.
   3069  */
   3070 int
   3071 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
   3072     const struct device_compatible_entry *driver_compats)
   3073 {
   3074 	return device_compatible_match_strarray_internal(device_compats,
   3075 	    ndevice_compats, driver_compats, NULL, strarray_pmatch);
   3076 }
   3077 
   3078 static int
   3079 device_compatible_match_strlist_internal(
   3080     const char * const device_compats, size_t const device_compatsize,
   3081     const struct device_compatible_entry *driver_compats,
   3082     const struct device_compatible_entry **matching_entryp,
   3083     int (*match_fn)(const char *, size_t, const char *))
   3084 {
   3085 	const struct device_compatible_entry *dce = NULL;
   3086 	int rv;
   3087 
   3088 	if (device_compats == NULL || device_compatsize == 0 ||
   3089 	    driver_compats == NULL)
   3090 		return 0;
   3091 
   3092 	for (dce = driver_compats; dce->compat != NULL; dce++) {
   3093 		rv = (*match_fn)(device_compats, device_compatsize,
   3094 		    dce->compat);
   3095 		if (rv != 0) {
   3096 			if (matching_entryp != NULL) {
   3097 				*matching_entryp = dce;
   3098 			}
   3099 			return rv;
   3100 		}
   3101 	}
   3102 	return 0;
   3103 }
   3104 
   3105 /*
   3106  * device_compatible_match_strlist:
   3107  *
    3108  *	Like device_compatible_match(), but takes the device
   3109  *	"compatible" strings as an OpenFirmware-style string
   3110  *	list.
   3111  */
   3112 int
   3113 device_compatible_match_strlist(
   3114     const char * const device_compats, size_t const device_compatsize,
   3115     const struct device_compatible_entry *driver_compats)
   3116 {
   3117 	return device_compatible_match_strlist_internal(device_compats,
   3118 	    device_compatsize, driver_compats, NULL, strlist_match);
   3119 }
   3120 
   3121 /*
   3122  * device_compatible_pmatch_strlist:
   3123  *
    3124  *	Like device_compatible_pmatch(), but takes the device
   3125  *	"compatible" strings as an OpenFirmware-style string
   3126  *	list.
   3127  */
   3128 int
   3129 device_compatible_pmatch_strlist(
   3130     const char * const device_compats, size_t const device_compatsize,
   3131     const struct device_compatible_entry *driver_compats)
   3132 {
   3133 	return device_compatible_match_strlist_internal(device_compats,
   3134 	    device_compatsize, driver_compats, NULL, strlist_pmatch);
   3135 }
   3136 
   3137 static int
   3138 device_compatible_match_id_internal(
   3139     uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
   3140     const struct device_compatible_entry *driver_compats,
   3141     const struct device_compatible_entry **matching_entryp)
   3142 {
   3143 	const struct device_compatible_entry *dce = NULL;
   3144 
   3145 	if (mask == 0)
   3146 		return 0;
   3147 
   3148 	for (dce = driver_compats; dce->id != sentinel_id; dce++) {
   3149 		if ((id & mask) == dce->id) {
   3150 			if (matching_entryp != NULL) {
   3151 				*matching_entryp = dce;
   3152 			}
   3153 			return 1;
   3154 		}
   3155 	}
   3156 	return 0;
   3157 }
   3158 
   3159 /*
   3160  * device_compatible_match_id:
   3161  *
   3162  *	Like device_compatible_match(), but takes a single
   3163  *	unsigned integer device ID.
   3164  */
   3165 int
   3166 device_compatible_match_id(
   3167     uintptr_t const id, uintptr_t const sentinel_id,
   3168     const struct device_compatible_entry *driver_compats)
   3169 {
   3170 	return device_compatible_match_id_internal(id, (uintptr_t)-1,
   3171 	    sentinel_id, driver_compats, NULL);
   3172 }
   3173 
   3174 /*
   3175  * device_compatible_lookup:
   3176  *
   3177  *	Look up and return the device_compatible_entry, using the
   3178  *	same matching criteria used by device_compatible_match().
   3179  */
   3180 const struct device_compatible_entry *
   3181 device_compatible_lookup(const char **device_compats, int ndevice_compats,
   3182 			 const struct device_compatible_entry *driver_compats)
   3183 {
   3184 	const struct device_compatible_entry *dce;
   3185 
   3186 	if (device_compatible_match_strarray_internal(device_compats,
   3187 	    ndevice_compats, driver_compats, &dce, strarray_match)) {
   3188 		return dce;
   3189 	}
   3190 	return NULL;
   3191 }
   3192 
   3193 /*
   3194  * device_compatible_plookup:
   3195  *
   3196  *	Look up and return the device_compatible_entry, using the
   3197  *	same matching criteria used by device_compatible_pmatch().
   3198  */
   3199 const struct device_compatible_entry *
   3200 device_compatible_plookup(const char **device_compats, int ndevice_compats,
   3201 			  const struct device_compatible_entry *driver_compats)
   3202 {
   3203 	const struct device_compatible_entry *dce;
   3204 
   3205 	if (device_compatible_match_strarray_internal(device_compats,
   3206 	    ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
   3207 		return dce;
   3208 	}
   3209 	return NULL;
   3210 }
   3211 
   3212 /*
   3213  * device_compatible_lookup_strlist:
   3214  *
    3215  *	Like device_compatible_lookup(), but takes the device
   3216  *	"compatible" strings as an OpenFirmware-style string
   3217  *	list.
   3218  */
   3219 const struct device_compatible_entry *
   3220 device_compatible_lookup_strlist(
   3221     const char * const device_compats, size_t const device_compatsize,
   3222     const struct device_compatible_entry *driver_compats)
   3223 {
   3224 	const struct device_compatible_entry *dce;
   3225 
   3226 	if (device_compatible_match_strlist_internal(device_compats,
   3227 	    device_compatsize, driver_compats, &dce, strlist_match)) {
   3228 		return dce;
   3229 	}
   3230 	return NULL;
   3231 }
   3232 
   3233 /*
   3234  * device_compatible_plookup_strlist:
   3235  *
    3236  *	Like device_compatible_plookup(), but takes the device
   3237  *	"compatible" strings as an OpenFirmware-style string
   3238  *	list.
   3239  */
   3240 const struct device_compatible_entry *
   3241 device_compatible_plookup_strlist(
   3242     const char * const device_compats, size_t const device_compatsize,
   3243     const struct device_compatible_entry *driver_compats)
   3244 {
   3245 	const struct device_compatible_entry *dce;
   3246 
   3247 	if (device_compatible_match_strlist_internal(device_compats,
   3248 	    device_compatsize, driver_compats, &dce, strlist_pmatch)) {
   3249 		return dce;
   3250 	}
   3251 	return NULL;
   3252 }
   3253 
   3254 /*
   3255  * device_compatible_lookup_id:
   3256  *
   3257  *	Like device_compatible_lookup(), but takes a single
   3258  *	unsigned integer device ID.
   3259  */
   3260 const struct device_compatible_entry *
   3261 device_compatible_lookup_id(
   3262     uintptr_t const id, uintptr_t const sentinel_id,
   3263     const struct device_compatible_entry *driver_compats)
   3264 {
   3265 	const struct device_compatible_entry *dce;
   3266 
   3267 	if (device_compatible_match_id_internal(id, (uintptr_t)-1,
   3268 	    sentinel_id, driver_compats, &dce)) {
   3269 		return dce;
   3270 	}
   3271 	return NULL;
   3272 }
   3273 
   3274 /*
   3275  * Power management related functions.
   3276  */
   3277 
   3278 bool
   3279 device_pmf_is_registered(device_t dev)
   3280 {
   3281 	return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
   3282 }
   3283 
   3284 bool
   3285 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
   3286 {
   3287 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
   3288 		return true;
   3289 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
   3290 		return false;
   3291 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
   3292 	    dev->dv_driver_suspend != NULL &&
   3293 	    !(*dev->dv_driver_suspend)(dev, qual))
   3294 		return false;
   3295 
   3296 	dev->dv_flags |= DVF_DRIVER_SUSPENDED;
   3297 	return true;
   3298 }
   3299 
   3300 bool
   3301 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
   3302 {
   3303 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
   3304 		return true;
   3305 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
   3306 		return false;
   3307 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
   3308 	    dev->dv_driver_resume != NULL &&
   3309 	    !(*dev->dv_driver_resume)(dev, qual))
   3310 		return false;
   3311 
   3312 	dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
   3313 	return true;
   3314 }
   3315 
   3316 bool
   3317 device_pmf_driver_shutdown(device_t dev, int how)
   3318 {
   3319 
    3320 	if (dev->dv_driver_shutdown != NULL &&
   3321 	    !(*dev->dv_driver_shutdown)(dev, how))
   3322 		return false;
   3323 	return true;
   3324 }
   3325 
   3326 void
   3327 device_pmf_driver_register(device_t dev,
   3328     bool (*suspend)(device_t, const pmf_qual_t *),
   3329     bool (*resume)(device_t, const pmf_qual_t *),
   3330     bool (*shutdown)(device_t, int))
   3331 {
   3332 
   3333 	dev->dv_driver_suspend = suspend;
   3334 	dev->dv_driver_resume = resume;
   3335 	dev->dv_driver_shutdown = shutdown;
   3336 	dev->dv_flags |= DVF_POWER_HANDLERS;
   3337 }
   3338 
   3339 void
   3340 device_pmf_driver_deregister(device_t dev)
   3341 {
   3342 	device_lock_t dvl = device_getlock(dev);
   3343 
   3344 	dev->dv_driver_suspend = NULL;
   3345 	dev->dv_driver_resume = NULL;
   3346 
   3347 	mutex_enter(&dvl->dvl_mtx);
   3348 	dev->dv_flags &= ~DVF_POWER_HANDLERS;
   3349 	while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
   3350 		/* Wake a thread that waits for the lock.  That
   3351 		 * thread will fail to acquire the lock, and then
   3352 		 * it will wake the next thread that waits for the
   3353 		 * lock, or else it will wake us.
   3354 		 */
   3355 		cv_signal(&dvl->dvl_cv);
   3356 		pmflock_debug(dev, __func__, __LINE__);
   3357 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
   3358 		pmflock_debug(dev, __func__, __LINE__);
   3359 	}
   3360 	mutex_exit(&dvl->dvl_mtx);
   3361 }
   3362 
   3363 void
   3364 device_pmf_driver_child_register(device_t dev)
   3365 {
   3366 	device_t parent = device_parent(dev);
   3367 
   3368 	if (parent == NULL || parent->dv_driver_child_register == NULL)
   3369 		return;
   3370 	(*parent->dv_driver_child_register)(dev);
   3371 }
   3372 
   3373 void
   3374 device_pmf_driver_set_child_register(device_t dev,
   3375     void (*child_register)(device_t))
   3376 {
   3377 	dev->dv_driver_child_register = child_register;
   3378 }
   3379 
   3380 static void
   3381 pmflock_debug(device_t dev, const char *func, int line)
   3382 {
   3383 #ifdef PMFLOCK_DEBUG
   3384 	device_lock_t dvl = device_getlock(dev);
   3385 	const char *curlwp_name;
   3386 
   3387 	if (curlwp->l_name != NULL)
   3388 		curlwp_name = curlwp->l_name;
   3389 	else
   3390 		curlwp_name = curlwp->l_proc->p_comm;
   3391 
   3392 	aprint_debug_dev(dev,
   3393 	    "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
   3394 	    curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
   3395 #endif	/* PMFLOCK_DEBUG */
   3396 }
   3397 
   3398 static bool
   3399 device_pmf_lock1(device_t dev)
   3400 {
   3401 	device_lock_t dvl = device_getlock(dev);
   3402 
   3403 	while (device_pmf_is_registered(dev) &&
   3404 	    dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
   3405 		dvl->dvl_nwait++;
   3406 		pmflock_debug(dev, __func__, __LINE__);
   3407 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
   3408 		pmflock_debug(dev, __func__, __LINE__);
   3409 		dvl->dvl_nwait--;
   3410 	}
   3411 	if (!device_pmf_is_registered(dev)) {
   3412 		pmflock_debug(dev, __func__, __LINE__);
    3413 		/* We could not acquire the lock, but some other thread
    3414 		 * may still be waiting for it.  Wake that thread.
   3415 		 */
   3416 		cv_signal(&dvl->dvl_cv);
   3417 		return false;
   3418 	}
   3419 	dvl->dvl_nlock++;
   3420 	dvl->dvl_holder = curlwp;
   3421 	pmflock_debug(dev, __func__, __LINE__);
   3422 	return true;
   3423 }
   3424 
   3425 bool
   3426 device_pmf_lock(device_t dev)
   3427 {
   3428 	bool rc;
   3429 	device_lock_t dvl = device_getlock(dev);
   3430 
   3431 	mutex_enter(&dvl->dvl_mtx);
   3432 	rc = device_pmf_lock1(dev);
   3433 	mutex_exit(&dvl->dvl_mtx);
   3434 
   3435 	return rc;
   3436 }
   3437 
   3438 void
   3439 device_pmf_unlock(device_t dev)
   3440 {
   3441 	device_lock_t dvl = device_getlock(dev);
   3442 
   3443 	KASSERT(dvl->dvl_nlock > 0);
   3444 	mutex_enter(&dvl->dvl_mtx);
   3445 	if (--dvl->dvl_nlock == 0)
   3446 		dvl->dvl_holder = NULL;
   3447 	cv_signal(&dvl->dvl_cv);
   3448 	pmflock_debug(dev, __func__, __LINE__);
   3449 	mutex_exit(&dvl->dvl_mtx);
   3450 }
   3451 
   3452 device_lock_t
   3453 device_getlock(device_t dev)
   3454 {
   3455 	return &dev->dv_lock;
   3456 }
   3457 
   3458 void *
   3459 device_pmf_bus_private(device_t dev)
   3460 {
   3461 	return dev->dv_bus_private;
   3462 }
   3463 
   3464 bool
   3465 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
   3466 {
   3467 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
   3468 		return true;
   3469 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
   3470 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
   3471 		return false;
   3472 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
   3473 	    dev->dv_bus_suspend != NULL &&
   3474 	    !(*dev->dv_bus_suspend)(dev, qual))
   3475 		return false;
   3476 
   3477 	dev->dv_flags |= DVF_BUS_SUSPENDED;
   3478 	return true;
   3479 }
   3480 
   3481 bool
   3482 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
   3483 {
   3484 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
   3485 		return true;
   3486 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
   3487 	    dev->dv_bus_resume != NULL &&
   3488 	    !(*dev->dv_bus_resume)(dev, qual))
   3489 		return false;
   3490 
   3491 	dev->dv_flags &= ~DVF_BUS_SUSPENDED;
   3492 	return true;
   3493 }
   3494 
   3495 bool
   3496 device_pmf_bus_shutdown(device_t dev, int how)
   3497 {
   3498 
    3499 	if (dev->dv_bus_shutdown != NULL &&
   3500 	    !(*dev->dv_bus_shutdown)(dev, how))
   3501 		return false;
   3502 	return true;
   3503 }
   3504 
   3505 void
   3506 device_pmf_bus_register(device_t dev, void *priv,
   3507     bool (*suspend)(device_t, const pmf_qual_t *),
   3508     bool (*resume)(device_t, const pmf_qual_t *),
   3509     bool (*shutdown)(device_t, int), void (*deregister)(device_t))
   3510 {
   3511 	dev->dv_bus_private = priv;
   3512 	dev->dv_bus_resume = resume;
   3513 	dev->dv_bus_suspend = suspend;
   3514 	dev->dv_bus_shutdown = shutdown;
   3515 	dev->dv_bus_deregister = deregister;
   3516 }
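
/*
 * Illustrative sketch of how a bus front end typically hooks a child
 * device into pmf(9) at the bus level; the mybus_* names and
 * struct mybus_child are hypothetical.  device_pmf_class_register()
 * is used analogously for the class layer.
 *
 *	static bool mybus_child_suspend(device_t, const pmf_qual_t *);
 *	static bool mybus_child_resume(device_t, const pmf_qual_t *);
 *	static bool mybus_child_shutdown(device_t, int);
 *	static void mybus_child_deregister(device_t);
 *
 *	static void
 *	mybus_child_register(device_t child, struct mybus_child *priv)
 *	{
 *
 *		device_pmf_bus_register(child, priv,
 *		    mybus_child_suspend, mybus_child_resume,
 *		    mybus_child_shutdown, mybus_child_deregister);
 *	}
 */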
   3517 
   3518 void
   3519 device_pmf_bus_deregister(device_t dev)
   3520 {
   3521 	if (dev->dv_bus_deregister == NULL)
   3522 		return;
   3523 	(*dev->dv_bus_deregister)(dev);
   3524 	dev->dv_bus_private = NULL;
   3525 	dev->dv_bus_suspend = NULL;
   3526 	dev->dv_bus_resume = NULL;
   3527 	dev->dv_bus_deregister = NULL;
   3528 }
   3529 
   3530 void *
   3531 device_pmf_class_private(device_t dev)
   3532 {
   3533 	return dev->dv_class_private;
   3534 }
   3535 
   3536 bool
   3537 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
   3538 {
   3539 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
   3540 		return true;
   3541 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
   3542 	    dev->dv_class_suspend != NULL &&
   3543 	    !(*dev->dv_class_suspend)(dev, qual))
   3544 		return false;
   3545 
   3546 	dev->dv_flags |= DVF_CLASS_SUSPENDED;
   3547 	return true;
   3548 }
   3549 
   3550 bool
   3551 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
   3552 {
   3553 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
   3554 		return true;
   3555 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
   3556 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
   3557 		return false;
   3558 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
   3559 	    dev->dv_class_resume != NULL &&
   3560 	    !(*dev->dv_class_resume)(dev, qual))
   3561 		return false;
   3562 
   3563 	dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
   3564 	return true;
   3565 }
   3566 
   3567 void
   3568 device_pmf_class_register(device_t dev, void *priv,
   3569     bool (*suspend)(device_t, const pmf_qual_t *),
   3570     bool (*resume)(device_t, const pmf_qual_t *),
   3571     void (*deregister)(device_t))
   3572 {
   3573 	dev->dv_class_private = priv;
   3574 	dev->dv_class_suspend = suspend;
   3575 	dev->dv_class_resume = resume;
   3576 	dev->dv_class_deregister = deregister;
   3577 }
   3578 
   3579 void
   3580 device_pmf_class_deregister(device_t dev)
   3581 {
   3582 	if (dev->dv_class_deregister == NULL)
   3583 		return;
   3584 	(*dev->dv_class_deregister)(dev);
   3585 	dev->dv_class_private = NULL;
   3586 	dev->dv_class_suspend = NULL;
   3587 	dev->dv_class_resume = NULL;
   3588 	dev->dv_class_deregister = NULL;
   3589 }
   3590 
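/*
 * Report activity of type `type' on `dev' by calling every registered
 * activity handler.  Returns false if no handlers are registered.
 */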
   3591 bool
   3592 device_active(device_t dev, devactive_t type)
   3593 {
   3594 	size_t i;
   3595 
   3596 	if (dev->dv_activity_count == 0)
   3597 		return false;
   3598 
   3599 	for (i = 0; i < dev->dv_activity_count; ++i) {
   3600 		if (dev->dv_activity_handlers[i] == NULL)
   3601 			break;
   3602 		(*dev->dv_activity_handlers[i])(dev, type);
   3603 	}
   3604 
   3605 	return true;
   3606 }
   3607 
   3608 bool
   3609 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
   3610 {
   3611 	void (**new_handlers)(device_t, devactive_t);
   3612 	void (**old_handlers)(device_t, devactive_t);
   3613 	size_t i, old_size, new_size;
   3614 	int s;
   3615 
   3616 	old_handlers = dev->dv_activity_handlers;
   3617 	old_size = dev->dv_activity_count;
   3618 
   3619 	KASSERT(old_size == 0 || old_handlers != NULL);
   3620 
   3621 	for (i = 0; i < old_size; ++i) {
   3622 		KASSERT(old_handlers[i] != handler);
   3623 		if (old_handlers[i] == NULL) {
   3624 			old_handlers[i] = handler;
   3625 			return true;
   3626 		}
   3627 	}
   3628 
   3629 	new_size = old_size + 4;
   3630 	new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
   3631 
   3632 	for (i = 0; i < old_size; ++i)
   3633 		new_handlers[i] = old_handlers[i];
   3634 	new_handlers[old_size] = handler;
   3635 	for (i = old_size+1; i < new_size; ++i)
   3636 		new_handlers[i] = NULL;
   3637 
   3638 	s = splhigh();
   3639 	dev->dv_activity_count = new_size;
   3640 	dev->dv_activity_handlers = new_handlers;
   3641 	splx(s);
   3642 
   3643 	if (old_size > 0)
   3644 		kmem_free(old_handlers, sizeof(void *) * old_size);
   3645 
   3646 	return true;
   3647 }
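
/*
 * Illustrative sketch: a driver interested in activity notifications
 * registers a handler, typically at attach time, and passes the same
 * handler to device_active_deregister() on detach.  The mydev_* names
 * and the sc_last_activity field are hypothetical.
 *
 *	static void
 *	mydev_activity(device_t dev, devactive_t type)
 *	{
 *		struct mydev_softc *sc = device_private(dev);
 *
 *		sc->sc_last_activity = time_uptime;
 *	}
 *
 *	static void
 *	mydev_attach_hooks(device_t self)
 *	{
 *
 *		if (!device_active_register(self, mydev_activity))
 *			aprint_error_dev(self,
 *			    "couldn't register activity handler\n");
 *	}
 */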
   3648 
   3649 void
   3650 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
   3651 {
   3652 	void (**old_handlers)(device_t, devactive_t);
   3653 	size_t i, old_size;
   3654 	int s;
   3655 
   3656 	old_handlers = dev->dv_activity_handlers;
   3657 	old_size = dev->dv_activity_count;
   3658 
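	/*
	 * Look for `handler'.  Registered handlers are packed at the
	 * front of the table, so reaching a NULL slot means the handler
	 * was never registered.
	 */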
   3659 	for (i = 0; i < old_size; ++i) {
   3660 		if (old_handlers[i] == handler)
   3661 			break;
   3662 		if (old_handlers[i] == NULL)
   3663 			return; /* XXX panic? */
   3664 	}
   3665 
   3666 	if (i == old_size)
   3667 		return; /* XXX panic? */
   3668 
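	/*
	 * Remove the handler by shifting the entries that follow it down
	 * one slot.  If that leaves the table empty, detach the table
	 * from the device at splhigh() and free it; otherwise terminate
	 * the table with NULL below.
	 */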
   3669 	for (; i < old_size - 1; ++i) {
   3670 		if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
   3671 			continue;
   3672 
   3673 		if (i == 0) {
   3674 			s = splhigh();
   3675 			dev->dv_activity_count = 0;
   3676 			dev->dv_activity_handlers = NULL;
   3677 			splx(s);
   3678 			kmem_free(old_handlers, sizeof(void *) * old_size);
   3679 		}
   3680 		return;
   3681 	}
   3682 	old_handlers[i] = NULL;
   3683 }
   3684 
    3685 /* Return true iff the device_t `dv' exists at generation `gen'. */
   3686 static bool
   3687 device_exists_at(device_t dv, devgen_t gen)
   3688 {
   3689 	return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
   3690 	    dv->dv_add_gen <= gen;
   3691 }
   3692 
   3693 static bool
   3694 deviter_visits(const deviter_t *di, device_t dv)
   3695 {
   3696 	return device_exists_at(dv, di->di_gen);
   3697 }
   3698 
   3699 /*
   3700  * Device Iteration
   3701  *
   3702  * deviter_t: a device iterator.  Holds state for a "walk" visiting
    3703  *     each device_t in the device tree.
   3704  *
   3705  * deviter_init(di, flags): initialize the device iterator `di'
   3706  *     to "walk" the device tree.  deviter_next(di) will return
   3707  *     the first device_t in the device tree, or NULL if there are
   3708  *     no devices.
   3709  *
   3710  *     `flags' is one or more of DEVITER_F_RW, indicating that the
   3711  *     caller intends to modify the device tree by calling
   3712  *     config_detach(9) on devices in the order that the iterator
   3713  *     returns them; DEVITER_F_ROOT_FIRST, asking for the devices
    3714  *     nearest the "root" of the device tree to be returned first;
    3715  *     DEVITER_F_LEAVES_FIRST, asking for the devices furthest
    3716  *     from the root to be returned first; and DEVITER_F_SHUTDOWN,
   3717  *     indicating both that deviter_init() should not respect any
   3718  *     locks on the device tree, and that deviter_next(di) may run
   3719  *     in more than one LWP before the walk has finished.
   3720  *
   3721  *     Only one DEVITER_F_RW iterator may be in the device tree at
   3722  *     once.
   3723  *
   3724  *     DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
   3725  *
   3726  *     Results are undefined if the flags DEVITER_F_ROOT_FIRST and
   3727  *     DEVITER_F_LEAVES_FIRST are used in combination.
   3728  *
   3729  * deviter_first(di, flags): initialize the device iterator `di'
   3730  *     and return the first device_t in the device tree, or NULL
   3731  *     if there are no devices.  The statement
   3732  *
    3733  *         dv = deviter_first(di, flags);
   3734  *
   3735  *     is shorthand for
   3736  *
    3737  *         deviter_init(di, flags);
   3738  *         dv = deviter_next(di);
   3739  *
   3740  * deviter_next(di): return the next device_t in the device tree,
   3741  *     or NULL if there are no more devices.  deviter_next(di)
   3742  *     is undefined if `di' was not initialized with deviter_init() or
   3743  *     deviter_first().
   3744  *
   3745  * deviter_release(di): stops iteration (subsequent calls to
    3746  *     deviter_next() will return NULL) and releases any locks
    3747  *     and resources held by the device iterator.
   3748  *
   3749  * Device iteration does not return device_t's in any particular
   3750  * order.  An iterator will never return the same device_t twice.
   3751  * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
   3752  * is called repeatedly on the same `di', it will eventually return
   3753  * NULL.  It is ok to attach/detach devices during device iteration.
   3754  */
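
/*
 * Illustrative sketch of a plain read-only walk over the device tree,
 * here just counting the devices present; the function name is
 * hypothetical.  See also deviter(9).
 *
 *	static int
 *	example_count_devices(void)
 *	{
 *		deviter_t di;
 *		device_t dv;
 *		int n = 0;
 *
 *		for (dv = deviter_first(&di, 0); dv != NULL;
 *		     dv = deviter_next(&di))
 *			n++;
 *		deviter_release(&di);
 *		return n;
 *	}
 */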
   3755 void
   3756 deviter_init(deviter_t *di, deviter_flags_t flags)
   3757 {
   3758 	device_t dv;
   3759 
   3760 	memset(di, 0, sizeof(*di));
   3761 
   3762 	if ((flags & DEVITER_F_SHUTDOWN) != 0)
   3763 		flags |= DEVITER_F_RW;
   3764 
   3765 	mutex_enter(&alldevs_lock);
   3766 	if ((flags & DEVITER_F_RW) != 0)
   3767 		alldevs_nwrite++;
   3768 	else
   3769 		alldevs_nread++;
   3770 	di->di_gen = alldevs_gen++;
   3771 	di->di_flags = flags;
   3772 
   3773 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
   3774 	case DEVITER_F_LEAVES_FIRST:
   3775 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
   3776 			if (!deviter_visits(di, dv))
   3777 				continue;
   3778 			di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
   3779 		}
   3780 		break;
   3781 	case DEVITER_F_ROOT_FIRST:
   3782 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
   3783 			if (!deviter_visits(di, dv))
   3784 				continue;
   3785 			di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
   3786 		}
   3787 		break;
   3788 	default:
   3789 		break;
   3790 	}
   3791 
   3792 	deviter_reinit(di);
   3793 	mutex_exit(&alldevs_lock);
   3794 }
   3795 
   3796 static void
   3797 deviter_reinit(deviter_t *di)
   3798 {
   3799 
   3800 	KASSERT(mutex_owned(&alldevs_lock));
   3801 	if ((di->di_flags & DEVITER_F_RW) != 0)
   3802 		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
   3803 	else
   3804 		di->di_prev = TAILQ_FIRST(&alldevs);
   3805 }
   3806 
   3807 device_t
   3808 deviter_first(deviter_t *di, deviter_flags_t flags)
   3809 {
   3810 
   3811 	deviter_init(di, flags);
   3812 	return deviter_next(di);
   3813 }
   3814 
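/*
 * Return the device the iterator is currently positioned at and advance
 * di_prev to the next candidate: backwards through the list for
 * read/write iterators, forwards otherwise.  Returns NULL at the end of
 * the list.
 */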
   3815 static device_t
   3816 deviter_next2(deviter_t *di)
   3817 {
   3818 	device_t dv;
   3819 
   3820 	KASSERT(mutex_owned(&alldevs_lock));
   3821 
   3822 	dv = di->di_prev;
   3823 
   3824 	if (dv == NULL)
   3825 		return NULL;
   3826 
   3827 	if ((di->di_flags & DEVITER_F_RW) != 0)
   3828 		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
   3829 	else
   3830 		di->di_prev = TAILQ_NEXT(dv, dv_list);
   3831 
   3832 	return dv;
   3833 }
   3834 
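/*
 * Like deviter_next2(), but skip devices that did not exist at the
 * generation where this iteration started.
 */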
   3835 static device_t
   3836 deviter_next1(deviter_t *di)
   3837 {
   3838 	device_t dv;
   3839 
   3840 	KASSERT(mutex_owned(&alldevs_lock));
   3841 
   3842 	do {
   3843 		dv = deviter_next2(di);
   3844 	} while (dv != NULL && !deviter_visits(di, dv));
   3845 
   3846 	return dv;
   3847 }
   3848 
   3849 device_t
   3850 deviter_next(deviter_t *di)
   3851 {
   3852 	device_t dv = NULL;
   3853 
   3854 	mutex_enter(&alldevs_lock);
   3855 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
   3856 	case 0:
   3857 		dv = deviter_next1(di);
   3858 		break;
   3859 	case DEVITER_F_LEAVES_FIRST:
   3860 		while (di->di_curdepth >= 0) {
   3861 			if ((dv = deviter_next1(di)) == NULL) {
   3862 				di->di_curdepth--;
   3863 				deviter_reinit(di);
   3864 			} else if (dv->dv_depth == di->di_curdepth)
   3865 				break;
   3866 		}
   3867 		break;
   3868 	case DEVITER_F_ROOT_FIRST:
   3869 		while (di->di_curdepth <= di->di_maxdepth) {
   3870 			if ((dv = deviter_next1(di)) == NULL) {
   3871 				di->di_curdepth++;
   3872 				deviter_reinit(di);
   3873 			} else if (dv->dv_depth == di->di_curdepth)
   3874 				break;
   3875 		}
   3876 		break;
   3877 	default:
   3878 		break;
   3879 	}
   3880 	mutex_exit(&alldevs_lock);
   3881 
   3882 	return dv;
   3883 }
   3884 
   3885 void
   3886 deviter_release(deviter_t *di)
   3887 {
   3888 	bool rw = (di->di_flags & DEVITER_F_RW) != 0;
   3889 
   3890 	mutex_enter(&alldevs_lock);
   3891 	if (rw)
   3892 		--alldevs_nwrite;
   3893 	else
   3894 		--alldevs_nread;
   3895 	/* XXX wake a garbage-collection thread */
   3896 	mutex_exit(&alldevs_lock);
   3897 }
   3898 
   3899 const char *
   3900 cfdata_ifattr(const struct cfdata *cf)
   3901 {
   3902 	return cf->cf_pspec->cfp_iattr;
   3903 }
   3904 
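/*
 * Match an interface attribute name; a NULL `snull' acts as a wildcard
 * and matches any attribute.
 */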
   3905 bool
   3906 ifattr_match(const char *snull, const char *t)
   3907 {
   3908 	return (snull == NULL) || strcmp(snull, t) == 0;
   3909 }
   3910 
   3911 void
   3912 null_childdetached(device_t self, device_t child)
   3913 {
   3914 	/* do nothing */
   3915 }
   3916 
   3917 static void
   3918 sysctl_detach_setup(struct sysctllog **clog)
   3919 {
   3920 
   3921 	sysctl_createv(clog, 0, NULL, NULL,
   3922 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   3923 		CTLTYPE_BOOL, "detachall",
   3924 		SYSCTL_DESCR("Detach all devices at shutdown"),
   3925 		NULL, 0, &detachall, 0,
   3926 		CTL_KERN, CTL_CREATE, CTL_EOL);
   3927 }
   3928