      1 /* $NetBSD: subr_autoconf.c,v 1.318 2026/01/17 02:01:39 thorpej Exp $ */
      2 
      3 /*
      4  * Copyright (c) 1996, 2000 Christopher G. Demetriou
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *          This product includes software developed for the
     18  *          NetBSD Project.  See http://www.NetBSD.org/ for
     19  *          information about NetBSD.
     20  * 4. The name of the author may not be used to endorse or promote products
     21  *    derived from this software without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     33  *
     34  * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
     35  */
     36 
     37 /*
     38  * Copyright (c) 1992, 1993
     39  *	The Regents of the University of California.  All rights reserved.
     40  *
     41  * This software was developed by the Computer Systems Engineering group
     42  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     43  * contributed to Berkeley.
     44  *
     45  * All advertising materials mentioning features or use of this software
     46  * must display the following acknowledgement:
     47  *	This product includes software developed by the University of
     48  *	California, Lawrence Berkeley Laboratories.
     49  *
     50  * Redistribution and use in source and binary forms, with or without
     51  * modification, are permitted provided that the following conditions
     52  * are met:
     53  * 1. Redistributions of source code must retain the above copyright
     54  *    notice, this list of conditions and the following disclaimer.
     55  * 2. Redistributions in binary form must reproduce the above copyright
     56  *    notice, this list of conditions and the following disclaimer in the
     57  *    documentation and/or other materials provided with the distribution.
     58  * 3. Neither the name of the University nor the names of its contributors
     59  *    may be used to endorse or promote products derived from this software
     60  *    without specific prior written permission.
     61  *
     62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     72  * SUCH DAMAGE.
     73  *
     74  * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp  (LBL)
     75  *
     76  *	@(#)subr_autoconf.c	8.3 (Berkeley) 5/17/94
     77  */
     78 
     79 #include <sys/cdefs.h>
     80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.318 2026/01/17 02:01:39 thorpej Exp $");
     81 
     82 #ifdef _KERNEL_OPT
     83 #include "opt_ddb.h"
     84 #include "drvctl.h"
     85 #endif
     86 
     87 #include <sys/param.h>
     88 #include <sys/types.h>
     89 
     90 #include <sys/buf.h>
     91 #include <sys/callout.h>
     92 #include <sys/conf.h>
     93 #include <sys/cpu.h>
     94 #include <sys/device.h>
     95 #include <sys/device_calls.h>
     96 #include <sys/device_impl.h>
     97 #include <sys/devmon.h>
     98 #include <sys/dirent.h>
     99 #include <sys/disk.h>
    100 #include <sys/disklabel.h>
    101 #include <sys/errno.h>
    102 #include <sys/fcntl.h>
    103 #include <sys/kauth.h>
    104 #include <sys/kernel.h>
    105 #include <sys/kmem.h>
    106 #include <sys/kthread.h>
    107 #include <sys/localcount.h>
    108 #include <sys/lockf.h>
    109 #include <sys/mount.h>
    110 #include <sys/namei.h>
    111 #include <sys/proc.h>
    112 #include <sys/reboot.h>
    113 #include <sys/rndsource.h>
    114 #include <sys/sdt.h>
    115 #include <sys/stdarg.h>
    116 #include <sys/sysctl.h>
    117 #include <sys/systm.h>
    118 #include <sys/unistd.h>
    119 
    120 #include <machine/limits.h>
    121 
    122 /*
    123  * Autoconfiguration subroutines.
    124  */
    125 
    126 /*
    127  * Device autoconfiguration timings are mixed into the entropy pool.
    128  */
    129 static krndsource_t rnd_autoconf_source;
    130 
    131 /*
    132  * ioconf.c exports exactly two names: cfdata and cfroots.  All system
    133  * devices and drivers are found via these tables.
    134  */
    135 extern struct cfdata cfdata[];
    136 extern const short cfroots[];
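
/*
 * Illustrative sketch only (the real tables are generated by config(1);
 * "mainbus" and "foo" are hypothetical, and the initial cf_fstate values
 * depend on the configuration):
 *
 *	struct cfdata cfdata[] = {
 *	    // 0: mainbus0 (a root node: cf_pspec is NULL)
 *	    { .cf_name = "mainbus", .cf_atname = "mainbus", .cf_unit = 0,
 *	      .cf_fstate = FSTATE_NOTFOUND },
 *	    // 1: foo0 at mainbus0
 *	    { .cf_name = "foo", .cf_atname = "foo", .cf_unit = 0,
 *	      .cf_fstate = FSTATE_NOTFOUND, .cf_loc = foocf_loc,
 *	      .cf_pspec = &foocf_pspec },
 *	    { .cf_name = NULL }		// terminator: loops stop at NULL cf_name
 *	};
 *	const short cfroots[] = {
 *	    0,		// index of the root entry above
 *	    -1		// terminator: config_rootsearch() stops at < 0
 *	};
 */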
    137 
    138 /*
    139  * List of all cfdriver structures.  We use this to detect duplicates
    140  * when other cfdrivers are loaded.
    141  */
    142 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
    143 extern struct cfdriver * const cfdriver_list_initial[];
    144 
    145 /*
    146  * Initial list of cfattach's.
    147  */
    148 extern const struct cfattachinit cfattachinit[];
    149 
    150 /*
    151  * List of all cfattach interface attributes.  Kept separately from
    152  * cfattaches themselves because they're extremely uncommon.
    153  */
    154 static LIST_HEAD(, cfattachiattr) allcfattachiattrs =
    155     LIST_HEAD_INITIALIZER(allcfattachiattrs);
    156 
    157 /*
     158  * List of cfdata tables.  There is always at least one such table --
     159  * the one built statically when the kernel was configured.
    160  */
    161 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
    162 static struct cftable initcftable;
    163 
    164 #define	ROOT ((device_t)NULL)
    165 
    166 struct matchinfo {
    167 	cfsubmatch_t fn;
    168 	device_t parent;
    169 	const int *locs;
    170 	void	*aux;
    171 	struct	cfdata *match;
    172 	int	pri;
    173 };
    174 
    175 struct alldevs_foray {
    176 	int			af_s;
    177 	struct devicelist	af_garbage;
    178 };
    179 
    180 /*
    181  * Internal version of the cfargs structure; all versions are
    182  * canonicalized to this.
    183  */
    184 struct cfargs_internal {
    185 	union {
    186 		cfsubmatch_t	submatch;/* submatch function (direct config) */
    187 		cfsearch_t	search;	 /* search function (indirect config) */
    188 	};
    189 	const char *	iattr;		/* interface attribute */
    190 	const int *	locators;	/* locators array */
    191 	devhandle_t	devhandle;	/* devhandle_t (by value) */
    192 };
    193 
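/*
 * Illustrative sketch: callers describe their arguments with the
 * CFARGS() macro from <sys/device.h>, and cfargs_canonicalize() below
 * flattens that into this internal form (CFARGS_NONE canonicalizes to
 * all-NULL fields).  The "foobus" attribute and locator array here are
 * hypothetical:
 *
 *	config_found(self, &faa, fooprint,
 *	    CFARGS(.iattr = "foobus",
 *		   .locators = faa->faa_locs));
 */
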
    194 static char *number(char *, int);
    195 static void mapply(struct matchinfo *, cfdata_t);
    196 static void config_devdelete(device_t);
    197 static void config_devunlink(device_t, struct devicelist *);
    198 static void config_makeroom(int, struct cfdriver *);
    199 static void config_devlink(device_t);
    200 static void config_alldevs_enter(struct alldevs_foray *);
    201 static void config_alldevs_exit(struct alldevs_foray *);
    202 static void config_add_attrib_dict(device_t);
    203 static device_t	config_attach_internal(device_t, cfdata_t, void *,
    204 		    cfprint_t, const struct cfargs_internal *);
    205 
    206 static void config_collect_garbage(struct devicelist *);
    207 static void config_dump_garbage(struct devicelist *);
    208 
    209 static void pmflock_debug(device_t, const char *, int);
    210 
    211 static device_t deviter_next1(deviter_t *);
    212 static void deviter_reinit(deviter_t *);
    213 
    214 struct deferred_config {
    215 	TAILQ_ENTRY(deferred_config) dc_queue;
    216 	device_t dc_dev;
    217 	void (*dc_func)(device_t);
    218 };
    219 
    220 TAILQ_HEAD(deferred_config_head, deferred_config);
    221 
    222 static struct deferred_config_head deferred_config_queue =
    223 	TAILQ_HEAD_INITIALIZER(deferred_config_queue);
    224 static struct deferred_config_head interrupt_config_queue =
    225 	TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
    226 static int interrupt_config_threads = 8;
    227 static struct deferred_config_head mountroot_config_queue =
    228 	TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
    229 static int mountroot_config_threads = 2;
    230 static lwp_t **mountroot_config_lwpids;
    231 static size_t mountroot_config_lwpids_size;
    232 bool root_is_mounted = false;
    233 
    234 static void config_process_deferred(struct deferred_config_head *, device_t);
    235 
    236 /* Hooks to finalize configuration once all real devices have been found. */
    237 struct finalize_hook {
    238 	TAILQ_ENTRY(finalize_hook) f_list;
    239 	int (*f_func)(device_t);
    240 	device_t f_dev;
    241 };
    242 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
    243 	TAILQ_HEAD_INITIALIZER(config_finalize_list);
    244 static int config_finalize_done;
    245 
    246 /* list of all devices */
    247 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
    248 static kmutex_t alldevs_lock __cacheline_aligned;
    249 static devgen_t alldevs_gen = 1;
    250 static int alldevs_nread = 0;
    251 static int alldevs_nwrite = 0;
    252 static bool alldevs_garbage = false;
    253 
    254 static struct devicelist config_pending =
    255     TAILQ_HEAD_INITIALIZER(config_pending);
    256 static kmutex_t config_misc_lock;
    257 static kcondvar_t config_misc_cv;
    258 
    259 static bool detachall = false;
    260 
    261 #define	STREQ(s1, s2)			\
    262 	(*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
    263 
    264 static bool config_initialized = false;	/* config_init() has been called. */
    265 
    266 static int config_do_twiddle;
    267 static callout_t config_twiddle_ch;
    268 
    269 static void sysctl_detach_setup(struct sysctllog **);
    270 
    271 int no_devmon_insert(const char *, prop_dictionary_t);
    272 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
    273 
    274 typedef int (*cfdriver_fn)(struct cfdriver *);
    275 static int
    276 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
    277 	cfdriver_fn drv_do, cfdriver_fn drv_undo,
    278 	const char *style, bool dopanic)
    279 {
    280 	void (*pr)(const char *, ...) __printflike(1, 2) =
    281 	    dopanic ? panic : printf;
    282 	int i, error = 0, e2 __diagused;
    283 
    284 	for (i = 0; cfdriverv[i] != NULL; i++) {
    285 		if ((error = drv_do(cfdriverv[i])) != 0) {
    286 			pr("configure: `%s' driver %s failed: %d",
    287 			    cfdriverv[i]->cd_name, style, error);
    288 			goto bad;
    289 		}
    290 	}
    291 
    292 	KASSERT(error == 0);
    293 	return 0;
    294 
    295  bad:
    296 	printf("\n");
    297 	for (i--; i >= 0; i--) {
    298 		e2 = drv_undo(cfdriverv[i]);
    299 		KASSERT(e2 == 0);
    300 	}
    301 
    302 	return error;
    303 }
    304 
    305 typedef int (*cfattach_fn)(const char *, struct cfattach *,
    306     struct cfattachiattr * const *);
    307 static int config_cfattach_attach_iattrs(const char *, struct cfattach *,
    308     struct cfattachiattr * const *);
    309 static int config_cfattach_detach_iattrs(const char *, struct cfattach *,
    310     struct cfattachiattr * const *);
    311 static int
    312 frob_cfattachvec(const struct cfattachinit *cfattachv,
    313 	cfattach_fn att_do, cfattach_fn att_undo,
    314 	const char *style, bool dopanic)
    315 {
    316 	const struct cfattachinit *cfai = NULL;
    317 	void (*pr)(const char *, ...) __printflike(1, 2) =
    318 	    dopanic ? panic : printf;
    319 	int j = 0, error = 0, e2 __diagused;
    320 
    321 	for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
    322 		for (j = 0; cfai->cfai_list[j] != NULL; j++) {
    323 			if ((error = att_do(cfai->cfai_name,
    324 			    cfai->cfai_list[j], cfai->cfai_iattrs)) != 0) {
    325 				pr("configure: attachment `%s' "
    326 				    "of `%s' driver %s failed: %d",
    327 				    cfai->cfai_list[j]->ca_name,
    328 				    cfai->cfai_name, style, error);
    329 				goto bad;
    330 			}
    331 		}
    332 	}
    333 
    334 	KASSERT(error == 0);
    335 	return 0;
    336 
    337  bad:
    338 	/*
     339 	 * Roll back in reverse order.  It is not clear that the ordering
     340 	 * matters, but undo it anyway.  The nested loop below mirrors, in
     341 	 * reverse, the iteration performed on the way in.
    342 	 */
    343 	printf("\n");
    344 	if (cfai) {
    345 		bool last;
    346 
    347 		for (last = false; last == false; ) {
    348 			if (cfai == &cfattachv[0])
    349 				last = true;
    350 			for (j--; j >= 0; j--) {
    351 				e2 = att_undo(cfai->cfai_name,
    352 				    cfai->cfai_list[j], cfai->cfai_iattrs);
    353 				KASSERT(e2 == 0);
    354 			}
    355 			if (!last) {
    356 				cfai--;
    357 				for (j = 0; cfai->cfai_list[j] != NULL; j++)
    358 					;
    359 			}
    360 		}
    361 	}
    362 
    363 	return error;
    364 }
    365 
    366 /*
    367  * Initialize the autoconfiguration data structures.  Normally this
    368  * is done by configure(), but some platforms need to do this very
    369  * early (to e.g. initialize the console).
    370  */
    371 void
    372 config_init(void)
    373 {
    374 
    375 	KASSERT(config_initialized == false);
    376 
    377 	mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
    378 
    379 	mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
    380 	cv_init(&config_misc_cv, "cfgmisc");
    381 
    382 	callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
    383 
    384 	frob_cfdrivervec(cfdriver_list_initial,
    385 	    config_cfdriver_attach, NULL, "bootstrap", true);
    386 	frob_cfattachvec(cfattachinit,
    387 	    config_cfattach_attach_iattrs, NULL, "bootstrap", true);
    388 
    389 	initcftable.ct_cfdata = cfdata;
    390 	TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
    391 
    392 	rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
    393 	    RND_FLAG_COLLECT_TIME);
    394 
    395 	config_initialized = true;
    396 }
    397 
    398 /*
    399  * Init or fini drivers and attachments.  Either all or none
    400  * are processed (via rollback).  It would be nice if this were
    401  * atomic to outside consumers, but with the current state of
    402  * locking ...
    403  */
    404 int
    405 config_init_component(struct cfdriver * const *cfdriverv,
    406 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
    407 {
    408 	int error;
    409 
    410 	KERNEL_LOCK(1, NULL);
    411 
    412 	if ((error = frob_cfdrivervec(cfdriverv,
    413 	    config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
    414 		goto out;
    415 	if ((error = frob_cfattachvec(cfattachv,
    416 	    config_cfattach_attach_iattrs, config_cfattach_detach_iattrs,
    417 	    "init", false)) != 0) {
    418 		frob_cfdrivervec(cfdriverv,
    419 	            config_cfdriver_detach, NULL, "init rollback", true);
    420 		goto out;
    421 	}
    422 	if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
    423 		frob_cfattachvec(cfattachv,
    424 		    config_cfattach_detach_iattrs, NULL, "init rollback", true);
    425 		frob_cfdrivervec(cfdriverv,
    426 	            config_cfdriver_detach, NULL, "init rollback", true);
    427 		goto out;
    428 	}
    429 
    430 	/* Success!  */
    431 	error = 0;
    432 
    433 out:	KERNEL_UNLOCK_ONE(NULL);
    434 	return error;
    435 }
    436 
    437 int
    438 config_fini_component(struct cfdriver * const *cfdriverv,
    439 	const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
    440 {
    441 	int error;
    442 
    443 	KERNEL_LOCK(1, NULL);
    444 
    445 	if ((error = config_cfdata_detach(cfdatav)) != 0)
    446 		goto out;
    447 	if ((error = frob_cfattachvec(cfattachv,
    448 	    config_cfattach_detach_iattrs, config_cfattach_attach_iattrs,
    449 	    "fini", false)) != 0) {
    450 		if (config_cfdata_attach(cfdatav, 0) != 0)
    451 			panic("config_cfdata fini rollback failed");
    452 		goto out;
    453 	}
    454 	if ((error = frob_cfdrivervec(cfdriverv,
    455 	    config_cfdriver_detach, config_cfdriver_attach,
    456 	    "fini", false)) != 0) {
    457 		frob_cfattachvec(cfattachv,
    458 	            config_cfattach_attach_iattrs, NULL, "fini rollback", true);
    459 		if (config_cfdata_attach(cfdatav, 0) != 0)
    460 			panic("config_cfdata fini rollback failed");
    461 		goto out;
    462 	}
    463 
    464 	/* Success!  */
    465 	error = 0;
    466 
    467 out:	KERNEL_UNLOCK_ONE(NULL);
    468 	return error;
    469 }
    470 
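/*
 * Illustrative sketch (hypothetical names): a modular driver normally
 * calls these from its modcmd entry point, passing the tables that
 * config(1) generated from the module's ioconf file:
 *
 *	static int
 *	foo_modcmd(modcmd_t cmd, void *arg)
 *	{
 *		switch (cmd) {
 *		case MODULE_CMD_INIT:
 *			return config_init_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		case MODULE_CMD_FINI:
 *			return config_fini_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		default:
 *			return ENOTTY;
 *		}
 *	}
 */
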
    471 void
    472 config_init_mi(void)
    473 {
    474 
    475 	if (!config_initialized)
    476 		config_init();
    477 
    478 	sysctl_detach_setup(NULL);
    479 }
    480 
    481 void
    482 config_deferred(device_t dev)
    483 {
    484 
    485 	KASSERT(KERNEL_LOCKED_P());
    486 
    487 	config_process_deferred(&deferred_config_queue, dev);
    488 	config_process_deferred(&interrupt_config_queue, dev);
    489 	config_process_deferred(&mountroot_config_queue, dev);
    490 }
    491 
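/*
 * Drivers add entries to the queues above via config_defer(9),
 * config_interrupts(9) and config_mountroot(9).  An illustrative
 * sketch, with hypothetical names, of deferring the tail of an attach
 * routine until interrupts are available:
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		// probe work that is safe this early goes here
 *		config_interrupts(self, foo_attach_deferred);
 *	}
 */
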
    492 static void
    493 config_interrupts_thread(void *cookie)
    494 {
    495 	struct deferred_config *dc;
    496 	device_t dev;
    497 
    498 	mutex_enter(&config_misc_lock);
    499 	while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
    500 		TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
    501 		mutex_exit(&config_misc_lock);
    502 
    503 		dev = dc->dc_dev;
    504 		(*dc->dc_func)(dev);
    505 		if (!device_pmf_is_registered(dev))
    506 			aprint_debug_dev(dev,
    507 			    "WARNING: power management not supported\n");
    508 		config_pending_decr(dev);
    509 		kmem_free(dc, sizeof(*dc));
    510 
    511 		mutex_enter(&config_misc_lock);
    512 	}
    513 	mutex_exit(&config_misc_lock);
    514 
    515 	kthread_exit(0);
    516 }
    517 
    518 void
    519 config_create_interruptthreads(void)
    520 {
    521 	int i;
    522 
    523 	for (i = 0; i < interrupt_config_threads; i++) {
    524 		(void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
    525 		    config_interrupts_thread, NULL, NULL, "configintr");
    526 	}
    527 }
    528 
    529 static void
    530 config_mountroot_thread(void *cookie)
    531 {
    532 	struct deferred_config *dc;
    533 
    534 	mutex_enter(&config_misc_lock);
    535 	while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
    536 		TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
    537 		mutex_exit(&config_misc_lock);
    538 
    539 		(*dc->dc_func)(dc->dc_dev);
    540 		kmem_free(dc, sizeof(*dc));
    541 
    542 		mutex_enter(&config_misc_lock);
    543 	}
    544 	mutex_exit(&config_misc_lock);
    545 
    546 	kthread_exit(0);
    547 }
    548 
    549 void
    550 config_create_mountrootthreads(void)
    551 {
    552 	int i;
    553 
    554 	if (!root_is_mounted)
    555 		root_is_mounted = true;
    556 
    557 	mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
    558 				       mountroot_config_threads;
    559 	mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
    560 					     KM_NOSLEEP);
    561 	KASSERT(mountroot_config_lwpids);
    562 	for (i = 0; i < mountroot_config_threads; i++) {
    563 		mountroot_config_lwpids[i] = 0;
    564 		(void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
    565 				     NULL, config_mountroot_thread, NULL,
    566 				     &mountroot_config_lwpids[i],
    567 				     "configroot");
    568 	}
    569 }
    570 
    571 void
    572 config_finalize_mountroot(void)
    573 {
    574 	int i, error;
    575 
    576 	for (i = 0; i < mountroot_config_threads; i++) {
    577 		if (mountroot_config_lwpids[i] == 0)
    578 			continue;
    579 
    580 		error = kthread_join(mountroot_config_lwpids[i]);
    581 		if (error)
    582 			printf("%s: thread %x joined with error %d\n",
    583 			       __func__, i, error);
    584 	}
    585 	kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
    586 }
    587 
    588 /*
    589  * Announce device attach/detach to userland listeners.
    590  */
    591 
    592 int
    593 no_devmon_insert(const char *name, prop_dictionary_t p)
    594 {
    595 
    596 	return SET_ERROR(ENODEV);
    597 }
    598 
    599 static void
    600 devmon_report_device(device_t dev, bool isattach)
    601 {
    602 	prop_dictionary_t ev, dict = device_properties(dev);
    603 	const char *parent;
    604 	const char *what;
    605 	const char *where;
    606 	device_t pdev = device_parent(dev);
    607 
    608 	/* If currently no drvctl device, just return */
    609 	if (devmon_insert_vec == no_devmon_insert)
    610 		return;
    611 
    612 	ev = prop_dictionary_create();
    613 	if (ev == NULL)
    614 		return;
    615 
    616 	what = (isattach ? "device-attach" : "device-detach");
    617 	parent = (pdev == NULL ? "root" : device_xname(pdev));
    618 	if (prop_dictionary_get_string(dict, "location", &where)) {
    619 		prop_dictionary_set_string(ev, "location", where);
    620 		aprint_debug("ev: %s %s at %s in [%s]\n",
    621 		    what, device_xname(dev), parent, where);
    622 	}
    623 	if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
    624 	    !prop_dictionary_set_string(ev, "parent", parent)) {
    625 		prop_object_release(ev);
    626 		return;
    627 	}
    628 
    629 	if ((*devmon_insert_vec)(what, ev) != 0)
    630 		prop_object_release(ev);
    631 }
    632 
    633 /*
    634  * Add a cfdriver to the system.
    635  */
    636 int
    637 config_cfdriver_attach(struct cfdriver *cd)
    638 {
    639 	struct cfdriver *lcd;
    640 
    641 	/* Make sure this driver isn't already in the system. */
    642 	LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
    643 		if (STREQ(lcd->cd_name, cd->cd_name))
    644 			return SET_ERROR(EEXIST);
    645 	}
    646 
    647 	LIST_INIT(&cd->cd_attach);
    648 	LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
    649 
    650 	return 0;
    651 }
    652 
    653 /*
    654  * Remove a cfdriver from the system.
    655  */
    656 int
    657 config_cfdriver_detach(struct cfdriver *cd)
    658 {
    659 	struct alldevs_foray af;
    660 	int i, rc = 0;
    661 
    662 	config_alldevs_enter(&af);
    663 	/* Make sure there are no active instances. */
    664 	for (i = 0; i < cd->cd_ndevs; i++) {
    665 		if (cd->cd_devs[i] != NULL) {
    666 			rc = SET_ERROR(EBUSY);
    667 			break;
    668 		}
    669 	}
    670 	config_alldevs_exit(&af);
    671 
    672 	if (rc != 0)
    673 		return rc;
    674 
    675 	/* ...and no attachments loaded. */
    676 	if (LIST_EMPTY(&cd->cd_attach) == 0)
    677 		return SET_ERROR(EBUSY);
    678 
    679 	LIST_REMOVE(cd, cd_list);
    680 
    681 	KASSERT(cd->cd_devs == NULL);
    682 
    683 	return 0;
    684 }
    685 
    686 /*
    687  * Look up a cfdriver by name.
    688  */
    689 struct cfdriver *
    690 config_cfdriver_lookup(const char *name)
    691 {
    692 	struct cfdriver *cd;
    693 
    694 	LIST_FOREACH(cd, &allcfdrivers, cd_list) {
    695 		if (STREQ(cd->cd_name, name))
    696 			return cd;
    697 	}
    698 
    699 	return NULL;
    700 }
    701 
    702 /*
    703  * Add a cfattach to the specified driver.
    704  */
    705 static int
    706 config_cfattach_attach_iattrs(const char *driver, struct cfattach *ca,
    707     struct cfattachiattr * const *cfias)
    708 {
    709 	struct cfattach *lca;
    710 	struct cfdriver *cd;
    711 
    712 	cd = config_cfdriver_lookup(driver);
    713 	if (cd == NULL)
    714 		return SET_ERROR(ESRCH);
    715 
    716 	/* Make sure this attachment isn't already on this driver. */
    717 	LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
    718 		if (STREQ(lca->ca_name, ca->ca_name))
    719 			return SET_ERROR(EEXIST);
    720 	}
    721 
    722 	LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
    723 
    724 	if (cfias != NULL) {
    725 		struct cfattachiattr *cfia;
    726 		for (; (cfia = *cfias) != NULL; cfias++) {
    727 			if (cfia->cfia_attach == ca) {
    728 				LIST_INSERT_HEAD(&allcfattachiattrs, cfia,
    729 				    cfia_list);
    730 			}
    731 		}
    732 	}
    733 
    734 	return 0;
    735 }
    736 
    737 int
    738 config_cfattach_attach(const char *driver, struct cfattach *ca)
    739 {
    740 	return config_cfattach_attach_iattrs(driver, ca, NULL);
    741 }
    742 
    743 /*
    744  * Remove a cfattach from the specified driver.
    745  */
    746 int
    747 config_cfattach_detach_iattrs(const char *driver, struct cfattach *ca,
    748     struct cfattachiattr * const *cfias __unused)
    749 {
    750 	struct alldevs_foray af;
    751 	struct cfdriver *cd;
    752 	device_t dev;
    753 	int i, rc = 0;
    754 
    755 	cd = config_cfdriver_lookup(driver);
    756 	if (cd == NULL)
    757 		return SET_ERROR(ESRCH);
    758 
    759 	config_alldevs_enter(&af);
    760 	/* Make sure there are no active instances. */
    761 	for (i = 0; i < cd->cd_ndevs; i++) {
    762 		if ((dev = cd->cd_devs[i]) == NULL)
    763 			continue;
    764 		if (dev->dv_cfattach == ca) {
    765 			rc = SET_ERROR(EBUSY);
    766 			break;
    767 		}
    768 	}
    769 	config_alldevs_exit(&af);
    770 
    771 	if (rc != 0)
    772 		return rc;
    773 
    774 	LIST_REMOVE(ca, ca_list);
    775 
    776 	/*
    777 	 * The cfattach is going away, so we always traverse the
    778 	 * list of cfattach iattrs and remove any that reference
     779 	 * it.  The "cfias" argument exists largely so that this function's
     780 	 * signature matches that of config_cfattach_attach_iattrs().
    781 	 */
    782 	struct cfattachiattr *cfia, *next_cfia;
    783 	LIST_FOREACH_SAFE(cfia, &allcfattachiattrs, cfia_list, next_cfia) {
    784 		if (cfia->cfia_attach == ca) {
    785 			LIST_REMOVE(cfia, cfia_list);
    786 		}
    787 	}
    788 
    789 	return 0;
    790 }
    791 
    792 int
    793 config_cfattach_detach(const char *driver, struct cfattach *ca)
    794 {
    795 	return config_cfattach_detach_iattrs(driver, ca, NULL);
    796 }
    797 
    798 /*
    799  * Look up a cfattach by name.
    800  */
    801 static struct cfattach *
    802 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
    803 {
    804 	struct cfattach *ca;
    805 
    806 	LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
    807 		if (STREQ(ca->ca_name, atname))
    808 			return ca;
    809 	}
    810 
    811 	return NULL;
    812 }
    813 
    814 /*
    815  * Look up a cfattach by driver/attachment name.
    816  */
    817 struct cfattach *
    818 config_cfattach_lookup(const char *name, const char *atname)
    819 {
    820 	struct cfdriver *cd;
    821 
    822 	cd = config_cfdriver_lookup(name);
    823 	if (cd == NULL)
    824 		return NULL;
    825 
    826 	return config_cfattach_lookup_cd(cd, atname);
    827 }
    828 
    829 /*
    830  * Apply the matching function and choose the best.  This is used
    831  * a few times and we want to keep the code small.
    832  */
    833 static void
    834 mapply(struct matchinfo *m, cfdata_t cf)
    835 {
    836 	int pri;
    837 
    838 	if (m->fn != NULL) {
    839 		pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
    840 	} else {
    841 		pri = config_match(m->parent, cf, m->aux);
    842 	}
    843 	if (pri > m->pri) {
    844 		m->match = cf;
    845 		m->pri = pri;
    846 	}
    847 }
    848 
    849 int
    850 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
    851 {
    852 	const struct cfiattrdata *ci;
    853 	const struct cflocdesc *cl;
    854 	int nlocs, i;
    855 
    856 	ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver,
    857 	    parent->dv_cfattach);
    858 	KASSERT(ci);
    859 	nlocs = ci->ci_loclen;
    860 	KASSERT(!nlocs || locs);
    861 	for (i = 0; i < nlocs; i++) {
    862 		cl = &ci->ci_locdesc[i];
    863 		if (cl->cld_defaultstr != NULL &&
    864 		    cf->cf_loc[i] == cl->cld_default)
    865 			continue;
    866 		if (cf->cf_loc[i] == locs[i])
    867 			continue;
    868 		return 0;
    869 	}
    870 
    871 	return config_match(parent, cf, aux);
    872 }
    873 
    874 /*
    875  * Helper function: check whether the driver supports the interface attribute
    876  * and return its descriptor structure.
    877  */
    878 static const struct cfiattrdata *
    879 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
    880 {
    881 	const struct cfiattrdata * const *cpp;
    882 
    883 	if (cd->cd_attrs == NULL)
    884 		return 0;
    885 
    886 	for (cpp = cd->cd_attrs; *cpp; cpp++) {
    887 		if (STREQ((*cpp)->ci_name, ia)) {
    888 			/* Match. */
    889 			return *cpp;
    890 		}
    891 	}
    892 	return 0;
    893 }
    894 
    895 /*
    896  * Like above, but for attachments.
    897  */
    898 static const struct cfiattrdata *
    899 cfattach_get_iattr(const struct cfattach *ca, const char *ia)
    900 {
    901 	struct cfattachiattr *cfia;
    902 
    903 	LIST_FOREACH(cfia, &allcfattachiattrs, cfia_list) {
    904 		if (cfia->cfia_attach == ca &&
    905 		    STREQ(cfia->cfia_iattr->ci_name, ia)) {
    906 			/* Match. */
    907 			return cfia->cfia_iattr;
    908 		}
    909 	}
    910 	return NULL;
    911 }
    912 
    913 /*
    914  * Short-hand for looking at both the cfdriver and cfattach of a
    915  * device for an interface attribute.
    916  */
    917 static const struct cfiattrdata *
    918 device_get_iattr(device_t dev, const char *name)
    919 {
    920 	const struct cfiattrdata *ia;
    921 
    922 	ia = cfdriver_get_iattr(dev->dv_cfdriver, name);
    923 	if (ia == NULL) {
    924 		ia = cfattach_get_iattr(dev->dv_cfattach, name);
    925 	}
    926 	return ia;
    927 }
    928 
    929 static int __diagused
    930 cfdriver_iattr_count(const struct cfdriver *cd)
    931 {
    932 	const struct cfiattrdata * const *cpp;
    933 	int i;
    934 
    935 	if (cd->cd_attrs == NULL)
    936 		return 0;
    937 
    938 	for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
    939 		i++;
    940 	}
    941 	return i;
    942 }
    943 
    944 /*
     945  * Look up an interface attribute description by name.
    946  * If the driver is given, consider only its supported attributes.
    947  */
    948 const struct cfiattrdata *
    949 cfiattr_lookup(const char *name, const struct cfdriver *cd,
    950     const struct cfattach *ca)
    951 {
    952 	const struct cfdriver *d;
    953 	const struct cfiattrdata *ia;
    954 
    955 	if (cd) {
    956 		ia = cfdriver_get_iattr(cd, name);
    957 		if (ia == NULL)
    958 			ia = cfattach_get_iattr(ca, name);
    959 		return ia;
    960 	}
    961 
    962 	LIST_FOREACH(d, &allcfdrivers, cd_list) {
    963 		ia = cfdriver_get_iattr(d, name);
    964 		if (ia)
    965 			return ia;
    966 	}
    967 	return 0;
    968 }
    969 
    970 /*
    971  * Determine if `parent' is a potential parent for a device spec based
    972  * on `cfp'.
    973  */
    974 static int
    975 cfparent_match(const device_t parent, const struct cfparent *cfp)
    976 {
    977 	struct cfdriver *pcd;
    978 
    979 	/* We don't match root nodes here. */
    980 	if (cfp == NULL)
    981 		return 0;
    982 
    983 	pcd = parent->dv_cfdriver;
    984 	KASSERT(pcd != NULL);
    985 
    986 	/*
    987 	 * First, ensure this parent has the correct interface
    988 	 * attribute.
    989 	 */
    990 	if (!device_get_iattr(parent, cfp->cfp_iattr))
    991 		return 0;
    992 
    993 	/*
    994 	 * If no specific parent device instance was specified (i.e.
    995 	 * we're attaching to the attribute only), we're done!
    996 	 */
    997 	if (cfp->cfp_parent == NULL)
    998 		return 1;
    999 
   1000 	/*
   1001 	 * Check the parent device's name.
   1002 	 */
   1003 	if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
   1004 		return 0;	/* not the same parent */
   1005 
   1006 	/*
   1007 	 * Make sure the unit number matches.
   1008 	 */
   1009 	if (cfp->cfp_unit == DVUNIT_ANY ||	/* wildcard */
   1010 	    cfp->cfp_unit == parent->dv_unit)
   1011 		return 1;
   1012 
   1013 	/* Unit numbers don't match. */
   1014 	return 0;
   1015 }
   1016 
   1017 /*
    1018  * Helper for config_cfdata_attach(): check whether each existing device
    1019  * could parent an attachment in the given config data table, and rescan.
   1020  */
   1021 static void
   1022 rescan_with_cfdata(const struct cfdata *cf)
   1023 {
   1024 	device_t d;
   1025 	const struct cfdata *cf1;
   1026 	deviter_t di;
   1027 
   1028 	KASSERT(KERNEL_LOCKED_P());
   1029 
   1030 	/*
    1031 	 * "alldevs" is likely longer than a module's cfdata, so make it
   1032 	 * the outer loop.
   1033 	 */
   1034 	for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
   1035 
   1036 		if (!(d->dv_cfattach->ca_rescan))
   1037 			continue;
   1038 
   1039 		for (cf1 = cf; cf1->cf_name; cf1++) {
   1040 
   1041 			if (!cfparent_match(d, cf1->cf_pspec))
   1042 				continue;
   1043 
   1044 			(*d->dv_cfattach->ca_rescan)(d,
   1045 				cfdata_ifattr(cf1), cf1->cf_loc);
   1046 
   1047 			config_deferred(d);
   1048 		}
   1049 	}
   1050 	deviter_release(&di);
   1051 }
   1052 
   1053 /*
   1054  * Attach a supplemental config data table and rescan potential
   1055  * parent devices if required.
   1056  */
   1057 int
   1058 config_cfdata_attach(cfdata_t cf, int scannow)
   1059 {
   1060 	struct cftable *ct;
   1061 
   1062 	KERNEL_LOCK(1, NULL);
   1063 
   1064 	ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
   1065 	ct->ct_cfdata = cf;
   1066 	TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
   1067 
   1068 	if (scannow)
   1069 		rescan_with_cfdata(cf);
   1070 
   1071 	KERNEL_UNLOCK_ONE(NULL);
   1072 
   1073 	return 0;
   1074 }
   1075 
   1076 /*
   1077  * Helper for config_cfdata_detach: check whether a device is
   1078  * found through any attachment in the config data table.
   1079  */
   1080 static int
   1081 dev_in_cfdata(device_t d, cfdata_t cf)
   1082 {
   1083 	const struct cfdata *cf1;
   1084 
   1085 	for (cf1 = cf; cf1->cf_name; cf1++)
   1086 		if (d->dv_cfdata == cf1)
   1087 			return 1;
   1088 
   1089 	return 0;
   1090 }
   1091 
   1092 /*
    1093  * Detach a supplemental config data table.  First detach all devices
    1094  * found through that table (and therefore still holding references to it).
   1095  */
   1096 int
   1097 config_cfdata_detach(cfdata_t cf)
   1098 {
   1099 	device_t d;
   1100 	int error = 0;
   1101 	struct cftable *ct;
   1102 	deviter_t di;
   1103 
   1104 	KERNEL_LOCK(1, NULL);
   1105 
   1106 	for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
   1107 	     d = deviter_next(&di)) {
   1108 		if (!dev_in_cfdata(d, cf))
   1109 			continue;
   1110 		if ((error = config_detach(d, 0)) != 0)
   1111 			break;
   1112 	}
   1113 	deviter_release(&di);
   1114 	if (error) {
   1115 		aprint_error_dev(d, "unable to detach instance\n");
   1116 		goto out;
   1117 	}
   1118 
   1119 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   1120 		if (ct->ct_cfdata == cf) {
   1121 			TAILQ_REMOVE(&allcftables, ct, ct_list);
   1122 			kmem_free(ct, sizeof(*ct));
   1123 			error = 0;
   1124 			goto out;
   1125 		}
   1126 	}
   1127 
   1128 	/* not found -- shouldn't happen */
   1129 	error = SET_ERROR(EINVAL);
   1130 
   1131 out:	KERNEL_UNLOCK_ONE(NULL);
   1132 	return error;
   1133 }
   1134 
   1135 /*
   1136  * Invoke the "match" routine for a cfdata entry on behalf of
   1137  * an external caller, usually a direct config "submatch" routine.
   1138  */
   1139 int
   1140 config_match(device_t parent, cfdata_t cf, void *aux)
   1141 {
   1142 	struct cfattach *ca;
   1143 
   1144 	KASSERT(KERNEL_LOCKED_P());
   1145 
   1146 	ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
   1147 	if (ca == NULL) {
   1148 		/* No attachment for this entry, oh well. */
   1149 		return 0;
   1150 	}
   1151 
   1152 	return (*ca->ca_match)(parent, cf, aux);
   1153 }
   1154 
   1155 /*
   1156  * Invoke the "probe" routine for a cfdata entry on behalf of
   1157  * an external caller, usually an indirect config "search" routine.
   1158  */
   1159 int
   1160 config_probe(device_t parent, cfdata_t cf, void *aux)
   1161 {
   1162 	/*
   1163 	 * This is currently a synonym for config_match(), but this
   1164 	 * is an implementation detail; "match" and "probe" routines
   1165 	 * have different behaviors.
   1166 	 *
   1167 	 * XXX config_probe() should return a bool, because there is
   1168 	 * XXX no match score for probe -- it's either there or it's
   1169 	 * XXX not, but some ports abuse the return value as a way
   1170 	 * XXX to attach "critical" devices before "non-critical"
   1171 	 * XXX devices.
   1172 	 */
   1173 	return config_match(parent, cf, aux);
   1174 }
   1175 
   1176 static struct cfargs_internal *
   1177 cfargs_canonicalize(const struct cfargs * const cfargs,
   1178     struct cfargs_internal * const store)
   1179 {
   1180 	struct cfargs_internal *args = store;
   1181 
   1182 	memset(args, 0, sizeof(*args));
   1183 
    1184 	/* If none specified, the all-NULL pointers are fine. */
   1185 	if (cfargs == NULL) {
   1186 		return args;
   1187 	}
   1188 
   1189 	/*
    1190 	 * Only one version of the cfargs structure is recognized at this time.
   1191 	 */
   1192 	if (cfargs->cfargs_version != CFARGS_VERSION) {
   1193 		panic("cfargs_canonicalize: unknown version %lu\n",
   1194 		    (unsigned long)cfargs->cfargs_version);
   1195 	}
   1196 
   1197 	/*
   1198 	 * submatch and search are mutually-exclusive.
   1199 	 */
   1200 	if (cfargs->submatch != NULL && cfargs->search != NULL) {
   1201 		panic("cfargs_canonicalize: submatch and search are "
   1202 		      "mutually-exclusive");
   1203 	}
   1204 	if (cfargs->submatch != NULL) {
   1205 		args->submatch = cfargs->submatch;
   1206 	} else if (cfargs->search != NULL) {
   1207 		args->search = cfargs->search;
   1208 	}
   1209 
   1210 	args->iattr = cfargs->iattr;
   1211 	args->locators = cfargs->locators;
   1212 	args->devhandle = cfargs->devhandle;
   1213 
   1214 	return args;
   1215 }
   1216 
   1217 /*
   1218  * Iterate over all potential children of some device, calling the given
   1219  * function (default being the child's match function) for each one.
   1220  * Nonzero returns are matches; the highest value returned is considered
   1221  * the best match.  Return the `found child' if we got a match, or NULL
   1222  * otherwise.  The `aux' pointer is simply passed on through.
   1223  *
   1224  * Note that this function is designed so that it can be used to apply
   1225  * an arbitrary function to all potential children (its return value
   1226  * can be ignored).
   1227  */
   1228 static cfdata_t
   1229 config_search_internal(device_t parent, void *aux,
   1230     const struct cfargs_internal * const args)
   1231 {
   1232 	struct cftable *ct;
   1233 	cfdata_t cf;
   1234 	struct matchinfo m;
   1235 
   1236 	KASSERT(config_initialized);
   1237 	KASSERTMSG((!args->iattr ||
   1238 		device_get_iattr(parent, args->iattr)),
   1239 	    "%s searched for child at interface attribute %s,"
   1240 	    " but device %s(4) has no such interface attribute in config(5)",
   1241 	    device_xname(parent), args->iattr,
   1242 	    parent->dv_cfdriver->cd_name);
   1243 	KASSERTMSG((args->iattr ||
   1244 		cfdriver_iattr_count(parent->dv_cfdriver) < 2),
   1245 	    "%s searched for child without interface attribute,"
    1246 	    " needed to disambiguate among the %d declared for %s(4)"
   1247 	    " in config(5)",
   1248 	    device_xname(parent),
   1249 	    cfdriver_iattr_count(parent->dv_cfdriver),
   1250 	    parent->dv_cfdriver->cd_name);
   1251 
   1252 	m.fn = args->submatch;		/* N.B. union */
   1253 	m.parent = parent;
   1254 	m.locs = args->locators;
   1255 	m.aux = aux;
   1256 	m.match = NULL;
   1257 	m.pri = 0;
   1258 
   1259 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   1260 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
   1261 
   1262 			/* We don't match root nodes here. */
   1263 			if (!cf->cf_pspec)
   1264 				continue;
   1265 
   1266 			/*
   1267 			 * Skip cf if no longer eligible, otherwise scan
   1268 			 * through parents for one matching `parent', and
   1269 			 * try match function.
   1270 			 */
   1271 			if (cf->cf_fstate == FSTATE_FOUND)
   1272 				continue;
   1273 			if (cf->cf_fstate == FSTATE_DNOTFOUND ||
   1274 			    cf->cf_fstate == FSTATE_DSTAR)
   1275 				continue;
   1276 
   1277 			/*
   1278 			 * If an interface attribute was specified,
   1279 			 * consider only children which attach to
   1280 			 * that attribute.
   1281 			 */
   1282 			if (args->iattr != NULL &&
   1283 			    !STREQ(args->iattr, cfdata_ifattr(cf)))
   1284 				continue;
   1285 
   1286 			if (cfparent_match(parent, cf->cf_pspec))
   1287 				mapply(&m, cf);
   1288 		}
   1289 	}
   1290 	rnd_add_uint32(&rnd_autoconf_source, 0);
   1291 	return m.match;
   1292 }
   1293 
   1294 cfdata_t
   1295 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
   1296 {
   1297 	cfdata_t cf;
   1298 	struct cfargs_internal store;
   1299 
   1300 	cf = config_search_internal(parent, aux,
   1301 	    cfargs_canonicalize(cfargs, &store));
   1302 
   1303 	return cf;
   1304 }
   1305 
   1306 /*
   1307  * Find the given root device.
   1308  * This is much like config_search, but there is no parent.
   1309  * Don't bother with multiple cfdata tables; the root node
   1310  * must always be in the initial table.
   1311  */
   1312 cfdata_t
   1313 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
   1314 {
   1315 	cfdata_t cf;
   1316 	const short *p;
   1317 	struct matchinfo m;
   1318 
   1319 	m.fn = fn;
   1320 	m.parent = ROOT;
   1321 	m.aux = aux;
   1322 	m.match = NULL;
   1323 	m.pri = 0;
   1324 	m.locs = 0;
   1325 	/*
   1326 	 * Look at root entries for matching name.  We do not bother
   1327 	 * with found-state here since only one root should ever be
   1328 	 * searched (and it must be done first).
   1329 	 */
   1330 	for (p = cfroots; *p >= 0; p++) {
   1331 		cf = &cfdata[*p];
   1332 		if (strcmp(cf->cf_name, rootname) == 0)
   1333 			mapply(&m, cf);
   1334 	}
   1335 	return m.match;
   1336 }
   1337 
   1338 static const char * const msgs[] = {
   1339 [QUIET]		=	"",
   1340 [UNCONF]	=	" not configured\n",
   1341 [UNSUPP]	=	" unsupported\n",
   1342 };
   1343 
   1344 /*
   1345  * The given `aux' argument describes a device that has been found
   1346  * on the given parent, but not necessarily configured.  Locate the
   1347  * configuration data for that device (using the submatch function
   1348  * provided, or using candidates' cd_match configuration driver
   1349  * functions) and attach it, and return its device_t.  If the device was
   1350  * not configured, call the given `print' function and return NULL.
   1351  */
   1352 device_t
   1353 config_found_acquire(device_t parent, void *aux, cfprint_t print,
   1354     const struct cfargs * const cfargs)
   1355 {
   1356 	cfdata_t cf;
   1357 	struct cfargs_internal store;
   1358 	const struct cfargs_internal * const args =
   1359 	    cfargs_canonicalize(cfargs, &store);
   1360 	device_t dev;
   1361 
   1362 	KERNEL_LOCK(1, NULL);
   1363 
   1364 	cf = config_search_internal(parent, aux, args);
   1365 	if (cf != NULL) {
   1366 		dev = config_attach_internal(parent, cf, aux, print, args);
   1367 		goto out;
   1368 	}
   1369 
   1370 	if (print) {
   1371 		if (config_do_twiddle && cold)
   1372 			twiddle();
   1373 
   1374 		const int pret = (*print)(aux, device_xname(parent));
   1375 		KASSERT(pret >= 0);
   1376 		KASSERT(pret < __arraycount(msgs));
   1377 		KASSERT(msgs[pret] != NULL);
   1378 		aprint_normal("%s", msgs[pret]);
   1379 	}
   1380 
   1381 	dev = NULL;
   1382 
   1383 out:	KERNEL_UNLOCK_ONE(NULL);
   1384 	return dev;
   1385 }
   1386 
   1387 /*
   1388  * config_found(parent, aux, print, cfargs)
   1389  *
   1390  *	Legacy entry point for callers whose use of the returned
   1391  *	device_t is not delimited by device_release.
   1392  *
   1393  *	The caller is required to hold the kernel lock as a fragile
   1394  *	defence against races.
   1395  *
   1396  *	Callers should ignore the return value or be converted to
   1397  *	config_found_acquire with a matching device_release once they
   1398  *	have finished with the returned device_t.
   1399  */
   1400 device_t
   1401 config_found(device_t parent, void *aux, cfprint_t print,
   1402     const struct cfargs * const cfargs)
   1403 {
   1404 	device_t dev;
   1405 
   1406 	KASSERT(KERNEL_LOCKED_P());
   1407 
   1408 	dev = config_found_acquire(parent, aux, print, cfargs);
   1409 	if (dev == NULL)
   1410 		return NULL;
   1411 	device_release(dev);
   1412 
   1413 	return dev;
   1414 }
   1415 
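/*
 * Illustrative sketch of direct configuration from a bus attach
 * routine (all names here are hypothetical).  Returning UNCONF from
 * the print function yields the standard " not configured" message
 * from the msgs[] table above when no driver matches:
 *
 *	static int
 *	fooprint(void *aux, const char *pnp)
 *	{
 *		struct foo_attach_args *faa = aux;
 *
 *		if (pnp != NULL)
 *			aprint_normal("foodev at %s", pnp);
 *		aprint_normal(" port %d", faa->faa_port);
 *		return UNCONF;
 *	}
 *
 *	// in foobus's attach routine, once per child discovered:
 *	config_found(self, &faa, fooprint, CFARGS(.iattr = "foobus"));
 */
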
   1416 /*
   1417  * As above, but for root devices.
   1418  */
   1419 device_t
   1420 config_rootfound(const char *rootname, void *aux)
   1421 {
   1422 	cfdata_t cf;
   1423 	device_t dev = NULL;
   1424 
   1425 	KERNEL_LOCK(1, NULL);
   1426 	if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
   1427 		dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
   1428 	else
   1429 		aprint_error("root device %s not configured\n", rootname);
   1430 	KERNEL_UNLOCK_ONE(NULL);
   1431 	return dev;
   1432 }
   1433 
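/*
 * Illustrative sketch: machine-dependent startup code typically roots
 * the device tree with something like the following ("mainbus" is the
 * conventional, though not universal, root device name):
 *
 *	void
 *	cpu_configure(void)
 *	{
 *		if (config_rootfound("mainbus", NULL) == NULL)
 *			panic("configure: mainbus not configured");
 *	}
 */
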
   1434 /* just like sprintf(buf, "%d") except that it works from the end */
   1435 static char *
   1436 number(char *ep, int n)
   1437 {
   1438 
   1439 	*--ep = 0;
   1440 	while (n >= 10) {
   1441 		*--ep = (n % 10) + '0';
   1442 		n /= 10;
   1443 	}
   1444 	*--ep = n + '0';
   1445 	return ep;
   1446 }
   1447 
   1448 /*
   1449  * Expand the size of the cd_devs array if necessary.
   1450  *
   1451  * The caller must hold alldevs_lock. config_makeroom() may release and
   1452  * re-acquire alldevs_lock, so callers should re-check conditions such
   1453  * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
   1454  * returns.
   1455  */
   1456 static void
   1457 config_makeroom(int n, struct cfdriver *cd)
   1458 {
   1459 	int ondevs, nndevs;
   1460 	device_t *osp, *nsp;
   1461 
   1462 	KASSERT(mutex_owned(&alldevs_lock));
   1463 	alldevs_nwrite++;
   1464 
   1465 	/* XXX arithmetic overflow */
   1466 	for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
   1467 		;
   1468 
   1469 	while (n >= cd->cd_ndevs) {
   1470 		/*
   1471 		 * Need to expand the array.
   1472 		 */
   1473 		ondevs = cd->cd_ndevs;
   1474 		osp = cd->cd_devs;
   1475 
   1476 		/*
   1477 		 * Release alldevs_lock around allocation, which may
   1478 		 * sleep.
   1479 		 */
   1480 		mutex_exit(&alldevs_lock);
   1481 		nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
   1482 		mutex_enter(&alldevs_lock);
   1483 
   1484 		/*
   1485 		 * If another thread moved the array while we did
   1486 		 * not hold alldevs_lock, try again.
   1487 		 */
   1488 		if (cd->cd_devs != osp || cd->cd_ndevs != ondevs) {
   1489 			mutex_exit(&alldevs_lock);
   1490 			kmem_free(nsp, sizeof(device_t) * nndevs);
   1491 			mutex_enter(&alldevs_lock);
   1492 			continue;
   1493 		}
   1494 
   1495 		memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
   1496 		if (ondevs != 0)
   1497 			memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
   1498 
   1499 		cd->cd_ndevs = nndevs;
   1500 		cd->cd_devs = nsp;
   1501 		if (ondevs != 0) {
   1502 			mutex_exit(&alldevs_lock);
   1503 			kmem_free(osp, sizeof(device_t) * ondevs);
   1504 			mutex_enter(&alldevs_lock);
   1505 		}
   1506 	}
   1507 	KASSERT(mutex_owned(&alldevs_lock));
   1508 	alldevs_nwrite--;
   1509 }
   1510 
   1511 /*
   1512  * Put dev into the devices list.
   1513  */
   1514 static void
   1515 config_devlink(device_t dev)
   1516 {
   1517 
   1518 	mutex_enter(&alldevs_lock);
   1519 
   1520 	KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
   1521 
   1522 	dev->dv_add_gen = alldevs_gen;
   1523 	/* It is safe to add a device to the tail of the list while
   1524 	 * readers and writers are in the list.
   1525 	 */
   1526 	TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
   1527 	mutex_exit(&alldevs_lock);
   1528 }
   1529 
   1530 static void
   1531 config_devfree(device_t dev)
   1532 {
   1533 
   1534 	KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
   1535 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
   1536 
   1537 	if (dev->dv_cfattach->ca_devsize > 0)
   1538 		kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
   1539 	kmem_free(dev, sizeof(*dev));
   1540 }
   1541 
   1542 /*
   1543  * Caller must hold alldevs_lock.
   1544  */
   1545 static void
   1546 config_devunlink(device_t dev, struct devicelist *garbage)
   1547 {
   1548 	struct device_garbage *dg = &dev->dv_garbage;
   1549 	cfdriver_t cd = device_cfdriver(dev);
   1550 	int i;
   1551 
   1552 	KASSERT(mutex_owned(&alldevs_lock));
   1553 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
   1554 
   1555  	/* Unlink from device list.  Link to garbage list. */
   1556 	TAILQ_REMOVE(&alldevs, dev, dv_list);
   1557 	TAILQ_INSERT_TAIL(garbage, dev, dv_list);
   1558 
   1559 	/* Remove from cfdriver's array. */
   1560 	cd->cd_devs[dev->dv_unit] = NULL;
   1561 
   1562 	/*
   1563 	 * If the device now has no units in use, unlink its softc array.
   1564 	 */
   1565 	for (i = 0; i < cd->cd_ndevs; i++) {
   1566 		if (cd->cd_devs[i] != NULL)
   1567 			break;
   1568 	}
   1569 	/* Nothing found.  Unlink, now.  Deallocate, later. */
   1570 	if (i == cd->cd_ndevs) {
   1571 		dg->dg_ndevs = cd->cd_ndevs;
   1572 		dg->dg_devs = cd->cd_devs;
   1573 		cd->cd_devs = NULL;
   1574 		cd->cd_ndevs = 0;
   1575 	}
   1576 }
   1577 
   1578 static void
   1579 config_devdelete(device_t dev)
   1580 {
   1581 	struct device_garbage *dg = &dev->dv_garbage;
   1582 	device_lock_t dvl = device_getlock(dev);
   1583 
   1584 	KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
   1585 
   1586 	if (dg->dg_devs != NULL)
   1587 		kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
   1588 
   1589 	localcount_fini(dev->dv_localcount);
   1590 	kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
   1591 
   1592 	cv_destroy(&dvl->dvl_cv);
   1593 	mutex_destroy(&dvl->dvl_mtx);
   1594 
   1595 	KASSERT(dev->dv_properties != NULL);
   1596 	prop_object_release(dev->dv_properties);
   1597 
   1598 	if (dev->dv_activity_handlers)
   1599 		panic("%s with registered handlers", __func__);
   1600 
   1601 	if (dev->dv_locators) {
   1602 		size_t amount = *--dev->dv_locators;
   1603 		kmem_free(dev->dv_locators, amount);
   1604 	}
   1605 
   1606 	config_devfree(dev);
   1607 }
   1608 
   1609 static int
   1610 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
   1611 {
   1612 	int unit = cf->cf_unit;
   1613 
   1614 	KASSERT(mutex_owned(&alldevs_lock));
   1615 
   1616 	if (unit < 0)
   1617 		return -1;
   1618 	if (cf->cf_fstate == FSTATE_STAR) {
   1619 		for (; unit < cd->cd_ndevs; unit++)
   1620 			if (cd->cd_devs[unit] == NULL)
   1621 				break;
   1622 		/*
   1623 		 * unit is now the unit of the first NULL device pointer,
   1624 		 * or max(cd->cd_ndevs,cf->cf_unit).
   1625 		 */
   1626 	} else {
   1627 		if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
   1628 			unit = -1;
   1629 	}
   1630 	return unit;
   1631 }
   1632 
   1633 static int
   1634 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
   1635 {
   1636 	struct alldevs_foray af;
   1637 	int unit;
   1638 
   1639 	config_alldevs_enter(&af);
   1640 	for (;;) {
   1641 		unit = config_unit_nextfree(cd, cf);
   1642 		if (unit == -1)
   1643 			break;
   1644 		if (unit < cd->cd_ndevs) {
   1645 			cd->cd_devs[unit] = dev;
   1646 			dev->dv_unit = unit;
   1647 			break;
   1648 		}
   1649 		config_makeroom(unit, cd);
   1650 	}
   1651 	config_alldevs_exit(&af);
   1652 
   1653 	return unit;
   1654 }
   1655 
   1656 static device_t
   1657 config_devalloc(const device_t parent, const cfdata_t cf,
   1658     const struct cfargs_internal * const args)
   1659 {
   1660 	cfdriver_t cd;
   1661 	cfattach_t ca;
   1662 	size_t lname, lunit;
   1663 	const char *xunit;
   1664 	int myunit;
   1665 	char num[10];
   1666 	device_t dev;
   1667 	void *dev_private;
   1668 	const struct cfiattrdata *ia;
   1669 	device_lock_t dvl;
   1670 
   1671 	cd = config_cfdriver_lookup(cf->cf_name);
   1672 	if (cd == NULL)
   1673 		return NULL;
   1674 
   1675 	ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
   1676 	if (ca == NULL)
   1677 		return NULL;
   1678 
   1679 	/* get memory for all device vars */
   1680 	KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
   1681 	if (ca->ca_devsize > 0) {
   1682 		dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
   1683 	} else {
   1684 		dev_private = NULL;
   1685 	}
   1686 	dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
   1687 
   1688 	dev->dv_handle = args->devhandle;
   1689 
   1690 	dev->dv_class = cd->cd_class;
   1691 	dev->dv_cfdata = cf;
   1692 	dev->dv_cfdriver = cd;
   1693 	dev->dv_cfattach = ca;
   1694 	dev->dv_activity_count = 0;
   1695 	dev->dv_activity_handlers = NULL;
   1696 	dev->dv_private = dev_private;
   1697 	dev->dv_flags = ca->ca_flags;	/* inherit flags from class */
   1698 	dev->dv_attaching = curlwp;
   1699 
   1700 	myunit = config_unit_alloc(dev, cd, cf);
   1701 	if (myunit == -1) {
   1702 		config_devfree(dev);
   1703 		return NULL;
   1704 	}
   1705 
   1706 	/* compute length of name and decimal expansion of unit number */
   1707 	lname = strlen(cd->cd_name);
   1708 	xunit = number(&num[sizeof(num)], myunit);
   1709 	lunit = &num[sizeof(num)] - xunit;
   1710 	if (lname + lunit > sizeof(dev->dv_xname))
   1711 		panic("config_devalloc: device name too long");
   1712 
   1713 	dvl = device_getlock(dev);
   1714 
   1715 	mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
   1716 	cv_init(&dvl->dvl_cv, "pmfsusp");
   1717 
   1718 	memcpy(dev->dv_xname, cd->cd_name, lname);
   1719 	memcpy(dev->dv_xname + lname, xunit, lunit);
   1720 	dev->dv_parent = parent;
   1721 	if (parent != NULL)
   1722 		dev->dv_depth = parent->dv_depth + 1;
   1723 	else
   1724 		dev->dv_depth = 0;
   1725 	dev->dv_flags |= DVF_ACTIVE;	/* always initially active */
   1726 	if (args->locators) {
   1727 		KASSERT(parent); /* no locators at root */
   1728 		ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver,
   1729 		    parent->dv_cfattach);
   1730 		dev->dv_locators =
   1731 		    kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
   1732 		*dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
   1733 		memcpy(dev->dv_locators, args->locators,
   1734 		    sizeof(int) * ia->ci_loclen);
   1735 	}
   1736 	dev->dv_properties = prop_dictionary_create();
   1737 	KASSERT(dev->dv_properties != NULL);
   1738 
   1739 	prop_dictionary_set_string_nocopy(dev->dv_properties,
   1740 	    "device-driver", dev->dv_cfdriver->cd_name);
   1741 	prop_dictionary_set_uint16(dev->dv_properties,
   1742 	    "device-unit", dev->dv_unit);
   1743 	if (parent != NULL) {
   1744 		prop_dictionary_set_string(dev->dv_properties,
   1745 		    "device-parent", device_xname(parent));
   1746 	}
   1747 
   1748 	dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
   1749 	    KM_SLEEP);
   1750 	localcount_init(dev->dv_localcount);
   1751 
   1752 	if (dev->dv_cfdriver->cd_attrs != NULL)
   1753 		config_add_attrib_dict(dev);
   1754 
   1755 	return dev;
   1756 }
   1757 
   1758 /*
   1759  * Create an array of device attach attributes and add it
   1760  * to the device's dv_properties dictionary.
   1761  *
   1762  * <key>interface-attributes</key>
   1763  * <array>
   1764  *    <dict>
   1765  *       <key>attribute-name</key>
   1766  *       <string>foo</string>
   1767  *       <key>locators</key>
   1768  *       <array>
   1769  *          <dict>
   1770  *             <key>loc-name</key>
   1771  *             <string>foo-loc1</string>
   1772  *          </dict>
   1773  *          <dict>
   1774  *             <key>loc-name</key>
   1775  *             <string>foo-loc2</string>
   1776  *             <key>default</key>
   1777  *             <string>foo-loc2-default</string>
   1778  *          </dict>
   1779  *          ...
   1780  *       </array>
   1781  *    </dict>
   1782  *    ...
   1783  * </array>
   1784  */
   1785 
   1786 static void
   1787 config_add_attrib_dict(device_t dev)
   1788 {
   1789 	int i, j;
   1790 	const struct cfiattrdata *ci;
   1791 	prop_dictionary_t attr_dict, loc_dict;
   1792 	prop_array_t attr_array, loc_array;
   1793 
   1794 	if ((attr_array = prop_array_create()) == NULL)
   1795 		return;
   1796 
   1797 	for (i = 0; ; i++) {
   1798 		if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
   1799 			break;
   1800 		if ((attr_dict = prop_dictionary_create()) == NULL)
   1801 			break;
   1802 		prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
   1803 		    ci->ci_name);
   1804 
   1805 		/* Create an array of the locator names and defaults */
   1806 
   1807 		if (ci->ci_loclen != 0 &&
   1808 		    (loc_array = prop_array_create()) != NULL) {
   1809 			for (j = 0; j < ci->ci_loclen; j++) {
   1810 				loc_dict = prop_dictionary_create();
   1811 				if (loc_dict == NULL)
   1812 					continue;
   1813 				prop_dictionary_set_string_nocopy(loc_dict,
   1814 				    "loc-name", ci->ci_locdesc[j].cld_name);
   1815 				if (ci->ci_locdesc[j].cld_defaultstr != NULL)
   1816 					prop_dictionary_set_string_nocopy(
   1817 					    loc_dict, "default",
   1818 					    ci->ci_locdesc[j].cld_defaultstr);
   1819 				prop_array_set(loc_array, j, loc_dict);
   1820 				prop_object_release(loc_dict);
   1821 			}
   1822 			prop_dictionary_set_and_rel(attr_dict, "locators",
   1823 			    loc_array);
   1824 		}
   1825 		prop_array_add(attr_array, attr_dict);
   1826 		prop_object_release(attr_dict);
   1827 	}
   1828 	if (i == 0)
   1829 		prop_object_release(attr_array);
   1830 	else
   1831 		prop_dictionary_set_and_rel(dev->dv_properties,
   1832 		    "interface-attributes", attr_array);
   1833 
   1834 	return;
   1835 }
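
/*
 * Illustrative example (a sketch only, not used by the kernel itself):
 * a consumer holding an attached device_t could walk the property built
 * above via device_properties(9) and proplib(3), e.g. to list the
 * attribute names.  The variable names here are arbitrary, and the
 * proplib accessors are assumed to have their usual signatures.
 *
 *	prop_dictionary_t props = device_properties(dev);
 *	prop_array_t attrs;
 *	prop_dictionary_t one;
 *	const char *name;
 *	unsigned int n;
 *
 *	attrs = prop_dictionary_get(props, "interface-attributes");
 *	if (attrs != NULL) {
 *		for (n = 0; n < prop_array_count(attrs); n++) {
 *			one = prop_array_get(attrs, n);
 *			if (prop_dictionary_get_string(one,
 *			    "attribute-name", &name))
 *				printf("attribute %s\n", name);
 *		}
 *	}
 */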
   1836 
   1837 static void
   1838 config_device_register(device_t dev, void *aux)
   1839 {
   1840 	struct device_register_args args = {
   1841 		.aux = aux,
   1842 	};
   1843 
   1844 	/* We don't really care if this fails. */
   1845 	device_call(dev, DEVICE_REGISTER(&args));
   1846 
   1847 	device_register(dev, aux);
   1848 }
   1849 
   1850 /*
   1851  * Attach a found device.
   1852  *
   1853  * Returns the device referenced, to be released with device_release.
   1854  */
   1855 static device_t
   1856 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
   1857     const struct cfargs_internal * const args)
   1858 {
   1859 	device_t dev;
   1860 	struct cftable *ct;
   1861 	const char *drvname;
   1862 	bool deferred;
   1863 
   1864 	KASSERT(KERNEL_LOCKED_P());
   1865 
   1866 	dev = config_devalloc(parent, cf, args);
   1867 	if (!dev)
   1868 		panic("config_attach: allocation of device softc failed");
   1869 
   1870 	/* XXX redundant - see below? */
   1871 	if (cf->cf_fstate != FSTATE_STAR) {
   1872 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
   1873 		cf->cf_fstate = FSTATE_FOUND;
   1874 	}
   1875 
   1876 	config_devlink(dev);
   1877 
   1878 	if (config_do_twiddle && cold)
   1879 		twiddle();
   1880 	else
   1881 		aprint_naive("Found ");
   1882 	/*
   1883 	 * We want the next two printfs for normal, verbose, and quiet,
   1884 	 * but not silent (in which case, we're twiddling, instead).
   1885 	 */
   1886 	if (parent == ROOT) {
   1887 		aprint_naive("%s (root)", device_xname(dev));
   1888 		aprint_normal("%s (root)", device_xname(dev));
   1889 	} else {
   1890 		aprint_naive("%s at %s", device_xname(dev),
   1891 		    device_xname(parent));
   1892 		aprint_normal("%s at %s", device_xname(dev),
   1893 		    device_xname(parent));
   1894 		if (print)
   1895 			(void) (*print)(aux, NULL);
   1896 	}
   1897 
   1898 	/*
   1899 	 * Before attaching, clobber any unfound devices that are
   1900 	 * otherwise identical.
   1901 	 * XXX code above is redundant?
   1902 	 */
   1903 	drvname = dev->dv_cfdriver->cd_name;
   1904 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   1905 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
   1906 			if (STREQ(cf->cf_name, drvname) &&
   1907 			    cf->cf_unit == dev->dv_unit) {
   1908 				if (cf->cf_fstate == FSTATE_NOTFOUND)
   1909 					cf->cf_fstate = FSTATE_FOUND;
   1910 			}
   1911 		}
   1912 	}
   1913 	config_device_register(dev, aux);
   1914 
   1915 	/* Let userland know */
   1916 	devmon_report_device(dev, true);
   1917 
   1918 	/*
   1919 	 * Prevent detach until the driver's attach function, and all
   1920 	 * deferred actions, have finished.
   1921 	 */
   1922 	config_pending_incr(dev);
   1923 
   1924 	/*
   1925 	 * Prevent concurrent detach from destroying the device_t until
   1926 	 * the caller has released the device.
   1927 	 */
   1928 	device_acquire(dev);
   1929 
   1930 	/* Call the driver's attach function.  */
   1931 	(*dev->dv_cfattach->ca_attach)(parent, dev, aux);
   1932 
   1933 	/*
   1934 	 * Allow other threads to acquire references to the device now
   1935 	 * that the driver's attach function is done.
   1936 	 */
   1937 	mutex_enter(&config_misc_lock);
   1938 	KASSERT(dev->dv_attaching == curlwp);
   1939 	dev->dv_attaching = NULL;
   1940 	cv_broadcast(&config_misc_cv);
   1941 	mutex_exit(&config_misc_lock);
   1942 
   1943 	/*
   1944 	 * Synchronous parts of attach are done.  Allow detach, unless
   1945 	 * the driver's attach function scheduled deferred actions.
   1946 	 */
   1947 	config_pending_decr(dev);
   1948 
   1949 	mutex_enter(&config_misc_lock);
   1950 	deferred = (dev->dv_pending != 0);
   1951 	mutex_exit(&config_misc_lock);
   1952 
   1953 	if (!deferred && !device_pmf_is_registered(dev))
   1954 		aprint_debug_dev(dev,
   1955 		    "WARNING: power management not supported\n");
   1956 
   1957 	config_process_deferred(&deferred_config_queue, dev);
   1958 
   1959 	device_register_post_config(dev, aux);
   1960 	rnd_add_uint32(&rnd_autoconf_source, 0);
   1961 	return dev;
   1962 }
   1963 
   1964 device_t
   1965 config_attach_acquire(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
   1966     const struct cfargs *cfargs)
   1967 {
   1968 	struct cfargs_internal store;
   1969 	device_t dev;
   1970 
   1971 	KERNEL_LOCK(1, NULL);
   1972 	dev = config_attach_internal(parent, cf, aux, print,
   1973 	    cfargs_canonicalize(cfargs, &store));
   1974 	KERNEL_UNLOCK_ONE(NULL);
   1975 
   1976 	return dev;
   1977 }
   1978 
   1979 /*
   1980  * config_attach(parent, cf, aux, print, cfargs)
   1981  *
   1982  *	Legacy entry point for callers whose use of the returned
   1983  *	device_t is not delimited by device_release.
   1984  *
   1985  *	The caller is required to hold the kernel lock as a fragile
   1986  *	defence against races.
   1987  *
   1988  *	Callers should ignore the return value or be converted to
   1989  *	config_attach_acquire with a matching device_release once they
   1990  *	have finished with the returned device_t.
   1991  */
   1992 device_t
   1993 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
   1994     const struct cfargs *cfargs)
   1995 {
   1996 	device_t dev;
   1997 
   1998 	KASSERT(KERNEL_LOCKED_P());
   1999 
   2000 	dev = config_attach_acquire(parent, cf, aux, print, cfargs);
   2001 	if (dev == NULL)
   2002 		return NULL;
   2003 	device_release(dev);
   2004 
   2005 	return dev;
   2006 }
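
/*
 * Illustrative example (a sketch only): the preferred pattern described
 * above, where the caller brackets its use of the child with
 * config_attach_acquire and device_release.  "cf", "aux", and
 * "fooprint" are hypothetical, and CFARGS_NONE stands in for whatever
 * cfargs the bus would normally pass.
 *
 *	device_t child;
 *
 *	child = config_attach_acquire(self, cf, aux, fooprint, CFARGS_NONE);
 *	if (child != NULL) {
 *		... use child ...
 *		device_release(child);
 *	}
 */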
   2007 
   2008 /*
   2009  * As above, but for pseudo-devices.  Pseudo-devices attached in this
   2010  * way are silently inserted into the device tree, and their children
   2011  * attached.
   2012  *
    2013  * Note that because pseudo-devices are attached silently, the attach
    2014  * routine itself must prefix anything it wishes to print with the
    2015  * device name.
   2016  */
   2017 device_t
   2018 config_attach_pseudo_acquire(cfdata_t cf, void *aux)
   2019 {
   2020 	device_t dev;
   2021 
   2022 	KERNEL_LOCK(1, NULL);
   2023 
   2024 	struct cfargs_internal args = { };
   2025 	dev = config_devalloc(ROOT, cf, &args);
   2026 	if (!dev)
   2027 		goto out;
   2028 
   2029 	/* XXX mark busy in cfdata */
   2030 
   2031 	if (cf->cf_fstate != FSTATE_STAR) {
   2032 		KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
   2033 		cf->cf_fstate = FSTATE_FOUND;
   2034 	}
   2035 
   2036 	config_devlink(dev);
   2037 
   2038 #if 0	/* XXXJRT not yet */
   2039 	config_device_register(dev, NULL);	/* like a root node */
   2040 #endif
   2041 
   2042 	/* Let userland know */
   2043 	devmon_report_device(dev, true);
   2044 
   2045 	/*
   2046 	 * Prevent detach until the driver's attach function, and all
   2047 	 * deferred actions, have finished.
   2048 	 */
   2049 	config_pending_incr(dev);
   2050 
   2051 	/*
   2052 	 * Prevent concurrent detach from destroying the device_t until
   2053 	 * the caller has released the device.
   2054 	 */
   2055 	device_acquire(dev);
   2056 
   2057 	/* Call the driver's attach function.  */
   2058 	(*dev->dv_cfattach->ca_attach)(ROOT, dev, aux);
   2059 
   2060 	/*
   2061 	 * Allow other threads to acquire references to the device now
   2062 	 * that the driver's attach function is done.
   2063 	 */
   2064 	mutex_enter(&config_misc_lock);
   2065 	KASSERT(dev->dv_attaching == curlwp);
   2066 	dev->dv_attaching = NULL;
   2067 	cv_broadcast(&config_misc_cv);
   2068 	mutex_exit(&config_misc_lock);
   2069 
   2070 	/*
   2071 	 * Synchronous parts of attach are done.  Allow detach, unless
   2072 	 * the driver's attach function scheduled deferred actions.
   2073 	 */
   2074 	config_pending_decr(dev);
   2075 
   2076 	config_process_deferred(&deferred_config_queue, dev);
   2077 
   2078 out:	KERNEL_UNLOCK_ONE(NULL);
   2079 	return dev;
   2080 }
   2081 
   2082 /*
   2083  * config_attach_pseudo(cf)
   2084  *
   2085  *	Legacy entry point for callers whose use of the returned
   2086  *	device_t is not delimited by device_release.
   2087  *
   2088  *	The caller is required to hold the kernel lock as a fragile
   2089  *	defence against races.
   2090  *
   2091  *	Callers should ignore the return value or be converted to
   2092  *	config_attach_pseudo_acquire with a matching device_release
   2093  *	once they have finished with the returned device_t.  As a
   2094  *	bonus, config_attach_pseudo_acquire can pass a non-null aux
   2095  *	argument into the driver's attach routine.
   2096  */
   2097 device_t
   2098 config_attach_pseudo(cfdata_t cf)
   2099 {
   2100 	device_t dev;
   2101 
   2102 	dev = config_attach_pseudo_acquire(cf, NULL);
   2103 	if (dev == NULL)
   2104 		return dev;
   2105 	device_release(dev);
   2106 
   2107 	return dev;
   2108 }
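
/*
 * Illustrative example (a sketch only): a pseudo-device typically
 * supplies a static cfdata and attaches an instance from its *attach()
 * entry point.  The "foo" driver and its cfattach/cfdriver glue are
 * hypothetical.
 *
 *	static struct cfdata foo_cfdata = {
 *		.cf_name = "foo",
 *		.cf_atname = "foo",
 *		.cf_unit = 0,
 *		.cf_fstate = FSTATE_STAR,
 *	};
 *
 *	void
 *	fooattach(int count)
 *	{
 *		if (config_attach_pseudo(&foo_cfdata) == NULL)
 *			aprint_error("foo: unable to attach pseudo-device\n");
 *	}
 */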
   2109 
   2110 /*
   2111  * Caller must hold alldevs_lock.
   2112  */
   2113 static void
   2114 config_collect_garbage(struct devicelist *garbage)
   2115 {
   2116 	device_t dv;
   2117 
   2118 	KASSERT(!cpu_intr_p());
   2119 	KASSERT(!cpu_softintr_p());
   2120 	KASSERT(mutex_owned(&alldevs_lock));
   2121 
   2122 	while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
   2123 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
   2124 			if (dv->dv_del_gen != 0)
   2125 				break;
   2126 		}
   2127 		if (dv == NULL) {
   2128 			alldevs_garbage = false;
   2129 			break;
   2130 		}
   2131 		config_devunlink(dv, garbage);
   2132 	}
   2133 	KASSERT(mutex_owned(&alldevs_lock));
   2134 }
   2135 
   2136 static void
   2137 config_dump_garbage(struct devicelist *garbage)
   2138 {
   2139 	device_t dv;
   2140 
   2141 	while ((dv = TAILQ_FIRST(garbage)) != NULL) {
   2142 		TAILQ_REMOVE(garbage, dv, dv_list);
   2143 		config_devdelete(dv);
   2144 	}
   2145 }
   2146 
   2147 static int
   2148 config_detach_enter(device_t dev)
   2149 {
   2150 	struct lwp *l __diagused;
   2151 	int error = 0;
   2152 
   2153 	mutex_enter(&config_misc_lock);
   2154 
   2155 	/*
   2156 	 * Wait until attach has fully completed, and until any
   2157 	 * concurrent detach (e.g., drvctl racing with USB event
   2158 	 * thread) has completed.
   2159 	 *
   2160 	 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
   2161 	 * deviter) to ensure the winner of the race doesn't free the
   2162 	 * device leading the loser of the race into use-after-free.
   2163 	 *
   2164 	 * XXX Not all callers do this!
   2165 	 */
   2166 	while (dev->dv_pending || dev->dv_detaching) {
   2167 		KASSERTMSG(dev->dv_detaching != curlwp,
   2168 		    "recursively detaching %s", device_xname(dev));
   2169 		error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
   2170 		if (error)
   2171 			goto out;
   2172 	}
   2173 
   2174 	/*
   2175 	 * Attach has completed, and no other concurrent detach is
   2176 	 * running.  Claim the device for detaching.  This will cause
   2177 	 * all new attempts to acquire references to block.
   2178 	 */
   2179 	KASSERTMSG((l = dev->dv_attaching) == NULL,
   2180 	    "lwp %ld [%s] @ %p attaching %s",
   2181 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2182 	    device_xname(dev));
   2183 	KASSERTMSG((l = dev->dv_detaching) == NULL,
   2184 	    "lwp %ld [%s] @ %p detaching %s",
   2185 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2186 	    device_xname(dev));
   2187 	dev->dv_detaching = curlwp;
   2188 
   2189 out:	mutex_exit(&config_misc_lock);
   2190 	return error;
   2191 }
   2192 
   2193 static void
   2194 config_detach_exit(device_t dev)
   2195 {
   2196 	struct lwp *l __diagused;
   2197 
   2198 	mutex_enter(&config_misc_lock);
   2199 	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
   2200 	    device_xname(dev));
   2201 	KASSERTMSG((l = dev->dv_detaching) == curlwp,
   2202 	    "lwp %ld [%s] @ %p detaching %s",
   2203 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2204 	    device_xname(dev));
   2205 	dev->dv_detaching = NULL;
   2206 	cv_broadcast(&config_misc_cv);
   2207 	mutex_exit(&config_misc_lock);
   2208 }
   2209 
   2210 /*
   2211  * Detach a device.  Optionally forced (e.g. because of hardware
   2212  * removal) and quiet.  Returns zero if successful, non-zero
   2213  * (an error code) otherwise.
   2214  *
   2215  * Note that this code wants to be run from a process context, so
   2216  * that the detach can sleep to allow processes which have a device
   2217  * open to run and unwind their stacks.
   2218  *
   2219  * Caller must hold a reference with device_acquire or
   2220  * device_lookup_acquire.
   2221  */
   2222 int
   2223 config_detach_release(device_t dev, int flags)
   2224 {
   2225 	struct alldevs_foray af;
   2226 	struct cftable *ct;
   2227 	cfdata_t cf;
   2228 	const struct cfattach *ca;
   2229 	struct cfdriver *cd;
   2230 	device_t d __diagused;
   2231 	int rv = 0;
   2232 
   2233 	KERNEL_LOCK(1, NULL);
   2234 
   2235 	cf = dev->dv_cfdata;
   2236 	KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
   2237 		cf->cf_fstate == FSTATE_STAR),
   2238 	    "config_detach: %s: bad device fstate: %d",
   2239 	    device_xname(dev), cf ? cf->cf_fstate : -1);
   2240 
   2241 	cd = dev->dv_cfdriver;
   2242 	KASSERT(cd != NULL);
   2243 
   2244 	ca = dev->dv_cfattach;
   2245 	KASSERT(ca != NULL);
   2246 
   2247 	/*
   2248 	 * Only one detach at a time, please -- and not until fully
   2249 	 * attached.
   2250 	 */
   2251 	rv = config_detach_enter(dev);
   2252 	device_release(dev);
   2253 	if (rv) {
   2254 		KERNEL_UNLOCK_ONE(NULL);
   2255 		return rv;
   2256 	}
   2257 
   2258 	mutex_enter(&alldevs_lock);
   2259 	if (dev->dv_del_gen != 0) {
   2260 		mutex_exit(&alldevs_lock);
   2261 #ifdef DIAGNOSTIC
   2262 		printf("%s: %s is already detached\n", __func__,
   2263 		    device_xname(dev));
   2264 #endif /* DIAGNOSTIC */
   2265 		config_detach_exit(dev);
   2266 		KERNEL_UNLOCK_ONE(NULL);
   2267 		return SET_ERROR(ENOENT);
   2268 	}
   2269 	alldevs_nwrite++;
   2270 	mutex_exit(&alldevs_lock);
   2271 
   2272 	/*
   2273 	 * Call the driver's .ca_detach function, unless it has none or
   2274 	 * we are skipping it because it's unforced shutdown time and
   2275 	 * the driver didn't ask to detach on shutdown.
   2276 	 */
   2277 	if (!detachall &&
   2278 	    (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
   2279 	    (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
   2280 		rv = SET_ERROR(EOPNOTSUPP);
   2281 	} else if (ca->ca_detach != NULL) {
   2282 		rv = (*ca->ca_detach)(dev, flags);
   2283 	} else
   2284 		rv = SET_ERROR(EOPNOTSUPP);
   2285 
   2286 	KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
   2287 	    device_xname(dev), rv);
   2288 
   2289 	/*
   2290 	 * If it was not possible to detach the device, then we either
   2291 	 * panic() (for the forced but failed case), or return an error.
   2292 	 */
   2293 	if (rv) {
   2294 		/*
   2295 		 * Detach failed -- likely EOPNOTSUPP or EBUSY.  Driver
   2296 		 * must not have called config_detach_commit.
   2297 		 */
   2298 		KASSERTMSG(!dev->dv_detach_committed,
   2299 		    "%s committed to detaching and then backed out, error=%d",
   2300 		    device_xname(dev), rv);
   2301 		if (flags & DETACH_FORCE) {
   2302 			panic("config_detach: forced detach of %s failed (%d)",
   2303 			    device_xname(dev), rv);
   2304 		}
   2305 		goto out;
   2306 	}
   2307 
   2308 	/*
   2309 	 * The device has now been successfully detached.
   2310 	 */
   2311 	dev->dv_detach_done = true;
   2312 
   2313 	/*
   2314 	 * If .ca_detach didn't commit to detach, then do that for it.
   2315 	 * This wakes any pending device_lookup_acquire calls so they
   2316 	 * will fail.
   2317 	 */
   2318 	config_detach_commit(dev);
   2319 
   2320 	/*
   2321 	 * If it was possible to detach the device, ensure that the
   2322 	 * device is deactivated.
   2323 	 */
   2324 	dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
   2325 
   2326 	/*
   2327 	 * Wait for all device_lookup_acquire references -- mostly, for
   2328 	 * all attempts to open the device -- to drain.  It is the
   2329 	 * responsibility of .ca_detach to ensure anything with open
   2330 	 * references will be interrupted and release them promptly,
   2331 	 * not block indefinitely.  All new attempts to acquire
   2332 	 * references will fail, as config_detach_commit has arranged
   2333 	 * by now.
   2334 	 */
   2335 	mutex_enter(&config_misc_lock);
   2336 	localcount_drain(dev->dv_localcount,
   2337 	    &config_misc_cv, &config_misc_lock);
   2338 	mutex_exit(&config_misc_lock);
   2339 
   2340 	/* Let userland know */
   2341 	devmon_report_device(dev, false);
   2342 
   2343 #ifdef DIAGNOSTIC
   2344 	/*
   2345 	 * Sanity: If you're successfully detached, you should have no
   2346 	 * children.  (Note that because children must be attached
   2347 	 * after parents, we only need to search the latter part of
   2348 	 * the list.)
   2349 	 */
   2350 	mutex_enter(&alldevs_lock);
   2351 	for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
   2352 	    d = TAILQ_NEXT(d, dv_list)) {
   2353 		if (d->dv_parent == dev && d->dv_del_gen == 0) {
   2354 			printf("config_detach: detached device %s"
   2355 			    " has children %s\n", device_xname(dev),
   2356 			    device_xname(d));
   2357 			panic("config_detach");
   2358 		}
   2359 	}
   2360 	mutex_exit(&alldevs_lock);
   2361 #endif
   2362 
   2363 	/* notify the parent that the child is gone */
   2364 	if (dev->dv_parent) {
   2365 		device_t p = dev->dv_parent;
   2366 		if (p->dv_cfattach->ca_childdetached)
   2367 			(*p->dv_cfattach->ca_childdetached)(p, dev);
   2368 	}
   2369 
   2370 	/*
   2371 	 * Mark cfdata to show that the unit can be reused, if possible.
   2372 	 */
   2373 	TAILQ_FOREACH(ct, &allcftables, ct_list) {
   2374 		for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
   2375 			if (STREQ(cf->cf_name, cd->cd_name)) {
   2376 				if (cf->cf_fstate == FSTATE_FOUND &&
   2377 				    cf->cf_unit == dev->dv_unit)
   2378 					cf->cf_fstate = FSTATE_NOTFOUND;
   2379 			}
   2380 		}
   2381 	}
   2382 
   2383 	if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
   2384 		aprint_normal_dev(dev, "detached\n");
   2385 
   2386 out:
   2387 	config_detach_exit(dev);
   2388 
   2389 	config_alldevs_enter(&af);
   2390 	KASSERT(alldevs_nwrite != 0);
   2391 	--alldevs_nwrite;
   2392 	if (rv == 0 && dev->dv_del_gen == 0) {
   2393 		if (alldevs_nwrite == 0 && alldevs_nread == 0)
   2394 			config_devunlink(dev, &af.af_garbage);
   2395 		else {
   2396 			dev->dv_del_gen = alldevs_gen;
   2397 			alldevs_garbage = true;
   2398 		}
   2399 	}
   2400 	config_alldevs_exit(&af);
   2401 
   2402 	KERNEL_UNLOCK_ONE(NULL);
   2403 
   2404 	return rv;
   2405 }
   2406 
   2407 /*
   2408  * config_detach(dev, flags)
   2409  *
   2410  *	Legacy entry point for callers that have not acquired a
   2411  *	reference to dev.
   2412  *
   2413  *	The caller is required to hold the kernel lock as a fragile
   2414  *	defence against races.
   2415  *
   2416  *	Callers should be converted to use device_acquire under a lock
   2417  *	taken also by .ca_childdetached to synchronize access to the
    2418  *	device_t, and then config_detach_release outside the lock.
   2419  *	Alternatively, most drivers detach children only in their own
   2420  *	detach routines, which can be done with config_detach_children
   2421  *	instead.
   2422  */
   2423 int
   2424 config_detach(device_t dev, int flags)
   2425 {
   2426 
   2427 	device_acquire(dev);
   2428 	return config_detach_release(dev, flags);
   2429 }
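
/*
 * Illustrative example (a sketch only): the conversion suggested above,
 * assuming a hypothetical softc lock sc_lock that also protects the
 * sc_child pointer cleared by the driver's .ca_childdetached hook.
 *
 *	device_t child;
 *	int error = 0;
 *
 *	mutex_enter(&sc->sc_lock);
 *	if ((child = sc->sc_child) != NULL)
 *		device_acquire(child);
 *	mutex_exit(&sc->sc_lock);
 *
 *	if (child != NULL)
 *		error = config_detach_release(child, flags);
 */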
   2430 
   2431 /*
   2432  * config_detach_commit(dev)
   2433  *
   2434  *	Issued by a driver's .ca_detach routine to notify anyone
   2435  *	waiting in device_lookup_acquire that the driver is committed
   2436  *	to detaching the device, which allows device_lookup_acquire to
   2437  *	wake up and fail immediately.
   2438  *
   2439  *	Safe to call multiple times -- idempotent.  Must be called
   2440  *	during config_detach_enter/exit.  Safe to use with
   2441  *	device_lookup because the device is not actually removed from
   2442  *	the table until after config_detach_exit.
   2443  */
   2444 void
   2445 config_detach_commit(device_t dev)
   2446 {
   2447 	struct lwp *l __diagused;
   2448 
   2449 	mutex_enter(&config_misc_lock);
   2450 	KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
   2451 	    device_xname(dev));
   2452 	KASSERTMSG((l = dev->dv_detaching) == curlwp,
   2453 	    "lwp %ld [%s] @ %p detaching %s",
   2454 	    (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
   2455 	    device_xname(dev));
   2456 	dev->dv_detach_committed = true;
   2457 	cv_broadcast(&config_misc_cv);
   2458 	mutex_exit(&config_misc_lock);
   2459 }
   2460 
   2461 int
   2462 config_detach_children(device_t parent, int flags)
   2463 {
   2464 	device_t dv;
   2465 	deviter_t di;
   2466 	int error = 0;
   2467 
   2468 	KASSERT(KERNEL_LOCKED_P());
   2469 
   2470 	for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
   2471 	     dv = deviter_next(&di)) {
   2472 		if (device_parent(dv) != parent)
   2473 			continue;
   2474 		if ((error = config_detach(dv, flags)) != 0)
   2475 			break;
   2476 	}
   2477 	deviter_release(&di);
   2478 	return error;
   2479 }
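
/*
 * Illustrative example (a sketch only): a bus driver detaching its
 * children from its own .ca_detach routine, as suggested above.  The
 * "foo" names are hypothetical.
 *
 *	static int
 *	foo_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		if ((error = config_detach_children(self, flags)) != 0)
 *			return error;
 *		... release bus resources ...
 *		return 0;
 *	}
 */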
   2480 
   2481 device_t
   2482 shutdown_first(struct shutdown_state *s)
   2483 {
   2484 	if (!s->initialized) {
   2485 		deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
   2486 		s->initialized = true;
   2487 	}
   2488 	return shutdown_next(s);
   2489 }
   2490 
   2491 device_t
   2492 shutdown_next(struct shutdown_state *s)
   2493 {
   2494 	device_t dv;
   2495 
   2496 	while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
   2497 		;
   2498 
   2499 	if (dv == NULL)
   2500 		s->initialized = false;
   2501 
   2502 	return dv;
   2503 }
   2504 
   2505 bool
   2506 config_detach_all(int how)
   2507 {
   2508 	static struct shutdown_state s;
   2509 	device_t curdev;
   2510 	bool progress = false;
   2511 	int flags;
   2512 
   2513 	KERNEL_LOCK(1, NULL);
   2514 
   2515 	if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
   2516 		goto out;
   2517 
   2518 	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
   2519 		flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
   2520 	else
   2521 		flags = DETACH_SHUTDOWN;
   2522 
   2523 	for (curdev = shutdown_first(&s); curdev != NULL;
   2524 	     curdev = shutdown_next(&s)) {
   2525 		aprint_debug(" detaching %s, ", device_xname(curdev));
   2526 		if (config_detach(curdev, flags) == 0) {
   2527 			progress = true;
   2528 			aprint_debug("success.");
   2529 		} else
   2530 			aprint_debug("failed.");
   2531 	}
   2532 
   2533 out:	KERNEL_UNLOCK_ONE(NULL);
   2534 	return progress;
   2535 }
   2536 
   2537 static bool
   2538 device_is_ancestor_of(device_t ancestor, device_t descendant)
   2539 {
   2540 	device_t dv;
   2541 
   2542 	for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
   2543 		if (device_parent(dv) == ancestor)
   2544 			return true;
   2545 	}
   2546 	return false;
   2547 }
   2548 
   2549 int
   2550 config_deactivate(device_t dev)
   2551 {
   2552 	deviter_t di;
   2553 	const struct cfattach *ca;
   2554 	device_t descendant;
   2555 	int s, rv = 0, oflags;
   2556 
   2557 	for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
   2558 	     descendant != NULL;
   2559 	     descendant = deviter_next(&di)) {
   2560 		if (dev != descendant &&
   2561 		    !device_is_ancestor_of(dev, descendant))
   2562 			continue;
   2563 
   2564 		if ((descendant->dv_flags & DVF_ACTIVE) == 0)
   2565 			continue;
   2566 
   2567 		ca = descendant->dv_cfattach;
   2568 		oflags = descendant->dv_flags;
   2569 
   2570 		descendant->dv_flags &= ~DVF_ACTIVE;
   2571 		if (ca->ca_activate == NULL)
   2572 			continue;
   2573 		s = splhigh();
   2574 		rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
   2575 		splx(s);
   2576 		if (rv != 0)
   2577 			descendant->dv_flags = oflags;
   2578 	}
   2579 	deviter_release(&di);
   2580 	return rv;
   2581 }
   2582 
   2583 /*
   2584  * Defer the configuration of the specified device until all
   2585  * of its parent's devices have been attached.
   2586  */
   2587 void
   2588 config_defer(device_t dev, void (*func)(device_t))
   2589 {
   2590 	struct deferred_config *dc;
   2591 
   2592 	if (dev->dv_parent == NULL)
   2593 		panic("config_defer: can't defer config of a root device");
   2594 
   2595 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
   2596 
   2597 	config_pending_incr(dev);
   2598 
   2599 	mutex_enter(&config_misc_lock);
   2600 #ifdef DIAGNOSTIC
   2601 	struct deferred_config *odc;
   2602 	TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
   2603 		if (odc->dc_dev == dev)
   2604 			panic("config_defer: deferred twice");
   2605 	}
   2606 #endif
   2607 	dc->dc_dev = dev;
   2608 	dc->dc_func = func;
   2609 	TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
   2610 	mutex_exit(&config_misc_lock);
   2611 }
   2612 
   2613 /*
   2614  * Defer some autoconfiguration for a device until after interrupts
   2615  * are enabled.
   2616  */
   2617 void
   2618 config_interrupts(device_t dev, void (*func)(device_t))
   2619 {
   2620 	struct deferred_config *dc;
   2621 
   2622 	/*
    2623 	 * If interrupts are already enabled, invoke the callback now.
   2624 	 */
   2625 	if (cold == 0) {
   2626 		(*func)(dev);
   2627 		return;
   2628 	}
   2629 
   2630 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
   2631 
   2632 	config_pending_incr(dev);
   2633 
   2634 	mutex_enter(&config_misc_lock);
   2635 #ifdef DIAGNOSTIC
   2636 	struct deferred_config *odc;
   2637 	TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
   2638 		if (odc->dc_dev == dev)
   2639 			panic("config_interrupts: deferred twice");
   2640 	}
   2641 #endif
   2642 	dc->dc_dev = dev;
   2643 	dc->dc_func = func;
   2644 	TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
   2645 	mutex_exit(&config_misc_lock);
   2646 }
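
/*
 * Illustrative example (a sketch only): a driver whose attach routine
 * needs working interrupts (e.g. to converse with firmware) typically
 * finishes its setup from a config_interrupts(9) callback.  The "foo"
 * names are hypothetical.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		... early, interrupt-free setup ...
 *		config_interrupts(self, foo_attach_deferred);
 *	}
 *
 *	static void
 *	foo_attach_deferred(device_t self)
 *	{
 *		... setup that requires interrupts to be enabled ...
 *	}
 */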
   2647 
   2648 /*
    2649  * Defer some autoconfiguration for a device until after the root file
    2650  * system is mounted (to load firmware, etc.).
   2651  */
   2652 void
   2653 config_mountroot(device_t dev, void (*func)(device_t))
   2654 {
   2655 	struct deferred_config *dc;
   2656 
   2657 	/*
    2658 	 * If the root file system is already mounted, invoke the callback now.
   2659 	 */
   2660 	if (root_is_mounted) {
   2661 		(*func)(dev);
   2662 		return;
   2663 	}
   2664 
   2665 	dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
   2666 
   2667 	mutex_enter(&config_misc_lock);
   2668 #ifdef DIAGNOSTIC
   2669 	struct deferred_config *odc;
   2670 	TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
   2671 		if (odc->dc_dev == dev)
   2672 			panic("%s: deferred twice", __func__);
   2673 	}
   2674 #endif
   2675 
   2676 	dc->dc_dev = dev;
   2677 	dc->dc_func = func;
   2678 	TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
   2679 	mutex_exit(&config_misc_lock);
   2680 }
   2681 
   2682 /*
   2683  * Process a deferred configuration queue.
   2684  */
   2685 static void
   2686 config_process_deferred(struct deferred_config_head *queue, device_t parent)
   2687 {
   2688 	struct deferred_config *dc;
   2689 
   2690 	KASSERT(KERNEL_LOCKED_P());
   2691 
   2692 	mutex_enter(&config_misc_lock);
   2693 	dc = TAILQ_FIRST(queue);
   2694 	while (dc) {
   2695 		if (parent == NULL || dc->dc_dev->dv_parent == parent) {
   2696 			TAILQ_REMOVE(queue, dc, dc_queue);
   2697 			mutex_exit(&config_misc_lock);
   2698 
   2699 			(*dc->dc_func)(dc->dc_dev);
   2700 			config_pending_decr(dc->dc_dev);
   2701 			kmem_free(dc, sizeof(*dc));
   2702 
   2703 			mutex_enter(&config_misc_lock);
   2704 			/* Restart, queue might have changed */
   2705 			dc = TAILQ_FIRST(queue);
   2706 		} else {
   2707 			dc = TAILQ_NEXT(dc, dc_queue);
   2708 		}
   2709 	}
   2710 	mutex_exit(&config_misc_lock);
   2711 }
   2712 
   2713 /*
   2714  * Manipulate the config_pending semaphore.
   2715  */
   2716 void
   2717 config_pending_incr(device_t dev)
   2718 {
   2719 
   2720 	mutex_enter(&config_misc_lock);
   2721 	KASSERTMSG(dev->dv_pending < INT_MAX,
   2722 	    "%s: excess config_pending_incr", device_xname(dev));
   2723 	if (dev->dv_pending++ == 0)
   2724 		TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
   2725 #ifdef DEBUG_AUTOCONF
   2726 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
   2727 #endif
   2728 	mutex_exit(&config_misc_lock);
   2729 }
   2730 
   2731 void
   2732 config_pending_decr(device_t dev)
   2733 {
   2734 
   2735 	mutex_enter(&config_misc_lock);
   2736 	KASSERTMSG(dev->dv_pending > 0,
   2737 	    "%s: excess config_pending_decr", device_xname(dev));
   2738 	if (--dev->dv_pending == 0) {
   2739 		TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
   2740 		cv_broadcast(&config_misc_cv);
   2741 	}
   2742 #ifdef DEBUG_AUTOCONF
   2743 	printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
   2744 #endif
   2745 	mutex_exit(&config_misc_lock);
   2746 }
   2747 
   2748 /*
   2749  * Register a "finalization" routine.  Finalization routines are
   2750  * called iteratively once all real devices have been found during
   2751  * autoconfiguration, for as long as any one finalizer has done
   2752  * any work.
   2753  */
   2754 int
   2755 config_finalize_register(device_t dev, int (*fn)(device_t))
   2756 {
   2757 	struct finalize_hook *f;
   2758 	int error = 0;
   2759 
   2760 	KERNEL_LOCK(1, NULL);
   2761 
   2762 	/*
   2763 	 * If finalization has already been done, invoke the
   2764 	 * callback function now.
   2765 	 */
   2766 	if (config_finalize_done) {
   2767 		while ((*fn)(dev) != 0)
   2768 			/* loop */ ;
   2769 		goto out;
   2770 	}
   2771 
   2772 	/* Ensure this isn't already on the list. */
   2773 	TAILQ_FOREACH(f, &config_finalize_list, f_list) {
   2774 		if (f->f_func == fn && f->f_dev == dev) {
   2775 			error = SET_ERROR(EEXIST);
   2776 			goto out;
   2777 		}
   2778 	}
   2779 
   2780 	f = kmem_alloc(sizeof(*f), KM_SLEEP);
   2781 	f->f_func = fn;
   2782 	f->f_dev = dev;
   2783 	TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
   2784 
   2785 	/* Success!  */
   2786 	error = 0;
   2787 
   2788 out:	KERNEL_UNLOCK_ONE(NULL);
   2789 	return error;
   2790 }
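
/*
 * Illustrative example (a sketch only): a finalizer usually rescans for
 * devices that could not be configured earlier and reports whether it
 * made progress, so the loop in config_finalize() knows to iterate
 * again.  The "foo" names are hypothetical.
 *
 *	static int
 *	foo_finalize(device_t self)
 *	{
 *
 *		return foo_rescan(self) > 0;
 *	}
 *
 *	...
 *	config_finalize_register(self, foo_finalize);
 */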
   2791 
   2792 void
   2793 config_finalize(void)
   2794 {
   2795 	struct finalize_hook *f;
   2796 	struct pdevinit *pdev;
   2797 	extern struct pdevinit pdevinit[];
   2798 	unsigned t0 = getticks();
   2799 	int errcnt, rv;
   2800 
   2801 	/*
   2802 	 * Now that device driver threads have been created, wait for
   2803 	 * them to finish any deferred autoconfiguration.
   2804 	 */
   2805 	mutex_enter(&config_misc_lock);
   2806 	while (!TAILQ_EMPTY(&config_pending)) {
   2807 		const unsigned t1 = getticks();
   2808 
   2809 		if (t1 - t0 >= hz) {
   2810 			void (*pr)(const char *, ...) __printflike(1,2);
   2811 			device_t dev;
   2812 
   2813 			if (t1 - t0 >= 60*hz) {
   2814 				pr = aprint_normal;
   2815 				t0 = t1;
   2816 			} else {
   2817 				pr = aprint_debug;
   2818 			}
   2819 
   2820 			(*pr)("waiting for devices:");
   2821 			TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
   2822 				(*pr)(" %s", device_xname(dev));
   2823 			(*pr)("\n");
   2824 		}
   2825 
   2826 		(void)cv_timedwait(&config_misc_cv, &config_misc_lock,
   2827 		    mstohz(1000));
   2828 	}
   2829 	mutex_exit(&config_misc_lock);
   2830 
   2831 	KERNEL_LOCK(1, NULL);
   2832 
   2833 	/* Attach pseudo-devices. */
   2834 	for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
   2835 		(*pdev->pdev_attach)(pdev->pdev_count);
   2836 
   2837 	/* Run the hooks until none of them does any work. */
   2838 	do {
   2839 		rv = 0;
   2840 		TAILQ_FOREACH(f, &config_finalize_list, f_list)
   2841 			rv |= (*f->f_func)(f->f_dev);
   2842 	} while (rv != 0);
   2843 
   2844 	config_finalize_done = 1;
   2845 
   2846 	/* Now free all the hooks. */
   2847 	while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
   2848 		TAILQ_REMOVE(&config_finalize_list, f, f_list);
   2849 		kmem_free(f, sizeof(*f));
   2850 	}
   2851 
   2852 	KERNEL_UNLOCK_ONE(NULL);
   2853 
   2854 	errcnt = aprint_get_error_count();
   2855 	if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
   2856 	    (boothowto & AB_VERBOSE) == 0) {
   2857 		mutex_enter(&config_misc_lock);
   2858 		if (config_do_twiddle) {
   2859 			config_do_twiddle = 0;
   2860 			printf_nolog(" done.\n");
   2861 		}
   2862 		mutex_exit(&config_misc_lock);
   2863 	}
   2864 	if (errcnt != 0) {
   2865 		printf("WARNING: %d error%s while detecting hardware; "
   2866 		    "check system log.\n", errcnt,
   2867 		    errcnt == 1 ? "" : "s");
   2868 	}
   2869 }
   2870 
   2871 void
   2872 config_twiddle_init(void)
   2873 {
   2874 
   2875 	if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
   2876 		config_do_twiddle = 1;
   2877 	}
   2878 	callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
   2879 }
   2880 
   2881 void
   2882 config_twiddle_fn(void *cookie)
   2883 {
   2884 
   2885 	mutex_enter(&config_misc_lock);
   2886 	if (config_do_twiddle) {
   2887 		twiddle();
   2888 		callout_schedule(&config_twiddle_ch, mstohz(100));
   2889 	}
   2890 	mutex_exit(&config_misc_lock);
   2891 }
   2892 
   2893 static void
   2894 config_alldevs_enter(struct alldevs_foray *af)
   2895 {
   2896 	TAILQ_INIT(&af->af_garbage);
   2897 	mutex_enter(&alldevs_lock);
   2898 	config_collect_garbage(&af->af_garbage);
   2899 }
   2900 
   2901 static void
   2902 config_alldevs_exit(struct alldevs_foray *af)
   2903 {
   2904 	mutex_exit(&alldevs_lock);
   2905 	config_dump_garbage(&af->af_garbage);
   2906 }
   2907 
   2908 /*
   2909  * device_lookup:
   2910  *
   2911  *	Look up a device instance for a given driver.
   2912  *
   2913  *	Caller is responsible for ensuring the device's state is
   2914  *	stable, either by holding a reference already obtained with
   2915  *	device_lookup_acquire or by otherwise ensuring the device is
   2916  *	attached and can't be detached (e.g., holding an open device
   2917  *	node and ensuring *_detach calls vdevgone).
   2918  *
   2919  *	XXX Find a way to assert this.
   2920  *
   2921  *	Safe for use up to and including interrupt context at IPL_VM.
   2922  *	Never sleeps.
   2923  */
   2924 device_t
   2925 device_lookup(cfdriver_t cd, int unit)
   2926 {
   2927 	device_t dv;
   2928 
   2929 	mutex_enter(&alldevs_lock);
   2930 	if (unit < 0 || unit >= cd->cd_ndevs)
   2931 		dv = NULL;
   2932 	else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
   2933 		dv = NULL;
   2934 	mutex_exit(&alldevs_lock);
   2935 
   2936 	return dv;
   2937 }
   2938 
   2939 /*
   2940  * device_lookup_private:
   2941  *
   2942  *	Look up a softc instance for a given driver.
   2943  */
   2944 void *
   2945 device_lookup_private(cfdriver_t cd, int unit)
   2946 {
   2947 
   2948 	return device_private(device_lookup(cd, unit));
   2949 }
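
/*
 * Illustrative example (a sketch only): the traditional pattern in a
 * character-device open routine, assuming a hypothetical "foo" driver
 * and its config(1)-generated cfdriver foo_cd.
 *
 *	struct foo_softc *sc;
 *
 *	sc = device_lookup_private(&foo_cd, minor(dev));
 *	if (sc == NULL)
 *		return ENXIO;
 */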
   2950 
   2951 /*
   2952  * device_lookup_acquire:
   2953  *
   2954  *	Look up a device instance for a given driver, and return a
   2955  *	reference to it that must be released by device_release.
   2956  *
   2957  *	=> If the device is still attaching, blocks until *_attach has
   2958  *	   returned.
   2959  *
   2960  *	=> If the device is detaching, blocks until *_detach has
   2961  *	   returned.  May succeed or fail in that case, depending on
   2962  *	   whether *_detach has backed out (EBUSY) or committed to
   2963  *	   detaching.
   2964  *
   2965  *	May sleep.
   2966  */
   2967 device_t
   2968 device_lookup_acquire(cfdriver_t cd, int unit)
   2969 {
   2970 	device_t dv;
   2971 
   2972 	ASSERT_SLEEPABLE();
   2973 
   2974 	/* XXX This should have a pserialized fast path -- TBD.  */
   2975 	mutex_enter(&config_misc_lock);
   2976 	mutex_enter(&alldevs_lock);
   2977 retry:	if (unit < 0 || unit >= cd->cd_ndevs ||
   2978 	    (dv = cd->cd_devs[unit]) == NULL ||
   2979 	    dv->dv_del_gen != 0 ||
   2980 	    dv->dv_detach_committed) {
   2981 		dv = NULL;
   2982 	} else {
   2983 		/*
   2984 		 * Wait for the device to stabilize, if attaching or
   2985 		 * detaching.  Either way we must wait for *_attach or
   2986 		 * *_detach to complete, and either way we must retry:
   2987 		 * even if detaching, *_detach might fail (EBUSY) so
   2988 		 * the device may still be there.
   2989 		 */
   2990 		if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
   2991 		    dv->dv_detaching != NULL) {
   2992 			mutex_exit(&alldevs_lock);
   2993 			cv_wait(&config_misc_cv, &config_misc_lock);
   2994 			mutex_enter(&alldevs_lock);
   2995 			goto retry;
   2996 		}
   2997 		device_acquire(dv);
   2998 	}
   2999 	mutex_exit(&alldevs_lock);
   3000 	mutex_exit(&config_misc_lock);
   3001 
   3002 	return dv;
   3003 }
   3004 
   3005 /*
   3006  * device_acquire:
   3007  *
   3008  *	Acquire a reference to a device.  It is the caller's
   3009  *	responsibility to ensure that the device's .ca_detach routine
   3010  *	cannot return before calling this.  Caller must release the
   3011  *	reference with device_release or config_detach_release.
   3012  */
   3013 void
   3014 device_acquire(device_t dv)
   3015 {
   3016 
   3017 	/*
   3018 	 * No lock because the caller has promised that this can't
   3019 	 * change concurrently with device_acquire.
   3020 	 */
   3021 	KASSERTMSG(!dv->dv_detach_done, "%s",
   3022 	    dv == NULL ? "(null)" : device_xname(dv));
   3023 	localcount_acquire(dv->dv_localcount);
   3024 }
   3025 
   3026 /*
   3027  * device_release:
   3028  *
   3029  *	Release a reference to a device acquired with device_acquire or
   3030  *	device_lookup_acquire.
   3031  */
   3032 void
   3033 device_release(device_t dv)
   3034 {
   3035 
   3036 	localcount_release(dv->dv_localcount,
   3037 	    &config_misc_cv, &config_misc_lock);
   3038 }
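
/*
 * Illustrative example (a sketch only): pairing device_lookup_acquire
 * with device_release around use of a device that may be detached
 * concurrently.  The "foo" driver and unit number are hypothetical.
 *
 *	device_t dv;
 *
 *	dv = device_lookup_acquire(&foo_cd, unit);
 *	if (dv == NULL)
 *		return ENXIO;
 *	... use dv, e.g. device_private(dv) ...
 *	device_release(dv);
 */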
   3039 
   3040 /*
   3041  * device_find_by_xname:
   3042  *
   3043  *	Returns the device of the given name or NULL if it doesn't exist.
   3044  */
   3045 device_t
   3046 device_find_by_xname(const char *name)
   3047 {
   3048 	device_t dv;
   3049 	deviter_t di;
   3050 
   3051 	for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
   3052 		if (strcmp(device_xname(dv), name) == 0)
   3053 			break;
   3054 	}
   3055 	deviter_release(&di);
   3056 
   3057 	return dv;
   3058 }
   3059 
   3060 /*
   3061  * device_find_by_driver_unit:
   3062  *
   3063  *	Returns the device of the given driver name and unit or
   3064  *	NULL if it doesn't exist.
   3065  */
   3066 device_t
   3067 device_find_by_driver_unit(const char *name, int unit)
   3068 {
   3069 	struct cfdriver *cd;
   3070 
   3071 	if ((cd = config_cfdriver_lookup(name)) == NULL)
   3072 		return NULL;
   3073 	return device_lookup(cd, unit);
   3074 }
   3075 
   3076 static bool
   3077 match_strcmp(const char * const s1, const char * const s2)
   3078 {
   3079 	return strcmp(s1, s2) == 0;
   3080 }
   3081 
   3082 static bool
   3083 match_pmatch(const char * const s1, const char * const s2)
   3084 {
   3085 	return pmatch(s1, s2, NULL) == 2;
   3086 }
   3087 
   3088 static bool
   3089 strarray_match_internal(const char ** const strings,
   3090     unsigned int const nstrings, const char * const str,
   3091     unsigned int * const indexp,
   3092     bool (*match_fn)(const char *, const char *))
   3093 {
   3094 	unsigned int i;
   3095 
   3096 	if (strings == NULL || nstrings == 0) {
   3097 		return false;
   3098 	}
   3099 
   3100 	for (i = 0; i < nstrings; i++) {
   3101 		if ((*match_fn)(strings[i], str)) {
   3102 			*indexp = i;
   3103 			return true;
   3104 		}
   3105 	}
   3106 
   3107 	return false;
   3108 }
   3109 
   3110 static int
   3111 strarray_match(const char ** const strings, unsigned int const nstrings,
   3112     const char * const str)
   3113 {
   3114 	unsigned int idx;
   3115 
   3116 	if (strarray_match_internal(strings, nstrings, str, &idx,
   3117 				    match_strcmp)) {
   3118 		return (int)(nstrings - idx);
   3119 	}
   3120 	return 0;
   3121 }
   3122 
   3123 static int
   3124 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
   3125     const char * const pattern)
   3126 {
   3127 	unsigned int idx;
   3128 
   3129 	if (strarray_match_internal(strings, nstrings, pattern, &idx,
   3130 				    match_pmatch)) {
   3131 		return (int)(nstrings - idx);
   3132 	}
   3133 	return 0;
   3134 }
   3135 
   3136 static int
   3137 device_compatible_match_strarray_internal(
   3138     const char **device_compats, int ndevice_compats,
   3139     const struct device_compatible_entry *driver_compats,
   3140     const struct device_compatible_entry **matching_entryp,
   3141     int (*match_fn)(const char **, unsigned int, const char *))
   3142 {
   3143 	const struct device_compatible_entry *dce = NULL;
   3144 	int rv;
   3145 
   3146 	if (ndevice_compats == 0 || device_compats == NULL ||
   3147 	    driver_compats == NULL)
   3148 		return 0;
   3149 
   3150 	for (dce = driver_compats; dce->compat != NULL; dce++) {
   3151 		rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
   3152 		if (rv != 0) {
   3153 			if (matching_entryp != NULL) {
   3154 				*matching_entryp = dce;
   3155 			}
   3156 			return rv;
   3157 		}
   3158 	}
   3159 	return 0;
   3160 }
   3161 
   3162 /*
   3163  * device_compatible_match:
   3164  *
   3165  *	Match a driver's "compatible" data against a device's
    3166  *	"compatible" strings.  Returns a result weighted by
   3167  *	which device "compatible" string was matched.
   3168  */
   3169 int
   3170 device_compatible_match(const char **device_compats, int ndevice_compats,
   3171     const struct device_compatible_entry *driver_compats)
   3172 {
   3173 	return device_compatible_match_strarray_internal(device_compats,
   3174 	    ndevice_compats, driver_compats, NULL, strarray_match);
   3175 }
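
/*
 * Illustrative example (a sketch only): a match routine scoring a
 * candidate device by its "compatible" strings.  The "acme,frobnicator"
 * string and the faa_compats/faa_ncompats attach-argument fields are
 * hypothetical; the terminator follows the NULL-compat convention the
 * matcher checks for.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,frobnicator" },
 *		{ .compat = NULL }
 *	};
 *
 *	static int
 *	foo_match(device_t parent, cfdata_t cf, void *aux)
 *	{
 *		struct foo_attach_args *faa = aux;
 *
 *		return device_compatible_match(faa->faa_compats,
 *		    faa->faa_ncompats, compat_data);
 *	}
 */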
   3176 
   3177 /*
   3178  * device_compatible_pmatch:
   3179  *
   3180  *	Like device_compatible_match(), but uses pmatch(9) to compare
   3181  *	the device "compatible" strings against patterns in the
   3182  *	driver's "compatible" data.
   3183  */
   3184 int
   3185 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
   3186     const struct device_compatible_entry *driver_compats)
   3187 {
   3188 	return device_compatible_match_strarray_internal(device_compats,
   3189 	    ndevice_compats, driver_compats, NULL, strarray_pmatch);
   3190 }
   3191 
   3192 static int
   3193 device_compatible_match_strlist_internal(
   3194     const char * const device_compats, size_t const device_compatsize,
   3195     const struct device_compatible_entry *driver_compats,
   3196     const struct device_compatible_entry **matching_entryp,
   3197     int (*match_fn)(const char *, size_t, const char *))
   3198 {
   3199 	const struct device_compatible_entry *dce = NULL;
   3200 	int rv;
   3201 
   3202 	if (device_compats == NULL || device_compatsize == 0 ||
   3203 	    driver_compats == NULL)
   3204 		return 0;
   3205 
   3206 	for (dce = driver_compats; dce->compat != NULL; dce++) {
   3207 		rv = (*match_fn)(device_compats, device_compatsize,
   3208 		    dce->compat);
   3209 		if (rv != 0) {
   3210 			if (matching_entryp != NULL) {
   3211 				*matching_entryp = dce;
   3212 			}
   3213 			return rv;
   3214 		}
   3215 	}
   3216 	return 0;
   3217 }
   3218 
   3219 /*
   3220  * device_compatible_match_strlist:
   3221  *
   3222  *	Like device_compatible_match(), but take the device
    3223  *	Like device_compatible_match(), but takes the device
   3224  *	list.
   3225  */
   3226 int
   3227 device_compatible_match_strlist(
   3228     const char * const device_compats, size_t const device_compatsize,
   3229     const struct device_compatible_entry *driver_compats)
   3230 {
   3231 	return device_compatible_match_strlist_internal(device_compats,
   3232 	    device_compatsize, driver_compats, NULL, strlist_match);
   3233 }
   3234 
   3235 /*
   3236  * device_compatible_pmatch_strlist:
   3237  *
    3238  *	Like device_compatible_pmatch(), but takes the device
   3239  *	"compatible" strings as an OpenFirmware-style string
   3240  *	list.
   3241  */
   3242 int
   3243 device_compatible_pmatch_strlist(
   3244     const char * const device_compats, size_t const device_compatsize,
   3245     const struct device_compatible_entry *driver_compats)
   3246 {
   3247 	return device_compatible_match_strlist_internal(device_compats,
   3248 	    device_compatsize, driver_compats, NULL, strlist_pmatch);
   3249 }
   3250 
   3251 static int
   3252 device_compatible_match_id_internal(
   3253     uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
   3254     const struct device_compatible_entry *driver_compats,
   3255     const struct device_compatible_entry **matching_entryp)
   3256 {
   3257 	const struct device_compatible_entry *dce = NULL;
   3258 
   3259 	if (mask == 0)
   3260 		return 0;
   3261 
   3262 	for (dce = driver_compats; dce->id != sentinel_id; dce++) {
   3263 		if ((id & mask) == dce->id) {
   3264 			if (matching_entryp != NULL) {
   3265 				*matching_entryp = dce;
   3266 			}
   3267 			return 1;
   3268 		}
   3269 	}
   3270 	return 0;
   3271 }
   3272 
   3273 /*
   3274  * device_compatible_match_id:
   3275  *
   3276  *	Like device_compatible_match(), but takes a single
   3277  *	unsigned integer device ID.
   3278  */
   3279 int
   3280 device_compatible_match_id(
   3281     uintptr_t const id, uintptr_t const sentinel_id,
   3282     const struct device_compatible_entry *driver_compats)
   3283 {
   3284 	return device_compatible_match_id_internal(id, (uintptr_t)-1,
   3285 	    sentinel_id, driver_compats, NULL);
   3286 }
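
/*
 * Illustrative example (a sketch only): matching a numeric device ID
 * against a driver table.  The IDs and the choice of 0 as the sentinel
 * value are hypothetical.
 *
 *	static const struct device_compatible_entry foo_ids[] = {
 *		{ .id = 0x1234 },
 *		{ .id = 0x5678 },
 *		{ .id = 0 }
 *	};
 *
 *	if (device_compatible_match_id(chip_id, 0, foo_ids) != 0)
 *		... the driver supports this chip ...
 */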
   3287 
   3288 /*
   3289  * device_compatible_lookup:
   3290  *
   3291  *	Look up and return the device_compatible_entry, using the
   3292  *	same matching criteria used by device_compatible_match().
   3293  */
   3294 const struct device_compatible_entry *
   3295 device_compatible_lookup(const char **device_compats, int ndevice_compats,
   3296 			 const struct device_compatible_entry *driver_compats)
   3297 {
   3298 	const struct device_compatible_entry *dce;
   3299 
   3300 	if (device_compatible_match_strarray_internal(device_compats,
   3301 	    ndevice_compats, driver_compats, &dce, strarray_match)) {
   3302 		return dce;
   3303 	}
   3304 	return NULL;
   3305 }
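
/*
 * Illustrative example (a sketch only): using the returned entry to
 * recover per-variant driver data.  The .data member of the entry is
 * assumed to be available for this purpose; the variant structures and
 * attach-argument fields are hypothetical.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,foo-v1", .data = &foo_v1_config },
 *		{ .compat = "acme,foo-v2", .data = &foo_v2_config },
 *		{ .compat = NULL }
 *	};
 *
 *	const struct device_compatible_entry *dce;
 *
 *	dce = device_compatible_lookup(faa->faa_compats, faa->faa_ncompats,
 *	    compat_data);
 *	if (dce != NULL)
 *		sc->sc_config = dce->data;
 */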
   3306 
   3307 /*
   3308  * device_compatible_plookup:
   3309  *
   3310  *	Look up and return the device_compatible_entry, using the
   3311  *	same matching criteria used by device_compatible_pmatch().
   3312  */
   3313 const struct device_compatible_entry *
   3314 device_compatible_plookup(const char **device_compats, int ndevice_compats,
   3315 			  const struct device_compatible_entry *driver_compats)
   3316 {
   3317 	const struct device_compatible_entry *dce;
   3318 
   3319 	if (device_compatible_match_strarray_internal(device_compats,
   3320 	    ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
   3321 		return dce;
   3322 	}
   3323 	return NULL;
   3324 }
   3325 
   3326 /*
   3327  * device_compatible_lookup_strlist:
   3328  *
    3329  *	Like device_compatible_lookup(), but takes the device
   3330  *	"compatible" strings as an OpenFirmware-style string
   3331  *	list.
   3332  */
   3333 const struct device_compatible_entry *
   3334 device_compatible_lookup_strlist(
   3335     const char * const device_compats, size_t const device_compatsize,
   3336     const struct device_compatible_entry *driver_compats)
   3337 {
   3338 	const struct device_compatible_entry *dce;
   3339 
   3340 	if (device_compatible_match_strlist_internal(device_compats,
   3341 	    device_compatsize, driver_compats, &dce, strlist_match)) {
   3342 		return dce;
   3343 	}
   3344 	return NULL;
   3345 }
   3346 
   3347 /*
   3348  * device_compatible_plookup_strlist:
   3349  *
    3350  *	Like device_compatible_plookup(), but takes the device
   3351  *	"compatible" strings as an OpenFirmware-style string
   3352  *	list.
   3353  */
   3354 const struct device_compatible_entry *
   3355 device_compatible_plookup_strlist(
   3356     const char * const device_compats, size_t const device_compatsize,
   3357     const struct device_compatible_entry *driver_compats)
   3358 {
   3359 	const struct device_compatible_entry *dce;
   3360 
   3361 	if (device_compatible_match_strlist_internal(device_compats,
   3362 	    device_compatsize, driver_compats, &dce, strlist_pmatch)) {
   3363 		return dce;
   3364 	}
   3365 	return NULL;
   3366 }
   3367 
   3368 /*
   3369  * device_compatible_lookup_id:
   3370  *
   3371  *	Like device_compatible_lookup(), but takes a single
   3372  *	unsigned integer device ID.
   3373  */
   3374 const struct device_compatible_entry *
   3375 device_compatible_lookup_id(
   3376     uintptr_t const id, uintptr_t const sentinel_id,
   3377     const struct device_compatible_entry *driver_compats)
   3378 {
   3379 	const struct device_compatible_entry *dce;
   3380 
   3381 	if (device_compatible_match_id_internal(id, (uintptr_t)-1,
   3382 	    sentinel_id, driver_compats, &dce)) {
   3383 		return dce;
   3384 	}
   3385 	return NULL;
   3386 }
   3387 
   3388 /*
   3389  * Power management related functions.
   3390  */
   3391 
   3392 bool
   3393 device_pmf_is_registered(device_t dev)
   3394 {
   3395 	return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
   3396 }
   3397 
   3398 bool
   3399 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
   3400 {
   3401 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
   3402 		return true;
   3403 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
   3404 		return false;
   3405 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
   3406 	    dev->dv_driver_suspend != NULL &&
   3407 	    !(*dev->dv_driver_suspend)(dev, qual))
   3408 		return false;
   3409 
   3410 	dev->dv_flags |= DVF_DRIVER_SUSPENDED;
   3411 	return true;
   3412 }
   3413 
   3414 bool
   3415 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
   3416 {
   3417 	if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
   3418 		return true;
   3419 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
   3420 		return false;
   3421 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
   3422 	    dev->dv_driver_resume != NULL &&
   3423 	    !(*dev->dv_driver_resume)(dev, qual))
   3424 		return false;
   3425 
   3426 	dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
   3427 	return true;
   3428 }
   3429 
   3430 bool
   3431 device_pmf_driver_shutdown(device_t dev, int how)
   3432 {
   3433 
   3434 	if (*dev->dv_driver_shutdown != NULL &&
   3435 	    !(*dev->dv_driver_shutdown)(dev, how))
   3436 		return false;
   3437 	return true;
   3438 }
   3439 
   3440 void
   3441 device_pmf_driver_register(device_t dev,
   3442     bool (*suspend)(device_t, const pmf_qual_t *),
   3443     bool (*resume)(device_t, const pmf_qual_t *),
   3444     bool (*shutdown)(device_t, int))
   3445 {
   3446 
   3447 	dev->dv_driver_suspend = suspend;
   3448 	dev->dv_driver_resume = resume;
   3449 	dev->dv_driver_shutdown = shutdown;
   3450 	dev->dv_flags |= DVF_POWER_HANDLERS;
   3451 }
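
/*
 * Illustrative example (a sketch only): drivers normally reach this
 * through the pmf(9) front end rather than calling the device_pmf_*
 * routines directly, e.g. at the end of a hypothetical foo_attach():
 *
 *	if (!pmf_device_register(self, foo_suspend, foo_resume))
 *		aprint_error_dev(self, "couldn't establish power handler\n");
 */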
   3452 
   3453 void
   3454 device_pmf_driver_deregister(device_t dev)
   3455 {
   3456 	device_lock_t dvl = device_getlock(dev);
   3457 
   3458 	dev->dv_driver_suspend = NULL;
   3459 	dev->dv_driver_resume = NULL;
   3460 
   3461 	mutex_enter(&dvl->dvl_mtx);
   3462 	dev->dv_flags &= ~DVF_POWER_HANDLERS;
   3463 	while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
   3464 		/* Wake a thread that waits for the lock.  That
   3465 		 * thread will fail to acquire the lock, and then
   3466 		 * it will wake the next thread that waits for the
   3467 		 * lock, or else it will wake us.
   3468 		 */
   3469 		cv_signal(&dvl->dvl_cv);
   3470 		pmflock_debug(dev, __func__, __LINE__);
   3471 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
   3472 		pmflock_debug(dev, __func__, __LINE__);
   3473 	}
   3474 	mutex_exit(&dvl->dvl_mtx);
   3475 }
   3476 
   3477 void
   3478 device_pmf_driver_child_register(device_t dev)
   3479 {
   3480 	device_t parent = device_parent(dev);
   3481 
   3482 	if (parent == NULL || parent->dv_driver_child_register == NULL)
   3483 		return;
   3484 	(*parent->dv_driver_child_register)(dev);
   3485 }
   3486 
   3487 void
   3488 device_pmf_driver_set_child_register(device_t dev,
   3489     void (*child_register)(device_t))
   3490 {
   3491 	dev->dv_driver_child_register = child_register;
   3492 }
   3493 
   3494 static void
   3495 pmflock_debug(device_t dev, const char *func, int line)
   3496 {
   3497 #ifdef PMFLOCK_DEBUG
   3498 	device_lock_t dvl = device_getlock(dev);
   3499 	const char *curlwp_name;
   3500 
   3501 	if (curlwp->l_name != NULL)
   3502 		curlwp_name = curlwp->l_name;
   3503 	else
   3504 		curlwp_name = curlwp->l_proc->p_comm;
   3505 
   3506 	aprint_debug_dev(dev,
   3507 	    "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
   3508 	    curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
   3509 #endif	/* PMFLOCK_DEBUG */
   3510 }
   3511 
   3512 static bool
   3513 device_pmf_lock1(device_t dev)
   3514 {
   3515 	device_lock_t dvl = device_getlock(dev);
   3516 
   3517 	while (device_pmf_is_registered(dev) &&
   3518 	    dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
   3519 		dvl->dvl_nwait++;
   3520 		pmflock_debug(dev, __func__, __LINE__);
   3521 		cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
   3522 		pmflock_debug(dev, __func__, __LINE__);
   3523 		dvl->dvl_nwait--;
   3524 	}
   3525 	if (!device_pmf_is_registered(dev)) {
   3526 		pmflock_debug(dev, __func__, __LINE__);
   3527 		/* We could not acquire the lock, but some other thread may
   3528 		 * wait for it, also.  Wake that thread.
   3529 		 */
   3530 		cv_signal(&dvl->dvl_cv);
   3531 		return false;
   3532 	}
   3533 	dvl->dvl_nlock++;
   3534 	dvl->dvl_holder = curlwp;
   3535 	pmflock_debug(dev, __func__, __LINE__);
   3536 	return true;
   3537 }
   3538 
   3539 bool
   3540 device_pmf_lock(device_t dev)
   3541 {
   3542 	bool rc;
   3543 	device_lock_t dvl = device_getlock(dev);
   3544 
   3545 	mutex_enter(&dvl->dvl_mtx);
   3546 	rc = device_pmf_lock1(dev);
   3547 	mutex_exit(&dvl->dvl_mtx);
   3548 
   3549 	return rc;
   3550 }
   3551 
   3552 void
   3553 device_pmf_unlock(device_t dev)
   3554 {
   3555 	device_lock_t dvl = device_getlock(dev);
   3556 
   3557 	KASSERT(dvl->dvl_nlock > 0);
   3558 	mutex_enter(&dvl->dvl_mtx);
   3559 	if (--dvl->dvl_nlock == 0)
   3560 		dvl->dvl_holder = NULL;
   3561 	cv_signal(&dvl->dvl_cv);
   3562 	pmflock_debug(dev, __func__, __LINE__);
   3563 	mutex_exit(&dvl->dvl_mtx);
   3564 }
   3565 
   3566 device_lock_t
   3567 device_getlock(device_t dev)
   3568 {
   3569 	return &dev->dv_lock;
   3570 }
   3571 
   3572 void *
   3573 device_pmf_bus_private(device_t dev)
   3574 {
   3575 	return dev->dv_bus_private;
   3576 }
   3577 
   3578 bool
   3579 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
   3580 {
   3581 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
   3582 		return true;
   3583 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
   3584 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
   3585 		return false;
   3586 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
   3587 	    dev->dv_bus_suspend != NULL &&
   3588 	    !(*dev->dv_bus_suspend)(dev, qual))
   3589 		return false;
   3590 
   3591 	dev->dv_flags |= DVF_BUS_SUSPENDED;
   3592 	return true;
   3593 }
   3594 
   3595 bool
   3596 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
   3597 {
   3598 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
   3599 		return true;
   3600 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
   3601 	    dev->dv_bus_resume != NULL &&
   3602 	    !(*dev->dv_bus_resume)(dev, qual))
   3603 		return false;
   3604 
   3605 	dev->dv_flags &= ~DVF_BUS_SUSPENDED;
   3606 	return true;
   3607 }
   3608 
   3609 bool
   3610 device_pmf_bus_shutdown(device_t dev, int how)
   3611 {
   3612 
    3613 	if (dev->dv_bus_shutdown != NULL &&
   3614 	    !(*dev->dv_bus_shutdown)(dev, how))
   3615 		return false;
   3616 	return true;
   3617 }
   3618 
   3619 void
   3620 device_pmf_bus_register(device_t dev, void *priv,
   3621     bool (*suspend)(device_t, const pmf_qual_t *),
   3622     bool (*resume)(device_t, const pmf_qual_t *),
   3623     bool (*shutdown)(device_t, int), void (*deregister)(device_t))
   3624 {
   3625 	dev->dv_bus_private = priv;
   3626 	dev->dv_bus_resume = resume;
   3627 	dev->dv_bus_suspend = suspend;
   3628 	dev->dv_bus_shutdown = shutdown;
   3629 	dev->dv_bus_deregister = deregister;
   3630 }
   3631 
   3632 void
   3633 device_pmf_bus_deregister(device_t dev)
   3634 {
   3635 	if (dev->dv_bus_deregister == NULL)
   3636 		return;
   3637 	(*dev->dv_bus_deregister)(dev);
   3638 	dev->dv_bus_private = NULL;
   3639 	dev->dv_bus_suspend = NULL;
   3640 	dev->dv_bus_resume = NULL;
   3641 	dev->dv_bus_deregister = NULL;
   3642 }
   3643 
   3644 void *
   3645 device_pmf_class_private(device_t dev)
   3646 {
   3647 	return dev->dv_class_private;
   3648 }
   3649 
   3650 bool
   3651 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
   3652 {
   3653 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
   3654 		return true;
   3655 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
   3656 	    dev->dv_class_suspend != NULL &&
   3657 	    !(*dev->dv_class_suspend)(dev, qual))
   3658 		return false;
   3659 
   3660 	dev->dv_flags |= DVF_CLASS_SUSPENDED;
   3661 	return true;
   3662 }
   3663 
   3664 bool
   3665 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
   3666 {
   3667 	if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
   3668 		return true;
   3669 	if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
   3670 	    (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
   3671 		return false;
   3672 	if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
   3673 	    dev->dv_class_resume != NULL &&
   3674 	    !(*dev->dv_class_resume)(dev, qual))
   3675 		return false;
   3676 
   3677 	dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
   3678 	return true;
   3679 }
   3680 
   3681 void
   3682 device_pmf_class_register(device_t dev, void *priv,
   3683     bool (*suspend)(device_t, const pmf_qual_t *),
   3684     bool (*resume)(device_t, const pmf_qual_t *),
   3685     void (*deregister)(device_t))
   3686 {
   3687 	dev->dv_class_private = priv;
   3688 	dev->dv_class_suspend = suspend;
   3689 	dev->dv_class_resume = resume;
   3690 	dev->dv_class_deregister = deregister;
   3691 }
   3692 
   3693 void
   3694 device_pmf_class_deregister(device_t dev)
   3695 {
   3696 	if (dev->dv_class_deregister == NULL)
   3697 		return;
   3698 	(*dev->dv_class_deregister)(dev);
   3699 	dev->dv_class_private = NULL;
   3700 	dev->dv_class_suspend = NULL;
   3701 	dev->dv_class_resume = NULL;
   3702 	dev->dv_class_deregister = NULL;
   3703 }
   3704 
   3705 bool
   3706 device_active(device_t dev, devactive_t type)
   3707 {
   3708 	size_t i;
   3709 
   3710 	if (dev->dv_activity_count == 0)
   3711 		return false;
   3712 
   3713 	for (i = 0; i < dev->dv_activity_count; ++i) {
   3714 		if (dev->dv_activity_handlers[i] == NULL)
   3715 			break;
   3716 		(*dev->dv_activity_handlers[i])(dev, type);
   3717 	}
   3718 
   3719 	return true;
   3720 }
   3721 
   3722 bool
   3723 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
   3724 {
   3725 	void (**new_handlers)(device_t, devactive_t);
   3726 	void (**old_handlers)(device_t, devactive_t);
   3727 	size_t i, old_size, new_size;
   3728 	int s;
   3729 
   3730 	old_handlers = dev->dv_activity_handlers;
   3731 	old_size = dev->dv_activity_count;
   3732 
   3733 	KASSERT(old_size == 0 || old_handlers != NULL);
   3734 
   3735 	for (i = 0; i < old_size; ++i) {
   3736 		KASSERT(old_handlers[i] != handler);
   3737 		if (old_handlers[i] == NULL) {
   3738 			old_handlers[i] = handler;
   3739 			return true;
   3740 		}
   3741 	}
   3742 
   3743 	new_size = old_size + 4;
   3744 	new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
   3745 
   3746 	for (i = 0; i < old_size; ++i)
   3747 		new_handlers[i] = old_handlers[i];
   3748 	new_handlers[old_size] = handler;
   3749 	for (i = old_size+1; i < new_size; ++i)
   3750 		new_handlers[i] = NULL;
   3751 
   3752 	s = splhigh();
   3753 	dev->dv_activity_count = new_size;
   3754 	dev->dv_activity_handlers = new_handlers;
   3755 	splx(s);
   3756 
   3757 	if (old_size > 0)
   3758 		kmem_free(old_handlers, sizeof(void *) * old_size);
   3759 
   3760 	return true;
   3761 }
   3762 
   3763 void
   3764 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
   3765 {
   3766 	void (**old_handlers)(device_t, devactive_t);
   3767 	size_t i, old_size;
   3768 	int s;
   3769 
   3770 	old_handlers = dev->dv_activity_handlers;
   3771 	old_size = dev->dv_activity_count;
   3772 
   3773 	for (i = 0; i < old_size; ++i) {
   3774 		if (old_handlers[i] == handler)
   3775 			break;
   3776 		if (old_handlers[i] == NULL)
   3777 			return; /* XXX panic? */
   3778 	}
   3779 
   3780 	if (i == old_size)
   3781 		return; /* XXX panic? */
   3782 
   3783 	for (; i < old_size - 1; ++i) {
   3784 		if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
   3785 			continue;
   3786 
   3787 		if (i == 0) {
   3788 			s = splhigh();
   3789 			dev->dv_activity_count = 0;
   3790 			dev->dv_activity_handlers = NULL;
   3791 			splx(s);
   3792 			kmem_free(old_handlers, sizeof(void *) * old_size);
   3793 		}
   3794 		return;
   3795 	}
   3796 	old_handlers[i] = NULL;
   3797 }
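
/*
 * Example (editor's sketch, not part of the original source): a class
 * or bus driver can watch for device activity with
 * device_active_register(), while drivers report activity through
 * device_active().  `xxx_activity' is a hypothetical handler (it might,
 * for example, unblank a display), and DVA_HARDWARE is assumed to be
 * one of the devactive_t values from <sys/device.h>.
 *
 *	static void
 *	xxx_activity(device_t dv, devactive_t type)
 *	{
 *	}
 *
 *	(void)device_active_register(dev, xxx_activity);
 *	device_active(dev, DVA_HARDWARE);
 *	device_active_deregister(dev, xxx_activity);
 */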
   3798 
    3799 /* Return true iff the device_t `dv' exists at generation `gen'. */
   3800 static bool
   3801 device_exists_at(device_t dv, devgen_t gen)
   3802 {
   3803 	return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
   3804 	    dv->dv_add_gen <= gen;
   3805 }
   3806 
   3807 static bool
   3808 deviter_visits(const deviter_t *di, device_t dv)
   3809 {
   3810 	return device_exists_at(dv, di->di_gen);
   3811 }
   3812 
   3813 /*
   3814  * Device Iteration
   3815  *
   3816  * deviter_t: a device iterator.  Holds state for a "walk" visiting
    3817  *     each device_t in the device tree.
   3818  *
   3819  * deviter_init(di, flags): initialize the device iterator `di'
   3820  *     to "walk" the device tree.  deviter_next(di) will return
   3821  *     the first device_t in the device tree, or NULL if there are
   3822  *     no devices.
   3823  *
   3824  *     `flags' is one or more of DEVITER_F_RW, indicating that the
   3825  *     caller intends to modify the device tree by calling
   3826  *     config_detach(9) on devices in the order that the iterator
   3827  *     returns them; DEVITER_F_ROOT_FIRST, asking for the devices
   3828  *     nearest the "root" of the device tree to be returned, first;
   3829  *     DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
   3830  *     the root of the device tree, first; and DEVITER_F_SHUTDOWN,
   3831  *     indicating both that deviter_init() should not respect any
   3832  *     locks on the device tree, and that deviter_next(di) may run
   3833  *     in more than one LWP before the walk has finished.
   3834  *
   3835  *     Only one DEVITER_F_RW iterator may be in the device tree at
   3836  *     once.
   3837  *
   3838  *     DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
   3839  *
   3840  *     Results are undefined if the flags DEVITER_F_ROOT_FIRST and
   3841  *     DEVITER_F_LEAVES_FIRST are used in combination.
   3842  *
   3843  * deviter_first(di, flags): initialize the device iterator `di'
   3844  *     and return the first device_t in the device tree, or NULL
   3845  *     if there are no devices.  The statement
   3846  *
    3847  *         dv = deviter_first(di, flags);
   3848  *
   3849  *     is shorthand for
   3850  *
    3851  *         deviter_init(di, flags);
   3852  *         dv = deviter_next(di);
   3853  *
   3854  * deviter_next(di): return the next device_t in the device tree,
   3855  *     or NULL if there are no more devices.  deviter_next(di)
   3856  *     is undefined if `di' was not initialized with deviter_init() or
   3857  *     deviter_first().
   3858  *
   3859  * deviter_release(di): stops iteration (subsequent calls to
   3860  *     deviter_next() will return NULL), releases any locks and
    3861  *     deviter_next() will return NULL) and releases any locks and
   3862  *
   3863  * Device iteration does not return device_t's in any particular
   3864  * order.  An iterator will never return the same device_t twice.
   3865  * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
   3866  * is called repeatedly on the same `di', it will eventually return
   3867  * NULL.  It is ok to attach/detach devices during device iteration.
   3868  */
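
/*
 * Example (editor's sketch, not part of the original source): a
 * read-only, root-first walk over every device using the interface
 * described above.
 *
 *	deviter_t di;
 *	device_t dv;
 *
 *	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		aprint_verbose_dev(dv, "visited\n");
 *	}
 *	deviter_release(&di);
 */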
   3869 void
   3870 deviter_init(deviter_t *di, deviter_flags_t flags)
   3871 {
   3872 	device_t dv;
   3873 
   3874 	memset(di, 0, sizeof(*di));
   3875 
   3876 	if ((flags & DEVITER_F_SHUTDOWN) != 0)
   3877 		flags |= DEVITER_F_RW;
   3878 
   3879 	mutex_enter(&alldevs_lock);
   3880 	if ((flags & DEVITER_F_RW) != 0)
   3881 		alldevs_nwrite++;
   3882 	else
   3883 		alldevs_nread++;
   3884 	di->di_gen = alldevs_gen++;
   3885 	di->di_flags = flags;
   3886 
   3887 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
   3888 	case DEVITER_F_LEAVES_FIRST:
   3889 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
   3890 			if (!deviter_visits(di, dv))
   3891 				continue;
   3892 			di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
   3893 		}
   3894 		break;
   3895 	case DEVITER_F_ROOT_FIRST:
   3896 		TAILQ_FOREACH(dv, &alldevs, dv_list) {
   3897 			if (!deviter_visits(di, dv))
   3898 				continue;
   3899 			di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
   3900 		}
   3901 		break;
   3902 	default:
   3903 		break;
   3904 	}
   3905 
   3906 	deviter_reinit(di);
   3907 	mutex_exit(&alldevs_lock);
   3908 }
   3909 
   3910 static void
   3911 deviter_reinit(deviter_t *di)
   3912 {
   3913 
   3914 	KASSERT(mutex_owned(&alldevs_lock));
   3915 	if ((di->di_flags & DEVITER_F_RW) != 0)
   3916 		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
   3917 	else
   3918 		di->di_prev = TAILQ_FIRST(&alldevs);
   3919 }
   3920 
   3921 device_t
   3922 deviter_first(deviter_t *di, deviter_flags_t flags)
   3923 {
   3924 
   3925 	deviter_init(di, flags);
   3926 	return deviter_next(di);
   3927 }
   3928 
   3929 static device_t
   3930 deviter_next2(deviter_t *di)
   3931 {
   3932 	device_t dv;
   3933 
   3934 	KASSERT(mutex_owned(&alldevs_lock));
   3935 
   3936 	dv = di->di_prev;
   3937 
   3938 	if (dv == NULL)
   3939 		return NULL;
   3940 
   3941 	if ((di->di_flags & DEVITER_F_RW) != 0)
   3942 		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
   3943 	else
   3944 		di->di_prev = TAILQ_NEXT(dv, dv_list);
   3945 
   3946 	return dv;
   3947 }
   3948 
   3949 static device_t
   3950 deviter_next1(deviter_t *di)
   3951 {
   3952 	device_t dv;
   3953 
   3954 	KASSERT(mutex_owned(&alldevs_lock));
   3955 
   3956 	do {
   3957 		dv = deviter_next2(di);
   3958 	} while (dv != NULL && !deviter_visits(di, dv));
   3959 
   3960 	return dv;
   3961 }
   3962 
   3963 device_t
   3964 deviter_next(deviter_t *di)
   3965 {
   3966 	device_t dv = NULL;
   3967 
   3968 	mutex_enter(&alldevs_lock);
   3969 	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
   3970 	case 0:
   3971 		dv = deviter_next1(di);
   3972 		break;
   3973 	case DEVITER_F_LEAVES_FIRST:
   3974 		while (di->di_curdepth >= 0) {
   3975 			if ((dv = deviter_next1(di)) == NULL) {
   3976 				di->di_curdepth--;
   3977 				deviter_reinit(di);
   3978 			} else if (dv->dv_depth == di->di_curdepth)
   3979 				break;
   3980 		}
   3981 		break;
   3982 	case DEVITER_F_ROOT_FIRST:
   3983 		while (di->di_curdepth <= di->di_maxdepth) {
   3984 			if ((dv = deviter_next1(di)) == NULL) {
   3985 				di->di_curdepth++;
   3986 				deviter_reinit(di);
   3987 			} else if (dv->dv_depth == di->di_curdepth)
   3988 				break;
   3989 		}
   3990 		break;
   3991 	default:
   3992 		break;
   3993 	}
   3994 	mutex_exit(&alldevs_lock);
   3995 
   3996 	return dv;
   3997 }
   3998 
   3999 void
   4000 deviter_release(deviter_t *di)
   4001 {
   4002 	bool rw = (di->di_flags & DEVITER_F_RW) != 0;
   4003 
   4004 	mutex_enter(&alldevs_lock);
   4005 	if (rw)
   4006 		--alldevs_nwrite;
   4007 	else
   4008 		--alldevs_nread;
   4009 	/* XXX wake a garbage-collection thread */
   4010 	mutex_exit(&alldevs_lock);
   4011 }
   4012 
   4013 const char *
   4014 cfdata_ifattr(const struct cfdata *cf)
   4015 {
   4016 	return cf->cf_pspec->cfp_iattr;
   4017 }
   4018 
   4019 bool
   4020 ifattr_match(const char *snull, const char *t)
   4021 {
   4022 	return (snull == NULL) || strcmp(snull, t) == 0;
   4023 }
   4024 
   4025 void
   4026 null_childdetached(device_t self, device_t child)
   4027 {
   4028 	/* do nothing */
   4029 }
   4030 
   4031 static void
   4032 sysctl_detach_setup(struct sysctllog **clog)
   4033 {
   4034 
   4035 	sysctl_createv(clog, 0, NULL, NULL,
   4036 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   4037 		CTLTYPE_BOOL, "detachall",
   4038 		SYSCTL_DESCR("Detach all devices at shutdown"),
   4039 		NULL, 0, &detachall, 0,
   4040 		CTL_KERN, CTL_CREATE, CTL_EOL);
   4041 }
   4042