1 /* $NetBSD: subr_autoconf.c,v 1.309 2023/04/16 11:18:25 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1996, 2000 Christopher G. Demetriou
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the
18 * NetBSD Project. See http://www.NetBSD.org/ for
19 * information about NetBSD.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35 */
36
37 /*
38 * Copyright (c) 1992, 1993
39 * The Regents of the University of California. All rights reserved.
40 *
41 * This software was developed by the Computer Systems Engineering group
42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43 * contributed to Berkeley.
44 *
45 * All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Lawrence Berkeley Laboratories.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. Neither the name of the University nor the names of its contributors
59 * may be used to endorse or promote products derived from this software
60 * without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL)
75 *
76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.309 2023/04/16 11:18:25 riastradh Exp $");
81
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113
114 #include <sys/disk.h>
115
116 #include <sys/rndsource.h>
117
118 #include <machine/limits.h>
119
120 /*
121 * Autoconfiguration subroutines.
122 */
123
124 /*
125 * Device autoconfiguration timings are mixed into the entropy pool.
126 */
127 static krndsource_t rnd_autoconf_source;
128
129 /*
130 * ioconf.c exports exactly two names: cfdata and cfroots. All system
131 * devices and drivers are found via these tables.
132 */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135
136 /*
137 * List of all cfdriver structures. We use this to detect duplicates
138 * when other cfdrivers are loaded.
139 */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142
143 /*
144 * Initial list of cfattach structures.
145 */
146 extern const struct cfattachinit cfattachinit[];
147
148 /*
149 * List of cfdata tables. We always have at least one such table -- the
150 * one built statically when the kernel was configured.
151 */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154
155 #define ROOT ((device_t)NULL)
156
157 struct matchinfo {
158 cfsubmatch_t fn;
159 device_t parent;
160 const int *locs;
161 void *aux;
162 struct cfdata *match;
163 int pri;
164 };
165
166 struct alldevs_foray {
167 int af_s;
168 struct devicelist af_garbage;
169 };
170
171 /*
172 * Internal version of the cfargs structure; all versions are
173 * canonicalized to this.
174 */
175 struct cfargs_internal {
176 union {
177 cfsubmatch_t submatch;/* submatch function (direct config) */
178 cfsearch_t search; /* search function (indirect config) */
179 };
180 const char * iattr; /* interface attribute */
181 const int * locators; /* locators array */
182 devhandle_t devhandle; /* devhandle_t (by value) */
183 };
184
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t config_attach_internal(device_t, cfdata_t, void *,
195 cfprint_t, const struct cfargs_internal *);
196
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199
200 static void pmflock_debug(device_t, const char *, int);
201
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204
205 struct deferred_config {
206 TAILQ_ENTRY(deferred_config) dc_queue;
207 device_t dc_dev;
208 void (*dc_func)(device_t);
209 };
210
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212
213 static struct deferred_config_head deferred_config_queue =
214 TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 TAILQ_ENTRY(finalize_hook) f_list;
230 int (*f_func)(device_t);
231 device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244
245 static struct devicelist config_pending =
246 TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249
250 static bool detachall = false;
251
252 #define STREQ(s1, s2) \
253 (*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254
255 static bool config_initialized = false; /* config_init() has been called. */
256
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259
260 static void sysctl_detach_setup(struct sysctllog **);
261
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264
265 typedef int (*cfdriver_fn)(struct cfdriver *);
266 static int
267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
268 cfdriver_fn drv_do, cfdriver_fn drv_undo,
269 const char *style, bool dopanic)
270 {
271 void (*pr)(const char *, ...) __printflike(1, 2) =
272 dopanic ? panic : printf;
273 int i, error = 0, e2 __diagused;
274
275 for (i = 0; cfdriverv[i] != NULL; i++) {
276 if ((error = drv_do(cfdriverv[i])) != 0) {
277 pr("configure: `%s' driver %s failed: %d",
278 cfdriverv[i]->cd_name, style, error);
279 goto bad;
280 }
281 }
282
283 KASSERT(error == 0);
284 return 0;
285
286 bad:
287 printf("\n");
288 for (i--; i >= 0; i--) {
289 e2 = drv_undo(cfdriverv[i]);
290 KASSERT(e2 == 0);
291 }
292
293 return error;
294 }
295
296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
297 static int
298 frob_cfattachvec(const struct cfattachinit *cfattachv,
299 cfattach_fn att_do, cfattach_fn att_undo,
300 const char *style, bool dopanic)
301 {
302 const struct cfattachinit *cfai = NULL;
303 void (*pr)(const char *, ...) __printflike(1, 2) =
304 dopanic ? panic : printf;
305 int j = 0, error = 0, e2 __diagused;
306
307 for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
308 for (j = 0; cfai->cfai_list[j] != NULL; j++) {
309 if ((error = att_do(cfai->cfai_name,
310 cfai->cfai_list[j])) != 0) {
311 pr("configure: attachment `%s' "
312 "of `%s' driver %s failed: %d",
313 cfai->cfai_list[j]->ca_name,
314 cfai->cfai_name, style, error);
315 goto bad;
316 }
317 }
318 }
319
320 KASSERT(error == 0);
321 return 0;
322
323 bad:
324 /*
325 * Roll back in reverse order. It is not clear that the ordering
326 * matters, but do it anyway. The resulting loop looks a little
327 * like an integration (in the math sense).
328 */
329 printf("\n");
330 if (cfai) {
331 bool last;
332
333 for (last = false; last == false; ) {
334 if (cfai == &cfattachv[0])
335 last = true;
336 for (j--; j >= 0; j--) {
337 e2 = att_undo(cfai->cfai_name,
338 cfai->cfai_list[j]);
339 KASSERT(e2 == 0);
340 }
341 if (!last) {
342 cfai--;
343 for (j = 0; cfai->cfai_list[j] != NULL; j++)
344 ;
345 }
346 }
347 }
348
349 return error;
350 }
351
352 /*
353 * Initialize the autoconfiguration data structures. Normally this
354 * is done by configure(), but some platforms need to do this very
355 * early (to e.g. initialize the console).
356 */
357 void
358 config_init(void)
359 {
360
361 KASSERT(config_initialized == false);
362
363 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
364
365 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
366 cv_init(&config_misc_cv, "cfgmisc");
367
368 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
369
370 frob_cfdrivervec(cfdriver_list_initial,
371 config_cfdriver_attach, NULL, "bootstrap", true);
372 frob_cfattachvec(cfattachinit,
373 config_cfattach_attach, NULL, "bootstrap", true);
374
375 initcftable.ct_cfdata = cfdata;
376 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
377
378 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
379 RND_FLAG_COLLECT_TIME);
380
381 config_initialized = true;
382 }
383
384 /*
385 * Init or fini drivers and attachments. Either all or none
386 * are processed (via rollback). It would be nice if this were
387 * atomic to outside consumers, but with the current state of
388 * locking ...
389 */
390 int
391 config_init_component(struct cfdriver * const *cfdriverv,
392 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
393 {
394 int error;
395
396 KERNEL_LOCK(1, NULL);
397
398 if ((error = frob_cfdrivervec(cfdriverv,
399 config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
400 goto out;
401 if ((error = frob_cfattachvec(cfattachv,
402 config_cfattach_attach, config_cfattach_detach,
403 "init", false)) != 0) {
404 frob_cfdrivervec(cfdriverv,
405 config_cfdriver_detach, NULL, "init rollback", true);
406 goto out;
407 }
408 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
409 frob_cfattachvec(cfattachv,
410 config_cfattach_detach, NULL, "init rollback", true);
411 frob_cfdrivervec(cfdriverv,
412 config_cfdriver_detach, NULL, "init rollback", true);
413 goto out;
414 }
415
416 /* Success! */
417 error = 0;
418
419 out: KERNEL_UNLOCK_ONE(NULL);
420 return error;
421 }
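
/*
 * Illustrative sketch (not part of this file): a loadable driver module
 * typically calls config_init_component()/config_fini_component() from
 * its module command handler, passing the tables generated from its
 * ioconf file. The "ioconf_foo" names below follow the usual convention
 * for a hypothetical driver "foo" and are assumptions, not definitions
 * made here:
 *
 *	static int
 *	foo_modcmd(modcmd_t cmd, void *arg)
 *	{
 *
 *		switch (cmd) {
 *		case MODULE_CMD_INIT:
 *			return config_init_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		case MODULE_CMD_FINI:
 *			return config_fini_component(cfdriver_ioconf_foo,
 *			    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *		default:
 *			return ENOTTY;
 *		}
 *	}
 */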
422
423 int
424 config_fini_component(struct cfdriver * const *cfdriverv,
425 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
426 {
427 int error;
428
429 KERNEL_LOCK(1, NULL);
430
431 if ((error = config_cfdata_detach(cfdatav)) != 0)
432 goto out;
433 if ((error = frob_cfattachvec(cfattachv,
434 config_cfattach_detach, config_cfattach_attach,
435 "fini", false)) != 0) {
436 if (config_cfdata_attach(cfdatav, 0) != 0)
437 panic("config_cfdata fini rollback failed");
438 goto out;
439 }
440 if ((error = frob_cfdrivervec(cfdriverv,
441 config_cfdriver_detach, config_cfdriver_attach,
442 "fini", false)) != 0) {
443 frob_cfattachvec(cfattachv,
444 config_cfattach_attach, NULL, "fini rollback", true);
445 if (config_cfdata_attach(cfdatav, 0) != 0)
446 panic("config_cfdata fini rollback failed");
447 goto out;
448 }
449
450 /* Success! */
451 error = 0;
452
453 out: KERNEL_UNLOCK_ONE(NULL);
454 return error;
455 }
456
457 void
458 config_init_mi(void)
459 {
460
461 if (!config_initialized)
462 config_init();
463
464 sysctl_detach_setup(NULL);
465 }
466
467 void
468 config_deferred(device_t dev)
469 {
470
471 KASSERT(KERNEL_LOCKED_P());
472
473 config_process_deferred(&deferred_config_queue, dev);
474 config_process_deferred(&interrupt_config_queue, dev);
475 config_process_deferred(&mountroot_config_queue, dev);
476 }
477
478 static void
479 config_interrupts_thread(void *cookie)
480 {
481 struct deferred_config *dc;
482 device_t dev;
483
484 mutex_enter(&config_misc_lock);
485 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
486 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
487 mutex_exit(&config_misc_lock);
488
489 dev = dc->dc_dev;
490 (*dc->dc_func)(dev);
491 if (!device_pmf_is_registered(dev))
492 aprint_debug_dev(dev,
493 "WARNING: power management not supported\n");
494 config_pending_decr(dev);
495 kmem_free(dc, sizeof(*dc));
496
497 mutex_enter(&config_misc_lock);
498 }
499 mutex_exit(&config_misc_lock);
500
501 kthread_exit(0);
502 }
503
504 void
505 config_create_interruptthreads(void)
506 {
507 int i;
508
509 for (i = 0; i < interrupt_config_threads; i++) {
510 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 config_interrupts_thread, NULL, NULL, "configintr");
512 }
513 }
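
/*
 * Illustrative sketch (assumption, not part of this file): the
 * interrupt_config_queue drained by the "configintr" threads above is
 * filled by drivers calling config_interrupts() from their attach
 * function to defer work until interrupts are running, roughly:
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_interrupts(self, foo_attach_deferred);
 *	}
 *
 *	static void
 *	foo_attach_deferred(device_t self)
 *	{
 *		... runs in a "configintr" thread once interrupts work ...
 *	}
 */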
514
515 static void
516 config_mountroot_thread(void *cookie)
517 {
518 struct deferred_config *dc;
519
520 mutex_enter(&config_misc_lock);
521 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
522 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
523 mutex_exit(&config_misc_lock);
524
525 (*dc->dc_func)(dc->dc_dev);
526 kmem_free(dc, sizeof(*dc));
527
528 mutex_enter(&config_misc_lock);
529 }
530 mutex_exit(&config_misc_lock);
531
532 kthread_exit(0);
533 }
534
535 void
536 config_create_mountrootthreads(void)
537 {
538 int i;
539
540 if (!root_is_mounted)
541 root_is_mounted = true;
542
543 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 mountroot_config_threads;
545 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 KM_NOSLEEP);
547 KASSERT(mountroot_config_lwpids);
548 for (i = 0; i < mountroot_config_threads; i++) {
549 mountroot_config_lwpids[i] = 0;
550 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 NULL, config_mountroot_thread, NULL,
552 &mountroot_config_lwpids[i],
553 "configroot");
554 }
555 }
556
557 void
558 config_finalize_mountroot(void)
559 {
560 int i, error;
561
562 for (i = 0; i < mountroot_config_threads; i++) {
563 if (mountroot_config_lwpids[i] == 0)
564 continue;
565
566 error = kthread_join(mountroot_config_lwpids[i]);
567 if (error)
568 printf("%s: thread %x joined with error %d\n",
569 __func__, i, error);
570 }
571 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
572 }
573
574 /*
575 * Announce device attach/detach to userland listeners.
576 */
577
578 int
579 no_devmon_insert(const char *name, prop_dictionary_t p)
580 {
581
582 return ENODEV;
583 }
584
585 static void
586 devmon_report_device(device_t dev, bool isattach)
587 {
588 prop_dictionary_t ev, dict = device_properties(dev);
589 const char *parent;
590 const char *what;
591 const char *where;
592 device_t pdev = device_parent(dev);
593
594 /* If currently no drvctl device, just return */
595 if (devmon_insert_vec == no_devmon_insert)
596 return;
597
598 ev = prop_dictionary_create();
599 if (ev == NULL)
600 return;
601
602 what = (isattach ? "device-attach" : "device-detach");
603 parent = (pdev == NULL ? "root" : device_xname(pdev));
604 if (prop_dictionary_get_string(dict, "location", &where)) {
605 prop_dictionary_set_string(ev, "location", where);
606 aprint_debug("ev: %s %s at %s in [%s]\n",
607 what, device_xname(dev), parent, where);
608 }
609 if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
610 !prop_dictionary_set_string(ev, "parent", parent)) {
611 prop_object_release(ev);
612 return;
613 }
614
615 if ((*devmon_insert_vec)(what, ev) != 0)
616 prop_object_release(ev);
617 }
618
619 /*
620 * Add a cfdriver to the system.
621 */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 struct cfdriver *lcd;
626
627 /* Make sure this driver isn't already in the system. */
628 LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 if (STREQ(lcd->cd_name, cd->cd_name))
630 return EEXIST;
631 }
632
633 LIST_INIT(&cd->cd_attach);
634 LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635
636 return 0;
637 }
638
639 /*
640 * Remove a cfdriver from the system.
641 */
642 int
643 config_cfdriver_detach(struct cfdriver *cd)
644 {
645 struct alldevs_foray af;
646 int i, rc = 0;
647
648 config_alldevs_enter(&af);
649 /* Make sure there are no active instances. */
650 for (i = 0; i < cd->cd_ndevs; i++) {
651 if (cd->cd_devs[i] != NULL) {
652 rc = EBUSY;
653 break;
654 }
655 }
656 config_alldevs_exit(&af);
657
658 if (rc != 0)
659 return rc;
660
661 /* ...and no attachments loaded. */
662 if (LIST_EMPTY(&cd->cd_attach) == 0)
663 return EBUSY;
664
665 LIST_REMOVE(cd, cd_list);
666
667 KASSERT(cd->cd_devs == NULL);
668
669 return 0;
670 }
671
672 /*
673 * Look up a cfdriver by name.
674 */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 struct cfdriver *cd;
679
680 LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 if (STREQ(cd->cd_name, name))
682 return cd;
683 }
684
685 return NULL;
686 }
687
688 /*
689 * Add a cfattach to the specified driver.
690 */
691 int
692 config_cfattach_attach(const char *driver, struct cfattach *ca)
693 {
694 struct cfattach *lca;
695 struct cfdriver *cd;
696
697 cd = config_cfdriver_lookup(driver);
698 if (cd == NULL)
699 return ESRCH;
700
701 /* Make sure this attachment isn't already on this driver. */
702 LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
703 if (STREQ(lca->ca_name, ca->ca_name))
704 return EEXIST;
705 }
706
707 LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
708
709 return 0;
710 }
711
712 /*
713 * Remove a cfattach from the specified driver.
714 */
715 int
716 config_cfattach_detach(const char *driver, struct cfattach *ca)
717 {
718 struct alldevs_foray af;
719 struct cfdriver *cd;
720 device_t dev;
721 int i, rc = 0;
722
723 cd = config_cfdriver_lookup(driver);
724 if (cd == NULL)
725 return ESRCH;
726
727 config_alldevs_enter(&af);
728 /* Make sure there are no active instances. */
729 for (i = 0; i < cd->cd_ndevs; i++) {
730 if ((dev = cd->cd_devs[i]) == NULL)
731 continue;
732 if (dev->dv_cfattach == ca) {
733 rc = EBUSY;
734 break;
735 }
736 }
737 config_alldevs_exit(&af);
738
739 if (rc != 0)
740 return rc;
741
742 LIST_REMOVE(ca, ca_list);
743
744 return 0;
745 }
746
747 /*
748 * Look up a cfattach by name.
749 */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 struct cfattach *ca;
754
755 LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 if (STREQ(ca->ca_name, atname))
757 return ca;
758 }
759
760 return NULL;
761 }
762
763 /*
764 * Look up a cfattach by driver/attachment name.
765 */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 struct cfdriver *cd;
770
771 cd = config_cfdriver_lookup(name);
772 if (cd == NULL)
773 return NULL;
774
775 return config_cfattach_lookup_cd(cd, atname);
776 }
777
778 /*
779 * Apply the matching function and choose the best. This is used
780 * a few times and we want to keep the code small.
781 */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 int pri;
786
787 if (m->fn != NULL) {
788 pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 } else {
790 pri = config_match(m->parent, cf, m->aux);
791 }
792 if (pri > m->pri) {
793 m->match = cf;
794 m->pri = pri;
795 }
796 }
797
798 int
799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
800 {
801 const struct cfiattrdata *ci;
802 const struct cflocdesc *cl;
803 int nlocs, i;
804
805 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
806 KASSERT(ci);
807 nlocs = ci->ci_loclen;
808 KASSERT(!nlocs || locs);
809 for (i = 0; i < nlocs; i++) {
810 cl = &ci->ci_locdesc[i];
811 if (cl->cld_defaultstr != NULL &&
812 cf->cf_loc[i] == cl->cld_default)
813 continue;
814 if (cf->cf_loc[i] == locs[i])
815 continue;
816 return 0;
817 }
818
819 return config_match(parent, cf, aux);
820 }
821
822 /*
823 * Helper function: check whether the driver supports the interface attribute
824 * and return its descriptor structure.
825 */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 const struct cfiattrdata * const *cpp;
830
831 if (cd->cd_attrs == NULL)
832 return 0;
833
834 for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 if (STREQ((*cpp)->ci_name, ia)) {
836 /* Match. */
837 return *cpp;
838 }
839 }
840 return 0;
841 }
842
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 const struct cfiattrdata * const *cpp;
847 int i;
848
849 if (cd->cd_attrs == NULL)
850 return 0;
851
852 for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 i++;
854 }
855 return i;
856 }
857
858 /*
859 * Look up an interface attribute description by name.
860 * If the driver is given, consider only its supported attributes.
861 */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 const struct cfdriver *d;
866 const struct cfiattrdata *ia;
867
868 if (cd)
869 return cfdriver_get_iattr(cd, name);
870
871 LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 ia = cfdriver_get_iattr(d, name);
873 if (ia)
874 return ia;
875 }
876 return 0;
877 }
878
879 /*
880 * Determine if `parent' is a potential parent for a device spec based
881 * on `cfp'.
882 */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 struct cfdriver *pcd;
887
888 /* We don't match root nodes here. */
889 if (cfp == NULL)
890 return 0;
891
892 pcd = parent->dv_cfdriver;
893 KASSERT(pcd != NULL);
894
895 /*
896 * First, ensure this parent has the correct interface
897 * attribute.
898 */
899 if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 return 0;
901
902 /*
903 * If no specific parent device instance was specified (i.e.
904 * we're attaching to the attribute only), we're done!
905 */
906 if (cfp->cfp_parent == NULL)
907 return 1;
908
909 /*
910 * Check the parent device's name.
911 */
912 if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 return 0; /* not the same parent */
914
915 /*
916 * Make sure the unit number matches.
917 */
918 if (cfp->cfp_unit == DVUNIT_ANY || /* wildcard */
919 cfp->cfp_unit == parent->dv_unit)
920 return 1;
921
922 /* Unit numbers don't match. */
923 return 0;
924 }
925
926 /*
927 * Helper for config_cfdata_attach(): check whether any existing device could
928 * be the parent of an attachment in the given config data table, and rescan.
929 */
930 static void
931 rescan_with_cfdata(const struct cfdata *cf)
932 {
933 device_t d;
934 const struct cfdata *cf1;
935 deviter_t di;
936
937 KASSERT(KERNEL_LOCKED_P());
938
939 /*
940 * "alldevs" is likely longer than a module's cfdata, so make it
941 * the outer loop.
942 */
943 for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
944
945 if (!(d->dv_cfattach->ca_rescan))
946 continue;
947
948 for (cf1 = cf; cf1->cf_name; cf1++) {
949
950 if (!cfparent_match(d, cf1->cf_pspec))
951 continue;
952
953 (*d->dv_cfattach->ca_rescan)(d,
954 cfdata_ifattr(cf1), cf1->cf_loc);
955
956 config_deferred(d);
957 }
958 }
959 deviter_release(&di);
960 }
961
962 /*
963 * Attach a supplemental config data table and rescan potential
964 * parent devices if required.
965 */
966 int
967 config_cfdata_attach(cfdata_t cf, int scannow)
968 {
969 struct cftable *ct;
970
971 KERNEL_LOCK(1, NULL);
972
973 ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
974 ct->ct_cfdata = cf;
975 TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
976
977 if (scannow)
978 rescan_with_cfdata(cf);
979
980 KERNEL_UNLOCK_ONE(NULL);
981
982 return 0;
983 }
984
985 /*
986 * Helper for config_cfdata_detach: check whether a device is
987 * found through any attachment in the config data table.
988 */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 const struct cfdata *cf1;
993
994 for (cf1 = cf; cf1->cf_name; cf1++)
995 if (d->dv_cfdata == cf1)
996 return 1;
997
998 return 0;
999 }
1000
1001 /*
1002 * Detach a supplemental config data table. First detach all devices that
1003 * were found through that table (and thus still hold references to it).
1004 */
1005 int
1006 config_cfdata_detach(cfdata_t cf)
1007 {
1008 device_t d;
1009 int error = 0;
1010 struct cftable *ct;
1011 deviter_t di;
1012
1013 KERNEL_LOCK(1, NULL);
1014
1015 for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1016 d = deviter_next(&di)) {
1017 if (!dev_in_cfdata(d, cf))
1018 continue;
1019 if ((error = config_detach(d, 0)) != 0)
1020 break;
1021 }
1022 deviter_release(&di);
1023 if (error) {
1024 aprint_error_dev(d, "unable to detach instance\n");
1025 goto out;
1026 }
1027
1028 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1029 if (ct->ct_cfdata == cf) {
1030 TAILQ_REMOVE(&allcftables, ct, ct_list);
1031 kmem_free(ct, sizeof(*ct));
1032 error = 0;
1033 goto out;
1034 }
1035 }
1036
1037 /* not found -- shouldn't happen */
1038 error = EINVAL;
1039
1040 out: KERNEL_UNLOCK_ONE(NULL);
1041 return error;
1042 }
1043
1044 /*
1045 * Invoke the "match" routine for a cfdata entry on behalf of
1046 * an external caller, usually a direct config "submatch" routine.
1047 */
1048 int
1049 config_match(device_t parent, cfdata_t cf, void *aux)
1050 {
1051 struct cfattach *ca;
1052
1053 KASSERT(KERNEL_LOCKED_P());
1054
1055 ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1056 if (ca == NULL) {
1057 /* No attachment for this entry, oh well. */
1058 return 0;
1059 }
1060
1061 return (*ca->ca_match)(parent, cf, aux);
1062 }
1063
1064 /*
1065 * Invoke the "probe" routine for a cfdata entry on behalf of
1066 * an external caller, usually an indirect config "search" routine.
1067 */
1068 int
1069 config_probe(device_t parent, cfdata_t cf, void *aux)
1070 {
1071 /*
1072 * This is currently a synonym for config_match(), but this
1073 * is an implementation detail; "match" and "probe" routines
1074 * have different behaviors.
1075 *
1076 * XXX config_probe() should return a bool, because there is
1077 * XXX no match score for probe -- it's either there or it's
1078 * XXX not, but some ports abuse the return value as a way
1079 * XXX to attach "critical" devices before "non-critical"
1080 * XXX devices.
1081 */
1082 return config_match(parent, cf, aux);
1083 }
1084
1085 static struct cfargs_internal *
1086 cfargs_canonicalize(const struct cfargs * const cfargs,
1087 struct cfargs_internal * const store)
1088 {
1089 struct cfargs_internal *args = store;
1090
1091 memset(args, 0, sizeof(*args));
1092
1093 /* If none specified, all-NULL pointers are good. */
1094 if (cfargs == NULL) {
1095 return args;
1096 }
1097
1098 /*
1099 * Only one version of the arguments structure is recognized at this time.
1100 */
1101 if (cfargs->cfargs_version != CFARGS_VERSION) {
1102 panic("cfargs_canonicalize: unknown version %lu\n",
1103 (unsigned long)cfargs->cfargs_version);
1104 }
1105
1106 /*
1107 * submatch and search are mutually-exclusive.
1108 */
1109 if (cfargs->submatch != NULL && cfargs->search != NULL) {
1110 panic("cfargs_canonicalize: submatch and search are "
1111 "mutually-exclusive");
1112 }
1113 if (cfargs->submatch != NULL) {
1114 args->submatch = cfargs->submatch;
1115 } else if (cfargs->search != NULL) {
1116 args->search = cfargs->search;
1117 }
1118
1119 args->iattr = cfargs->iattr;
1120 args->locators = cfargs->locators;
1121 args->devhandle = cfargs->devhandle;
1122
1123 return args;
1124 }
1125
1126 /*
1127 * Iterate over all potential children of some device, calling the given
1128 * function (default being the child's match function) for each one.
1129 * Nonzero returns are matches; the highest value returned is considered
1130 * the best match. Return the best-matching cfdata entry if we got a match,
1131 * or NULL otherwise. The `aux' pointer is simply passed on through.
1132 *
1133 * Note that this function is designed so that it can be used to apply
1134 * an arbitrary function to all potential children (its return value
1135 * can be ignored).
1136 */
1137 static cfdata_t
1138 config_search_internal(device_t parent, void *aux,
1139 const struct cfargs_internal * const args)
1140 {
1141 struct cftable *ct;
1142 cfdata_t cf;
1143 struct matchinfo m;
1144
1145 KASSERT(config_initialized);
1146 KASSERTMSG((!args->iattr ||
1147 cfdriver_get_iattr(parent->dv_cfdriver, args->iattr)),
1148 "%s searched for child at interface attribute %s,"
1149 " but device %s(4) has no such interface attribute in config(5)",
1150 device_xname(parent), args->iattr,
1151 parent->dv_cfdriver->cd_name);
1152 KASSERTMSG((args->iattr ||
1153 cfdriver_iattr_count(parent->dv_cfdriver) < 2),
1154 "%s searched for child without interface attribute,"
1155 " needed to disambiguate among the %d declared for it in %s(4)"
1156 " in config(5)",
1157 device_xname(parent),
1158 cfdriver_iattr_count(parent->dv_cfdriver),
1159 parent->dv_cfdriver->cd_name);
1160
1161 m.fn = args->submatch; /* N.B. union */
1162 m.parent = parent;
1163 m.locs = args->locators;
1164 m.aux = aux;
1165 m.match = NULL;
1166 m.pri = 0;
1167
1168 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1169 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1170
1171 /* We don't match root nodes here. */
1172 if (!cf->cf_pspec)
1173 continue;
1174
1175 /*
1176 * Skip cf if no longer eligible, otherwise scan
1177 * through parents for one matching `parent', and
1178 * try match function.
1179 */
1180 if (cf->cf_fstate == FSTATE_FOUND)
1181 continue;
1182 if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1183 cf->cf_fstate == FSTATE_DSTAR)
1184 continue;
1185
1186 /*
1187 * If an interface attribute was specified,
1188 * consider only children which attach to
1189 * that attribute.
1190 */
1191 if (args->iattr != NULL &&
1192 !STREQ(args->iattr, cfdata_ifattr(cf)))
1193 continue;
1194
1195 if (cfparent_match(parent, cf->cf_pspec))
1196 mapply(&m, cf);
1197 }
1198 }
1199 rnd_add_uint32(&rnd_autoconf_source, 0);
1200 return m.match;
1201 }
1202
1203 cfdata_t
1204 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1205 {
1206 cfdata_t cf;
1207 struct cfargs_internal store;
1208
1209 cf = config_search_internal(parent, aux,
1210 cfargs_canonicalize(cfargs, &store));
1211
1212 return cf;
1213 }
1214
1215 /*
1216 * Find the given root device.
1217 * This is much like config_search, but there is no parent.
1218 * Don't bother with multiple cfdata tables; the root node
1219 * must always be in the initial table.
1220 */
1221 cfdata_t
1222 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1223 {
1224 cfdata_t cf;
1225 const short *p;
1226 struct matchinfo m;
1227
1228 m.fn = fn;
1229 m.parent = ROOT;
1230 m.aux = aux;
1231 m.match = NULL;
1232 m.pri = 0;
1233 m.locs = 0;
1234 /*
1235 * Look at root entries for matching name. We do not bother
1236 * with found-state here since only one root should ever be
1237 * searched (and it must be done first).
1238 */
1239 for (p = cfroots; *p >= 0; p++) {
1240 cf = &cfdata[*p];
1241 if (strcmp(cf->cf_name, rootname) == 0)
1242 mapply(&m, cf);
1243 }
1244 return m.match;
1245 }
1246
1247 static const char * const msgs[] = {
1248 [QUIET] = "",
1249 [UNCONF] = " not configured\n",
1250 [UNSUPP] = " unsupported\n",
1251 };
1252
1253 /*
1254 * The given `aux' argument describes a device that has been found
1255 * on the given parent, but not necessarily configured. Locate the
1256 * configuration data for that device (using the submatch function
1257 * provided, or using candidates' cd_match configuration driver
1258 * functions) and attach it, and return its device_t. If the device was
1259 * not configured, call the given `print' function and return NULL.
1260 */
1261 device_t
1262 config_found(device_t parent, void *aux, cfprint_t print,
1263 const struct cfargs * const cfargs)
1264 {
1265 cfdata_t cf;
1266 struct cfargs_internal store;
1267 const struct cfargs_internal * const args =
1268 cfargs_canonicalize(cfargs, &store);
1269
1270 cf = config_search_internal(parent, aux, args);
1271 if (cf != NULL) {
1272 return config_attach_internal(parent, cf, aux, print, args);
1273 }
1274
1275 if (print) {
1276 if (config_do_twiddle && cold)
1277 twiddle();
1278
1279 const int pret = (*print)(aux, device_xname(parent));
1280 KASSERT(pret >= 0);
1281 KASSERT(pret < __arraycount(msgs));
1282 KASSERT(msgs[pret] != NULL);
1283 aprint_normal("%s", msgs[pret]);
1284 }
1285
1286 return NULL;
1287 }
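
/*
 * Illustrative sketch (assumption, not part of this file): a bus driver
 * calls config_found() for each child it discovers, passing a bus-specific
 * attach-arguments structure, a print function, and cfargs. A hypothetical
 * "foo" bus attaching children at a "foobus" interface attribute with
 * locators might do:
 *
 *	struct foobus_attach_args faa = { ... };
 *	int locs[] = { ... };
 *
 *	(void)config_found(self, &faa, foobus_print,
 *	    CFARGS(.submatch = config_stdsubmatch,
 *		   .iattr = "foobus",
 *		   .locators = locs));
 *
 * Callers with no special requirements pass CFARGS_NONE instead.
 */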
1288
1289 /*
1290 * As above, but for root devices.
1291 */
1292 device_t
1293 config_rootfound(const char *rootname, void *aux)
1294 {
1295 cfdata_t cf;
1296 device_t dev = NULL;
1297
1298 KERNEL_LOCK(1, NULL);
1299 if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1300 dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1301 else
1302 aprint_error("root device %s not configured\n", rootname);
1303 KERNEL_UNLOCK_ONE(NULL);
1304 return dev;
1305 }
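
/*
 * Illustrative sketch (assumption, not part of this file): machine-
 * dependent startup code typically roots the device tree at "mainbus",
 * roughly:
 *
 *	void
 *	cpu_configure(void)
 *	{
 *		...
 *		if (config_rootfound("mainbus", NULL) == NULL)
 *			panic("configure: mainbus not configured");
 *		...
 *	}
 */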
1306
1307 /* just like sprintf(buf, "%d") except that it works from the end */
1308 static char *
1309 number(char *ep, int n)
1310 {
1311
1312 *--ep = 0;
1313 while (n >= 10) {
1314 *--ep = (n % 10) + '0';
1315 n /= 10;
1316 }
1317 *--ep = n + '0';
1318 return ep;
1319 }
1320
1321 /*
1322 * Expand the size of the cd_devs array if necessary.
1323 *
1324 * The caller must hold alldevs_lock. config_makeroom() may release and
1325 * re-acquire alldevs_lock, so callers should re-check conditions such
1326 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1327 * returns.
1328 */
1329 static void
1330 config_makeroom(int n, struct cfdriver *cd)
1331 {
1332 int ondevs, nndevs;
1333 device_t *osp, *nsp;
1334
1335 KASSERT(mutex_owned(&alldevs_lock));
1336 alldevs_nwrite++;
1337
1338 for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1339 ;
1340
1341 while (n >= cd->cd_ndevs) {
1342 /*
1343 * Need to expand the array.
1344 */
1345 ondevs = cd->cd_ndevs;
1346 osp = cd->cd_devs;
1347
1348 /*
1349 * Release alldevs_lock around allocation, which may
1350 * sleep.
1351 */
1352 mutex_exit(&alldevs_lock);
1353 nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1354 mutex_enter(&alldevs_lock);
1355
1356 /*
1357 * If another thread moved the array while we did
1358 * not hold alldevs_lock, try again.
1359 */
1360 if (cd->cd_devs != osp || cd->cd_ndevs != ondevs) {
1361 mutex_exit(&alldevs_lock);
1362 kmem_free(nsp, sizeof(device_t) * nndevs);
1363 mutex_enter(&alldevs_lock);
1364 continue;
1365 }
1366
1367 memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1368 if (ondevs != 0)
1369 memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1370
1371 cd->cd_ndevs = nndevs;
1372 cd->cd_devs = nsp;
1373 if (ondevs != 0) {
1374 mutex_exit(&alldevs_lock);
1375 kmem_free(osp, sizeof(device_t) * ondevs);
1376 mutex_enter(&alldevs_lock);
1377 }
1378 }
1379 KASSERT(mutex_owned(&alldevs_lock));
1380 alldevs_nwrite--;
1381 }
1382
1383 /*
1384 * Put dev into the devices list.
1385 */
1386 static void
1387 config_devlink(device_t dev)
1388 {
1389
1390 mutex_enter(&alldevs_lock);
1391
1392 KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1393
1394 dev->dv_add_gen = alldevs_gen;
1395 /* It is safe to add a device to the tail of the list while
1396 * readers and writers are in the list.
1397 */
1398 TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1399 mutex_exit(&alldevs_lock);
1400 }
1401
1402 static void
1403 config_devfree(device_t dev)
1404 {
1405
1406 KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1407 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1408
1409 if (dev->dv_cfattach->ca_devsize > 0)
1410 kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1411 kmem_free(dev, sizeof(*dev));
1412 }
1413
1414 /*
1415 * Caller must hold alldevs_lock.
1416 */
1417 static void
1418 config_devunlink(device_t dev, struct devicelist *garbage)
1419 {
1420 struct device_garbage *dg = &dev->dv_garbage;
1421 cfdriver_t cd = device_cfdriver(dev);
1422 int i;
1423
1424 KASSERT(mutex_owned(&alldevs_lock));
1425 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1426
1427 /* Unlink from device list. Link to garbage list. */
1428 TAILQ_REMOVE(&alldevs, dev, dv_list);
1429 TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1430
1431 /* Remove from cfdriver's array. */
1432 cd->cd_devs[dev->dv_unit] = NULL;
1433
1434 /*
1435 * If the device now has no units in use, unlink its softc array.
1436 */
1437 for (i = 0; i < cd->cd_ndevs; i++) {
1438 if (cd->cd_devs[i] != NULL)
1439 break;
1440 }
1441 /* Nothing found. Unlink now; deallocate later. */
1442 if (i == cd->cd_ndevs) {
1443 dg->dg_ndevs = cd->cd_ndevs;
1444 dg->dg_devs = cd->cd_devs;
1445 cd->cd_devs = NULL;
1446 cd->cd_ndevs = 0;
1447 }
1448 }
1449
1450 static void
1451 config_devdelete(device_t dev)
1452 {
1453 struct device_garbage *dg = &dev->dv_garbage;
1454 device_lock_t dvl = device_getlock(dev);
1455
1456 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1457
1458 if (dg->dg_devs != NULL)
1459 kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1460
1461 localcount_fini(dev->dv_localcount);
1462 kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
1463
1464 cv_destroy(&dvl->dvl_cv);
1465 mutex_destroy(&dvl->dvl_mtx);
1466
1467 KASSERT(dev->dv_properties != NULL);
1468 prop_object_release(dev->dv_properties);
1469
1470 if (dev->dv_activity_handlers)
1471 panic("%s with registered handlers", __func__);
1472
1473 if (dev->dv_locators) {
1474 size_t amount = *--dev->dv_locators;
1475 kmem_free(dev->dv_locators, amount);
1476 }
1477
1478 config_devfree(dev);
1479 }
1480
1481 static int
1482 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1483 {
1484 int unit = cf->cf_unit;
1485
1486 KASSERT(mutex_owned(&alldevs_lock));
1487
1488 if (unit < 0)
1489 return -1;
1490 if (cf->cf_fstate == FSTATE_STAR) {
1491 for (; unit < cd->cd_ndevs; unit++)
1492 if (cd->cd_devs[unit] == NULL)
1493 break;
1494 /*
1495 * unit is now the unit of the first NULL device pointer,
1496 * or max(cd->cd_ndevs,cf->cf_unit).
1497 */
1498 } else {
1499 if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1500 unit = -1;
1501 }
1502 return unit;
1503 }
1504
1505 static int
1506 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1507 {
1508 struct alldevs_foray af;
1509 int unit;
1510
1511 config_alldevs_enter(&af);
1512 for (;;) {
1513 unit = config_unit_nextfree(cd, cf);
1514 if (unit == -1)
1515 break;
1516 if (unit < cd->cd_ndevs) {
1517 cd->cd_devs[unit] = dev;
1518 dev->dv_unit = unit;
1519 break;
1520 }
1521 config_makeroom(unit, cd);
1522 }
1523 config_alldevs_exit(&af);
1524
1525 return unit;
1526 }
1527
1528 static device_t
1529 config_devalloc(const device_t parent, const cfdata_t cf,
1530 const struct cfargs_internal * const args)
1531 {
1532 cfdriver_t cd;
1533 cfattach_t ca;
1534 size_t lname, lunit;
1535 const char *xunit;
1536 int myunit;
1537 char num[10];
1538 device_t dev;
1539 void *dev_private;
1540 const struct cfiattrdata *ia;
1541 device_lock_t dvl;
1542
1543 cd = config_cfdriver_lookup(cf->cf_name);
1544 if (cd == NULL)
1545 return NULL;
1546
1547 ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1548 if (ca == NULL)
1549 return NULL;
1550
1551 /* get memory for all device vars */
1552 KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1553 if (ca->ca_devsize > 0) {
1554 dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1555 } else {
1556 dev_private = NULL;
1557 }
1558 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1559
1560 dev->dv_handle = args->devhandle;
1561
1562 dev->dv_class = cd->cd_class;
1563 dev->dv_cfdata = cf;
1564 dev->dv_cfdriver = cd;
1565 dev->dv_cfattach = ca;
1566 dev->dv_activity_count = 0;
1567 dev->dv_activity_handlers = NULL;
1568 dev->dv_private = dev_private;
1569 dev->dv_flags = ca->ca_flags; /* inherit flags from class */
1570 dev->dv_attaching = curlwp;
1571
1572 myunit = config_unit_alloc(dev, cd, cf);
1573 if (myunit == -1) {
1574 config_devfree(dev);
1575 return NULL;
1576 }
1577
1578 /* compute length of name and decimal expansion of unit number */
1579 lname = strlen(cd->cd_name);
1580 xunit = number(&num[sizeof(num)], myunit);
1581 lunit = &num[sizeof(num)] - xunit;
1582 if (lname + lunit > sizeof(dev->dv_xname))
1583 panic("config_devalloc: device name too long");
1584
1585 dvl = device_getlock(dev);
1586
1587 mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1588 cv_init(&dvl->dvl_cv, "pmfsusp");
1589
1590 memcpy(dev->dv_xname, cd->cd_name, lname);
1591 memcpy(dev->dv_xname + lname, xunit, lunit);
1592 dev->dv_parent = parent;
1593 if (parent != NULL)
1594 dev->dv_depth = parent->dv_depth + 1;
1595 else
1596 dev->dv_depth = 0;
1597 dev->dv_flags |= DVF_ACTIVE; /* always initially active */
1598 if (args->locators) {
1599 KASSERT(parent); /* no locators at root */
1600 ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1601 dev->dv_locators =
1602 kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1603 *dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1604 memcpy(dev->dv_locators, args->locators,
1605 sizeof(int) * ia->ci_loclen);
1606 }
1607 dev->dv_properties = prop_dictionary_create();
1608 KASSERT(dev->dv_properties != NULL);
1609
1610 prop_dictionary_set_string_nocopy(dev->dv_properties,
1611 "device-driver", dev->dv_cfdriver->cd_name);
1612 prop_dictionary_set_uint16(dev->dv_properties,
1613 "device-unit", dev->dv_unit);
1614 if (parent != NULL) {
1615 prop_dictionary_set_string(dev->dv_properties,
1616 "device-parent", device_xname(parent));
1617 }
1618
1619 dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
1620 KM_SLEEP);
1621 localcount_init(dev->dv_localcount);
1622
1623 if (dev->dv_cfdriver->cd_attrs != NULL)
1624 config_add_attrib_dict(dev);
1625
1626 return dev;
1627 }
1628
1629 /*
1630 * Create an array of device attach attributes and add it
1631 * to the device's dv_properties dictionary.
1632 *
1633 * <key>interface-attributes</key>
1634 * <array>
1635 * <dict>
1636 * <key>attribute-name</key>
1637 * <string>foo</string>
1638 * <key>locators</key>
1639 * <array>
1640 * <dict>
1641 * <key>loc-name</key>
1642 * <string>foo-loc1</string>
1643 * </dict>
1644 * <dict>
1645 * <key>loc-name</key>
1646 * <string>foo-loc2</string>
1647 * <key>default</key>
1648 * <string>foo-loc2-default</string>
1649 * </dict>
1650 * ...
1651 * </array>
1652 * </dict>
1653 * ...
1654 * </array>
1655 */
1656
1657 static void
1658 config_add_attrib_dict(device_t dev)
1659 {
1660 int i, j;
1661 const struct cfiattrdata *ci;
1662 prop_dictionary_t attr_dict, loc_dict;
1663 prop_array_t attr_array, loc_array;
1664
1665 if ((attr_array = prop_array_create()) == NULL)
1666 return;
1667
1668 for (i = 0; ; i++) {
1669 if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1670 break;
1671 if ((attr_dict = prop_dictionary_create()) == NULL)
1672 break;
1673 prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1674 ci->ci_name);
1675
1676 /* Create an array of the locator names and defaults */
1677
1678 if (ci->ci_loclen != 0 &&
1679 (loc_array = prop_array_create()) != NULL) {
1680 for (j = 0; j < ci->ci_loclen; j++) {
1681 loc_dict = prop_dictionary_create();
1682 if (loc_dict == NULL)
1683 continue;
1684 prop_dictionary_set_string_nocopy(loc_dict,
1685 "loc-name", ci->ci_locdesc[j].cld_name);
1686 if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1687 prop_dictionary_set_string_nocopy(
1688 loc_dict, "default",
1689 ci->ci_locdesc[j].cld_defaultstr);
1690 prop_array_set(loc_array, j, loc_dict);
1691 prop_object_release(loc_dict);
1692 }
1693 prop_dictionary_set_and_rel(attr_dict, "locators",
1694 loc_array);
1695 }
1696 prop_array_add(attr_array, attr_dict);
1697 prop_object_release(attr_dict);
1698 }
1699 if (i == 0)
1700 prop_object_release(attr_array);
1701 else
1702 prop_dictionary_set_and_rel(dev->dv_properties,
1703 "interface-attributes", attr_array);
1704
1705 return;
1706 }
1707
1708 /*
1709 * Attach a found device.
1710 */
1711 static device_t
1712 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1713 const struct cfargs_internal * const args)
1714 {
1715 device_t dev;
1716 struct cftable *ct;
1717 const char *drvname;
1718 bool deferred;
1719
1720 KASSERT(KERNEL_LOCKED_P());
1721
1722 dev = config_devalloc(parent, cf, args);
1723 if (!dev)
1724 panic("config_attach: allocation of device softc failed");
1725
1726 /* XXX redundant - see below? */
1727 if (cf->cf_fstate != FSTATE_STAR) {
1728 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1729 cf->cf_fstate = FSTATE_FOUND;
1730 }
1731
1732 config_devlink(dev);
1733
1734 if (config_do_twiddle && cold)
1735 twiddle();
1736 else
1737 aprint_naive("Found ");
1738 /*
1739 * We want the next two printfs for normal, verbose, and quiet,
1740 * but not silent (in which case, we're twiddling, instead).
1741 */
1742 if (parent == ROOT) {
1743 aprint_naive("%s (root)", device_xname(dev));
1744 aprint_normal("%s (root)", device_xname(dev));
1745 } else {
1746 aprint_naive("%s at %s", device_xname(dev),
1747 device_xname(parent));
1748 aprint_normal("%s at %s", device_xname(dev),
1749 device_xname(parent));
1750 if (print)
1751 (void) (*print)(aux, NULL);
1752 }
1753
1754 /*
1755 * Before attaching, clobber any unfound devices that are
1756 * otherwise identical.
1757 * XXX code above is redundant?
1758 */
1759 drvname = dev->dv_cfdriver->cd_name;
1760 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1761 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1762 if (STREQ(cf->cf_name, drvname) &&
1763 cf->cf_unit == dev->dv_unit) {
1764 if (cf->cf_fstate == FSTATE_NOTFOUND)
1765 cf->cf_fstate = FSTATE_FOUND;
1766 }
1767 }
1768 }
1769 device_register(dev, aux);
1770
1771 /* Let userland know */
1772 devmon_report_device(dev, true);
1773
1774 /*
1775 * Prevent detach until the driver's attach function, and all
1776 * deferred actions, have finished.
1777 */
1778 config_pending_incr(dev);
1779
1780 /* Call the driver's attach function. */
1781 (*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1782
1783 /*
1784 * Allow other threads to acquire references to the device now
1785 * that the driver's attach function is done.
1786 */
1787 mutex_enter(&config_misc_lock);
1788 KASSERT(dev->dv_attaching == curlwp);
1789 dev->dv_attaching = NULL;
1790 cv_broadcast(&config_misc_cv);
1791 mutex_exit(&config_misc_lock);
1792
1793 /*
1794 * Synchronous parts of attach are done. Allow detach, unless
1795 * the driver's attach function scheduled deferred actions.
1796 */
1797 config_pending_decr(dev);
1798
1799 mutex_enter(&config_misc_lock);
1800 deferred = (dev->dv_pending != 0);
1801 mutex_exit(&config_misc_lock);
1802
1803 if (!deferred && !device_pmf_is_registered(dev))
1804 aprint_debug_dev(dev,
1805 "WARNING: power management not supported\n");
1806
1807 config_process_deferred(&deferred_config_queue, dev);
1808
1809 device_register_post_config(dev, aux);
1810 rnd_add_uint32(&rnd_autoconf_source, 0);
1811 return dev;
1812 }
1813
1814 device_t
1815 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1816 const struct cfargs *cfargs)
1817 {
1818 struct cfargs_internal store;
1819
1820 KASSERT(KERNEL_LOCKED_P());
1821
1822 return config_attach_internal(parent, cf, aux, print,
1823 cfargs_canonicalize(cfargs, &store));
1824 }
1825
1826 /*
1827 * As above, but for pseudo-devices. Pseudo-devices attached in this
1828 * way are silently inserted into the device tree, and their children
1829 * attached.
1830 *
1831 * Note that because pseudo-devices are attached silently, any information
1832 * the attach routine wishes to print should be prefixed with the device
1833 * name by the attach routine.
1834 */
1835 device_t
1836 config_attach_pseudo(cfdata_t cf)
1837 {
1838 device_t dev;
1839
1840 KERNEL_LOCK(1, NULL);
1841
1842 struct cfargs_internal args = { };
1843 dev = config_devalloc(ROOT, cf, &args);
1844 if (!dev)
1845 goto out;
1846
1847 /* XXX mark busy in cfdata */
1848
1849 if (cf->cf_fstate != FSTATE_STAR) {
1850 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1851 cf->cf_fstate = FSTATE_FOUND;
1852 }
1853
1854 config_devlink(dev);
1855
1856 #if 0 /* XXXJRT not yet */
1857 device_register(dev, NULL); /* like a root node */
1858 #endif
1859
1860 /* Let userland know */
1861 devmon_report_device(dev, true);
1862
1863 /*
1864 * Prevent detach until the driver's attach function, and all
1865 * deferred actions, have finished.
1866 */
1867 config_pending_incr(dev);
1868
1869 /* Call the driver's attach function. */
1870 (*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL);
1871
1872 /*
1873 * Allow other threads to acquire references to the device now
1874 * that the driver's attach function is done.
1875 */
1876 mutex_enter(&config_misc_lock);
1877 KASSERT(dev->dv_attaching == curlwp);
1878 dev->dv_attaching = NULL;
1879 cv_broadcast(&config_misc_cv);
1880 mutex_exit(&config_misc_lock);
1881
1882 /*
1883 * Synchronous parts of attach are done. Allow detach, unless
1884 * the driver's attach function scheduled deferred actions.
1885 */
1886 config_pending_decr(dev);
1887
1888 config_process_deferred(&deferred_config_queue, dev);
1889
1890 out: KERNEL_UNLOCK_ONE(NULL);
1891 return dev;
1892 }
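
/*
 * Illustrative sketch (assumption, not part of this file): a cloning
 * pseudo-device driver "foo" with cfdriver foo_cd builds a cfdata record
 * by hand and hands it to config_attach_pseudo(), roughly:
 *
 *	cfdata_t cf = kmem_zalloc(sizeof(*cf), KM_SLEEP);
 *	cf->cf_name = foo_cd.cd_name;
 *	cf->cf_atname = foo_cd.cd_name;
 *	cf->cf_unit = unit;
 *	cf->cf_fstate = FSTATE_STAR;
 *
 *	if (config_attach_pseudo(cf) == NULL)
 *		kmem_free(cf, sizeof(*cf));
 */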
1893
1894 /*
1895 * Caller must hold alldevs_lock.
1896 */
1897 static void
1898 config_collect_garbage(struct devicelist *garbage)
1899 {
1900 device_t dv;
1901
1902 KASSERT(!cpu_intr_p());
1903 KASSERT(!cpu_softintr_p());
1904 KASSERT(mutex_owned(&alldevs_lock));
1905
1906 while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
1907 TAILQ_FOREACH(dv, &alldevs, dv_list) {
1908 if (dv->dv_del_gen != 0)
1909 break;
1910 }
1911 if (dv == NULL) {
1912 alldevs_garbage = false;
1913 break;
1914 }
1915 config_devunlink(dv, garbage);
1916 }
1917 KASSERT(mutex_owned(&alldevs_lock));
1918 }
1919
1920 static void
1921 config_dump_garbage(struct devicelist *garbage)
1922 {
1923 device_t dv;
1924
1925 while ((dv = TAILQ_FIRST(garbage)) != NULL) {
1926 TAILQ_REMOVE(garbage, dv, dv_list);
1927 config_devdelete(dv);
1928 }
1929 }
1930
1931 static int
1932 config_detach_enter(device_t dev)
1933 {
1934 struct lwp *l __diagused;
1935 int error = 0;
1936
1937 mutex_enter(&config_misc_lock);
1938
1939 /*
1940 * Wait until attach has fully completed, and until any
1941 * concurrent detach (e.g., drvctl racing with USB event
1942 * thread) has completed.
1943 *
1944 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
1945 * deviter) to ensure the winner of the race doesn't free the
1946 * device, leading the loser of the race into a use-after-free.
1947 *
1948 * XXX Not all callers do this!
1949 */
1950 while (dev->dv_pending || dev->dv_detaching) {
1951 KASSERTMSG(dev->dv_detaching != curlwp,
1952 "recursively detaching %s", device_xname(dev));
1953 error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
1954 if (error)
1955 goto out;
1956 }
1957
1958 /*
1959 * Attach has completed, and no other concurrent detach is
1960 * running. Claim the device for detaching. This will cause
1961 * all new attempts to acquire references to block.
1962 */
1963 KASSERTMSG((l = dev->dv_attaching) == NULL,
1964 "lwp %ld [%s] @ %p attaching %s",
1965 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1966 device_xname(dev));
1967 KASSERTMSG((l = dev->dv_detaching) == NULL,
1968 "lwp %ld [%s] @ %p detaching %s",
1969 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1970 device_xname(dev));
1971 dev->dv_detaching = curlwp;
1972
1973 out: mutex_exit(&config_misc_lock);
1974 return error;
1975 }
1976
1977 static void
1978 config_detach_exit(device_t dev)
1979 {
1980 struct lwp *l __diagused;
1981
1982 mutex_enter(&config_misc_lock);
1983 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
1984 device_xname(dev));
1985 KASSERTMSG((l = dev->dv_detaching) == curlwp,
1986 "lwp %ld [%s] @ %p detaching %s",
1987 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1988 device_xname(dev));
1989 dev->dv_detaching = NULL;
1990 cv_broadcast(&config_misc_cv);
1991 mutex_exit(&config_misc_lock);
1992 }
1993
1994 /*
1995 * Detach a device. Optionally forced (e.g. because of hardware
1996 * removal) and quiet. Returns zero if successful, non-zero
1997 * (an error code) otherwise.
1998 *
1999 * Note that this code wants to be run from a process context, so
2000 * that the detach can sleep to allow processes which have a device
2001 * open to run and unwind their stacks.
2002 */
2003 int
2004 config_detach(device_t dev, int flags)
2005 {
2006 struct alldevs_foray af;
2007 struct cftable *ct;
2008 cfdata_t cf;
2009 const struct cfattach *ca;
2010 struct cfdriver *cd;
2011 device_t d __diagused;
2012 int rv = 0;
2013
2014 KERNEL_LOCK(1, NULL);
2015
2016 cf = dev->dv_cfdata;
2017 KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
2018 cf->cf_fstate == FSTATE_STAR),
2019 "config_detach: %s: bad device fstate: %d",
2020 device_xname(dev), cf ? cf->cf_fstate : -1);
2021
2022 cd = dev->dv_cfdriver;
2023 KASSERT(cd != NULL);
2024
2025 ca = dev->dv_cfattach;
2026 KASSERT(ca != NULL);
2027
2028 /*
2029 * Only one detach at a time, please -- and not until fully
2030 * attached.
2031 */
2032 rv = config_detach_enter(dev);
2033 if (rv) {
2034 KERNEL_UNLOCK_ONE(NULL);
2035 return rv;
2036 }
2037
2038 mutex_enter(&alldevs_lock);
2039 if (dev->dv_del_gen != 0) {
2040 mutex_exit(&alldevs_lock);
2041 #ifdef DIAGNOSTIC
2042 printf("%s: %s is already detached\n", __func__,
2043 device_xname(dev));
2044 #endif /* DIAGNOSTIC */
2045 config_detach_exit(dev);
2046 KERNEL_UNLOCK_ONE(NULL);
2047 return ENOENT;
2048 }
2049 alldevs_nwrite++;
2050 mutex_exit(&alldevs_lock);
2051
2052 /*
2053 * Call the driver's .ca_detach function, unless it has none or
2054 * we are skipping it because it's unforced shutdown time and
2055 * the driver didn't ask to detach on shutdown.
2056 */
2057 if (!detachall &&
2058 (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2059 (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2060 rv = EOPNOTSUPP;
2061 } else if (ca->ca_detach != NULL) {
2062 rv = (*ca->ca_detach)(dev, flags);
2063 } else
2064 rv = EOPNOTSUPP;
2065
2066 KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
2067 device_xname(dev), rv);
2068
2069 /*
2070 * If it was not possible to detach the device, then we either
2071 * panic() (for the forced but failed case), or return an error.
2072 */
2073 if (rv) {
2074 /*
2075 * Detach failed -- likely EOPNOTSUPP or EBUSY. Driver
2076 * must not have called config_detach_commit.
2077 */
2078 KASSERTMSG(!dev->dv_detach_committed,
2079 "%s committed to detaching and then backed out, error=%d",
2080 device_xname(dev), rv);
2081 if (flags & DETACH_FORCE) {
2082 panic("config_detach: forced detach of %s failed (%d)",
2083 device_xname(dev), rv);
2084 }
2085 goto out;
2086 }
2087
2088 /*
2089 * The device has now been successfully detached.
2090 */
2091 dev->dv_detach_done = true;
2092
2093 /*
2094 * If .ca_detach didn't commit to detach, then do that for it.
2095 * This wakes any pending device_lookup_acquire calls so they
2096 * will fail.
2097 */
2098 config_detach_commit(dev);
2099
2100 /*
2101 * If it was possible to detach the device, ensure that the
2102 * device is deactivated.
2103 */
2104 dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
2105
2106 /*
2107 * Wait for all device_lookup_acquire references -- mostly, for
2108 * all attempts to open the device -- to drain. It is the
2109 * responsibility of .ca_detach to ensure anything with open
2110 * references will be interrupted and release them promptly,
2111 * not block indefinitely. All new attempts to acquire
2112 * references will fail, as config_detach_commit has arranged
2113 * by now.
2114 */
2115 mutex_enter(&config_misc_lock);
2116 localcount_drain(dev->dv_localcount,
2117 &config_misc_cv, &config_misc_lock);
2118 mutex_exit(&config_misc_lock);
2119
2120 /* Let userland know */
2121 devmon_report_device(dev, false);
2122
2123 #ifdef DIAGNOSTIC
2124 /*
2125 * Sanity: If you're successfully detached, you should have no
2126 * children. (Note that because children must be attached
2127 * after parents, we only need to search the latter part of
2128 * the list.)
2129 */
2130 mutex_enter(&alldevs_lock);
2131 for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2132 d = TAILQ_NEXT(d, dv_list)) {
2133 if (d->dv_parent == dev && d->dv_del_gen == 0) {
2134 printf("config_detach: detached device %s"
2135 " has children %s\n", device_xname(dev),
2136 device_xname(d));
2137 panic("config_detach");
2138 }
2139 }
2140 mutex_exit(&alldevs_lock);
2141 #endif
2142
2143 /* notify the parent that the child is gone */
2144 if (dev->dv_parent) {
2145 device_t p = dev->dv_parent;
2146 if (p->dv_cfattach->ca_childdetached)
2147 (*p->dv_cfattach->ca_childdetached)(p, dev);
2148 }
2149
2150 /*
2151 * Mark cfdata to show that the unit can be reused, if possible.
2152 */
2153 TAILQ_FOREACH(ct, &allcftables, ct_list) {
2154 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2155 if (STREQ(cf->cf_name, cd->cd_name)) {
2156 if (cf->cf_fstate == FSTATE_FOUND &&
2157 cf->cf_unit == dev->dv_unit)
2158 cf->cf_fstate = FSTATE_NOTFOUND;
2159 }
2160 }
2161 }
2162
2163 if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2164 aprint_normal_dev(dev, "detached\n");
2165
2166 out:
2167 config_detach_exit(dev);
2168
2169 config_alldevs_enter(&af);
2170 KASSERT(alldevs_nwrite != 0);
2171 --alldevs_nwrite;
2172 if (rv == 0 && dev->dv_del_gen == 0) {
2173 if (alldevs_nwrite == 0 && alldevs_nread == 0)
2174 config_devunlink(dev, &af.af_garbage);
2175 else {
2176 dev->dv_del_gen = alldevs_gen;
2177 alldevs_garbage = true;
2178 }
2179 }
2180 config_alldevs_exit(&af);
2181
2182 KERNEL_UNLOCK_ONE(NULL);
2183
2184 return rv;
2185 }
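/*
 * Illustrative sketch (not part of this file): a hotplug-capable bus
 * driver typically forces the detach of a vanished child from thread
 * context.  The xxbus names and the sc_child member are hypothetical.
 *
 *	static void
 *	xxbus_child_gone(struct xxbus_softc *sc)
 *	{
 *		if (sc->sc_child != NULL &&
 *		    config_detach(sc->sc_child, DETACH_FORCE) == 0)
 *			sc->sc_child = NULL;
 *	}
 */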
2186
2187 /*
2188 * config_detach_commit(dev)
2189 *
2190 * Issued by a driver's .ca_detach routine to notify anyone
2191 * waiting in device_lookup_acquire that the driver is committed
2192 * to detaching the device, which allows device_lookup_acquire to
2193 * wake up and fail immediately.
2194 *
2195 * Safe to call multiple times -- idempotent. Must be called
2196 * during config_detach_enter/exit. Safe to use with
2197 * device_lookup because the device is not actually removed from
2198 * the table until after config_detach_exit.
2199 */
2200 void
2201 config_detach_commit(device_t dev)
2202 {
2203 struct lwp *l __diagused;
2204
2205 mutex_enter(&config_misc_lock);
2206 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
2207 device_xname(dev));
2208 KASSERTMSG((l = dev->dv_detaching) == curlwp,
2209 "lwp %ld [%s] @ %p detaching %s",
2210 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2211 device_xname(dev));
2212 dev->dv_detach_committed = true;
2213 cv_broadcast(&config_misc_cv);
2214 mutex_exit(&config_misc_lock);
2215 }
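/*
 * Illustrative sketch (hypothetical xxfoo driver): a .ca_detach routine
 * decides whether it can let the device go, commits once that decision
 * is final, and only then tears down its soft state.
 *
 *	static int
 *	xxfoo_detach(device_t self, int flags)
 *	{
 *		struct xxfoo_softc *sc = device_private(self);
 *
 *		if (sc->sc_busy && (flags & DETACH_FORCE) == 0)
 *			return EBUSY;	/* backed out, never committed */
 *
 *		config_detach_commit(self); /* wake device_lookup_acquire */
 *		... interrupt open users, free resources ...
 *		return 0;
 *	}
 */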
2216
2217 int
2218 config_detach_children(device_t parent, int flags)
2219 {
2220 device_t dv;
2221 deviter_t di;
2222 int error = 0;
2223
2224 KASSERT(KERNEL_LOCKED_P());
2225
2226 for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2227 dv = deviter_next(&di)) {
2228 if (device_parent(dv) != parent)
2229 continue;
2230 if ((error = config_detach(dv, flags)) != 0)
2231 break;
2232 }
2233 deviter_release(&di);
2234 return error;
2235 }
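/*
 * Illustrative sketch (hypothetical xxbus driver): a bus .ca_detach
 * usually detaches its children first and propagates any error before
 * touching its own resources.
 *
 *	static int
 *	xxbus_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		if ((error = config_detach_children(self, flags)) != 0)
 *			return error;
 *		... tear down the bus itself ...
 *		return 0;
 *	}
 */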
2236
2237 device_t
2238 shutdown_first(struct shutdown_state *s)
2239 {
2240 if (!s->initialized) {
2241 deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2242 s->initialized = true;
2243 }
2244 return shutdown_next(s);
2245 }
2246
2247 device_t
2248 shutdown_next(struct shutdown_state *s)
2249 {
2250 device_t dv;
2251
2252 while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2253 ;
2254
2255 if (dv == NULL)
2256 s->initialized = false;
2257
2258 return dv;
2259 }
2260
2261 bool
2262 config_detach_all(int how)
2263 {
2264 static struct shutdown_state s;
2265 device_t curdev;
2266 bool progress = false;
2267 int flags;
2268
2269 KERNEL_LOCK(1, NULL);
2270
2271 if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2272 goto out;
2273
2274 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2275 flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2276 else
2277 flags = DETACH_SHUTDOWN;
2278
2279 for (curdev = shutdown_first(&s); curdev != NULL;
2280 curdev = shutdown_next(&s)) {
2281 aprint_debug(" detaching %s, ", device_xname(curdev));
2282 if (config_detach(curdev, flags) == 0) {
2283 progress = true;
2284 aprint_debug("success.");
2285 } else
2286 aprint_debug("failed.");
2287 }
2288
2289 out: KERNEL_UNLOCK_ONE(NULL);
2290 return progress;
2291 }
2292
2293 static bool
2294 device_is_ancestor_of(device_t ancestor, device_t descendant)
2295 {
2296 device_t dv;
2297
2298 for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2299 if (device_parent(dv) == ancestor)
2300 return true;
2301 }
2302 return false;
2303 }
2304
2305 int
2306 config_deactivate(device_t dev)
2307 {
2308 deviter_t di;
2309 const struct cfattach *ca;
2310 device_t descendant;
2311 int s, rv = 0, oflags;
2312
2313 for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2314 descendant != NULL;
2315 descendant = deviter_next(&di)) {
2316 if (dev != descendant &&
2317 !device_is_ancestor_of(dev, descendant))
2318 continue;
2319
2320 if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2321 continue;
2322
2323 ca = descendant->dv_cfattach;
2324 oflags = descendant->dv_flags;
2325
2326 descendant->dv_flags &= ~DVF_ACTIVE;
2327 if (ca->ca_activate == NULL)
2328 continue;
2329 s = splhigh();
2330 rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2331 splx(s);
2332 if (rv != 0)
2333 descendant->dv_flags = oflags;
2334 }
2335 deviter_release(&di);
2336 return rv;
2337 }
2338
2339 /*
2340 * Defer the configuration of the specified device until all
2341 * of its parent's devices have been attached.
2342 */
2343 void
2344 config_defer(device_t dev, void (*func)(device_t))
2345 {
2346 struct deferred_config *dc;
2347
2348 if (dev->dv_parent == NULL)
2349 panic("config_defer: can't defer config of a root device");
2350
2351 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2352
2353 config_pending_incr(dev);
2354
2355 mutex_enter(&config_misc_lock);
2356 #ifdef DIAGNOSTIC
2357 struct deferred_config *odc;
2358 TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2359 if (odc->dc_dev == dev)
2360 panic("config_defer: deferred twice");
2361 }
2362 #endif
2363 dc->dc_dev = dev;
2364 dc->dc_func = func;
2365 TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2366 mutex_exit(&config_misc_lock);
2367 }
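/*
 * Illustrative sketch (hypothetical xxchild driver): an attach routine
 * that depends on sibling devices hanging off the same parent defers the
 * dependent part of its configuration.
 *
 *	static void
 *	xxchild_attach(device_t parent, device_t self, void *aux)
 *	{
 *		... basic initialization ...
 *		config_defer(self, xxchild_deferred);
 *	}
 *
 *	static void
 *	xxchild_deferred(device_t self)
 *	{
 *		... runs after all of the parent's children have attached ...
 *	}
 */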
2368
2369 /*
2370 * Defer some autoconfiguration for a device until after interrupts
2371 * are enabled.
2372 */
2373 void
2374 config_interrupts(device_t dev, void (*func)(device_t))
2375 {
2376 struct deferred_config *dc;
2377
2378 /*
2379 * If interrupts are enabled, callback now.
2380 */
2381 if (cold == 0) {
2382 (*func)(dev);
2383 return;
2384 }
2385
2386 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2387
2388 config_pending_incr(dev);
2389
2390 mutex_enter(&config_misc_lock);
2391 #ifdef DIAGNOSTIC
2392 struct deferred_config *odc;
2393 TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2394 if (odc->dc_dev == dev)
2395 panic("config_interrupts: deferred twice");
2396 }
2397 #endif
2398 dc->dc_dev = dev;
2399 dc->dc_func = func;
2400 TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2401 mutex_exit(&config_misc_lock);
2402 }
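/*
 * Illustrative sketch (hypothetical xxdev driver): probing that needs
 * working interrupts is postponed with config_interrupts(); if the
 * system is already warm the callback simply runs immediately.
 *
 *	static void
 *	xxdev_attach(device_t parent, device_t self, void *aux)
 *	{
 *		... map registers, establish interrupt handler ...
 *		config_interrupts(self, xxdev_attach_intr);
 *	}
 *
 *	static void
 *	xxdev_attach_intr(device_t self)
 *	{
 *		... interrupt-driven part of attach ...
 *	}
 */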
2403
2404 /*
2405 * Defer some autoconfiguration for a device until after root file system
2406 * is mounted (to load firmware etc).
2407 */
2408 void
2409 config_mountroot(device_t dev, void (*func)(device_t))
2410 {
2411 struct deferred_config *dc;
2412
2413 /*
2414 * If root file system is mounted, callback now.
2415 */
2416 if (root_is_mounted) {
2417 (*func)(dev);
2418 return;
2419 }
2420
2421 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2422
2423 mutex_enter(&config_misc_lock);
2424 #ifdef DIAGNOSTIC
2425 struct deferred_config *odc;
2426 TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2427 if (odc->dc_dev == dev)
2428 panic("%s: deferred twice", __func__);
2429 }
2430 #endif
2431
2432 dc->dc_dev = dev;
2433 dc->dc_func = func;
2434 TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2435 mutex_exit(&config_misc_lock);
2436 }
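/*
 * Illustrative sketch (hypothetical xxradio driver): firmware stored on
 * the root file system cannot be loaded during early autoconfiguration,
 * so that step is deferred until root is mounted.
 *
 *	static void
 *	xxradio_attach(device_t parent, device_t self, void *aux)
 *	{
 *		... hardware-independent setup ...
 *		config_mountroot(self, xxradio_load_firmware);
 *	}
 *
 *	static void
 *	xxradio_load_firmware(device_t self)
 *	{
 *		... firmware(9) load and final bring-up ...
 *	}
 */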
2437
2438 /*
2439 * Process a deferred configuration queue.
2440 */
2441 static void
2442 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2443 {
2444 struct deferred_config *dc;
2445
2446 KASSERT(KERNEL_LOCKED_P());
2447
2448 mutex_enter(&config_misc_lock);
2449 dc = TAILQ_FIRST(queue);
2450 while (dc) {
2451 if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2452 TAILQ_REMOVE(queue, dc, dc_queue);
2453 mutex_exit(&config_misc_lock);
2454
2455 (*dc->dc_func)(dc->dc_dev);
2456 config_pending_decr(dc->dc_dev);
2457 kmem_free(dc, sizeof(*dc));
2458
2459 mutex_enter(&config_misc_lock);
2460 /* Restart, queue might have changed */
2461 dc = TAILQ_FIRST(queue);
2462 } else {
2463 dc = TAILQ_NEXT(dc, dc_queue);
2464 }
2465 }
2466 mutex_exit(&config_misc_lock);
2467 }
2468
2469 /*
2470 * Manipulate the config_pending semaphore.
2471 */
2472 void
2473 config_pending_incr(device_t dev)
2474 {
2475
2476 mutex_enter(&config_misc_lock);
2477 KASSERTMSG(dev->dv_pending < INT_MAX,
2478 "%s: excess config_pending_incr", device_xname(dev));
2479 if (dev->dv_pending++ == 0)
2480 TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2481 #ifdef DEBUG_AUTOCONF
2482 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2483 #endif
2484 mutex_exit(&config_misc_lock);
2485 }
2486
2487 void
2488 config_pending_decr(device_t dev)
2489 {
2490
2491 mutex_enter(&config_misc_lock);
2492 KASSERTMSG(dev->dv_pending > 0,
2493 "%s: excess config_pending_decr", device_xname(dev));
2494 if (--dev->dv_pending == 0) {
2495 TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2496 cv_broadcast(&config_misc_cv);
2497 }
2498 #ifdef DEBUG_AUTOCONF
2499 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2500 #endif
2501 mutex_exit(&config_misc_lock);
2502 }
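/*
 * Illustrative sketch (hypothetical xxdisk driver): a driver that
 * completes its attach in a kernel thread brackets the asynchronous work
 * with the pending counter so config_finalize() waits for it.
 *
 *	static void
 *	xxdisk_attach(device_t parent, device_t self, void *aux)
 *	{
 *		config_pending_incr(self);
 *		... kthread_create(... xxdisk_attach_thread, self ...) ...
 *	}
 *
 *	static void
 *	xxdisk_attach_thread(void *arg)
 *	{
 *		device_t self = arg;
 *
 *		... long-running discovery ...
 *		config_pending_decr(self);
 *		kthread_exit(0);
 *	}
 */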
2503
2504 /*
2505 * Register a "finalization" routine. Finalization routines are
2506 * called iteratively once all real devices have been found during
2507 * autoconfiguration, for as long as any one finalizer has done
2508 * any work.
2509 */
2510 int
2511 config_finalize_register(device_t dev, int (*fn)(device_t))
2512 {
2513 struct finalize_hook *f;
2514 int error = 0;
2515
2516 KERNEL_LOCK(1, NULL);
2517
2518 /*
2519 * If finalization has already been done, invoke the
2520 * callback function now.
2521 */
2522 if (config_finalize_done) {
2523 while ((*fn)(dev) != 0)
2524 /* loop */ ;
2525 goto out;
2526 }
2527
2528 /* Ensure this isn't already on the list. */
2529 TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2530 if (f->f_func == fn && f->f_dev == dev) {
2531 error = EEXIST;
2532 goto out;
2533 }
2534 }
2535
2536 f = kmem_alloc(sizeof(*f), KM_SLEEP);
2537 f->f_func = fn;
2538 f->f_dev = dev;
2539 TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2540
2541 /* Success! */
2542 error = 0;
2543
2544 out: KERNEL_UNLOCK_ONE(NULL);
2545 return error;
2546 }
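/*
 * Illustrative sketch (hypothetical xxboot driver): a finalizer returns
 * non-zero only when it actually did work, so the loop in
 * config_finalize() keeps calling hooks until everything settles.
 *
 *	static int
 *	xxboot_finalize(device_t self)
 *	{
 *		if (... nothing left to wire up ...)
 *			return 0;
 *		... hook up late-appearing resources ...
 *		return 1;	/* did work; run all hooks again */
 *	}
 *
 *	...
 *	config_finalize_register(self, xxboot_finalize);
 */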
2547
2548 void
2549 config_finalize(void)
2550 {
2551 struct finalize_hook *f;
2552 struct pdevinit *pdev;
2553 extern struct pdevinit pdevinit[];
2554 int errcnt, rv;
2555
2556 /*
2557 * Now that device driver threads have been created, wait for
2558 * them to finish any deferred autoconfiguration.
2559 */
2560 mutex_enter(&config_misc_lock);
2561 while (!TAILQ_EMPTY(&config_pending)) {
2562 device_t dev;
2563 int error;
2564
2565 error = cv_timedwait(&config_misc_cv, &config_misc_lock,
2566 mstohz(1000));
2567 if (error == EWOULDBLOCK) {
2568 aprint_debug("waiting for devices:");
2569 TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2570 aprint_debug(" %s", device_xname(dev));
2571 aprint_debug("\n");
2572 }
2573 }
2574 mutex_exit(&config_misc_lock);
2575
2576 KERNEL_LOCK(1, NULL);
2577
2578 /* Attach pseudo-devices. */
2579 for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2580 (*pdev->pdev_attach)(pdev->pdev_count);
2581
2582 /* Run the hooks until none of them does any work. */
2583 do {
2584 rv = 0;
2585 TAILQ_FOREACH(f, &config_finalize_list, f_list)
2586 rv |= (*f->f_func)(f->f_dev);
2587 } while (rv != 0);
2588
2589 config_finalize_done = 1;
2590
2591 /* Now free all the hooks. */
2592 while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2593 TAILQ_REMOVE(&config_finalize_list, f, f_list);
2594 kmem_free(f, sizeof(*f));
2595 }
2596
2597 KERNEL_UNLOCK_ONE(NULL);
2598
2599 errcnt = aprint_get_error_count();
2600 if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2601 (boothowto & AB_VERBOSE) == 0) {
2602 mutex_enter(&config_misc_lock);
2603 if (config_do_twiddle) {
2604 config_do_twiddle = 0;
2605 printf_nolog(" done.\n");
2606 }
2607 mutex_exit(&config_misc_lock);
2608 }
2609 if (errcnt != 0) {
2610 printf("WARNING: %d error%s while detecting hardware; "
2611 "check system log.\n", errcnt,
2612 errcnt == 1 ? "" : "s");
2613 }
2614 }
2615
2616 void
2617 config_twiddle_init(void)
2618 {
2619
2620 if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2621 config_do_twiddle = 1;
2622 }
2623 callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2624 }
2625
2626 void
2627 config_twiddle_fn(void *cookie)
2628 {
2629
2630 mutex_enter(&config_misc_lock);
2631 if (config_do_twiddle) {
2632 twiddle();
2633 callout_schedule(&config_twiddle_ch, mstohz(100));
2634 }
2635 mutex_exit(&config_misc_lock);
2636 }
2637
2638 static void
2639 config_alldevs_enter(struct alldevs_foray *af)
2640 {
2641 TAILQ_INIT(&af->af_garbage);
2642 mutex_enter(&alldevs_lock);
2643 config_collect_garbage(&af->af_garbage);
2644 }
2645
2646 static void
2647 config_alldevs_exit(struct alldevs_foray *af)
2648 {
2649 mutex_exit(&alldevs_lock);
2650 config_dump_garbage(&af->af_garbage);
2651 }
2652
2653 /*
2654 * device_lookup:
2655 *
2656 * Look up a device instance for a given driver.
2657 *
2658 * Caller is responsible for ensuring the device's state is
2659 * stable, either by holding a reference already obtained with
2660 * device_lookup_acquire or by otherwise ensuring the device is
2661 * attached and can't be detached (e.g., holding an open device
2662 * node and ensuring *_detach calls vdevgone).
2663 *
2664 * XXX Find a way to assert this.
2665 *
2666 * Safe for use up to and including interrupt context at IPL_VM.
2667 * Never sleeps.
2668 */
2669 device_t
2670 device_lookup(cfdriver_t cd, int unit)
2671 {
2672 device_t dv;
2673
2674 mutex_enter(&alldevs_lock);
2675 if (unit < 0 || unit >= cd->cd_ndevs)
2676 dv = NULL;
2677 else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2678 dv = NULL;
2679 mutex_exit(&alldevs_lock);
2680
2681 return dv;
2682 }
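/*
 * Illustrative sketch: a character-device open routine can map a unit
 * number to its softc, provided something else (e.g. vdevgone() in the
 * driver's detach path) keeps the device from vanishing while open.
 * The xxfoo names are hypothetical.
 *
 *	extern struct cfdriver xxfoo_cd;
 *
 *	device_t dv = device_lookup(&xxfoo_cd, minor(devno));
 *	if (dv == NULL)
 *		return ENXIO;
 *	sc = device_private(dv);
 */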
2683
2684 /*
2685 * device_lookup_private:
2686 *
2687 * Look up a softc instance for a given driver.
2688 */
2689 void *
2690 device_lookup_private(cfdriver_t cd, int unit)
2691 {
2692
2693 return device_private(device_lookup(cd, unit));
2694 }
2695
2696 /*
2697 * device_lookup_acquire:
2698 *
2699 * Look up a device instance for a given driver, and return a
2700 * reference to it that must be released by device_release.
2701 *
2702 * => If the device is still attaching, blocks until *_attach has
2703 * returned.
2704 *
2705 * => If the device is detaching, blocks until *_detach has
2706 * returned. May succeed or fail in that case, depending on
2707 * whether *_detach has backed out (EBUSY) or committed to
2708 * detaching.
2709 *
2710 * May sleep.
2711 */
2712 device_t
2713 device_lookup_acquire(cfdriver_t cd, int unit)
2714 {
2715 device_t dv;
2716
2717 ASSERT_SLEEPABLE();
2718
2719 /* XXX This should have a pserialized fast path -- TBD. */
2720 mutex_enter(&config_misc_lock);
2721 mutex_enter(&alldevs_lock);
2722 retry: if (unit < 0 || unit >= cd->cd_ndevs ||
2723 (dv = cd->cd_devs[unit]) == NULL ||
2724 dv->dv_del_gen != 0 ||
2725 dv->dv_detach_committed) {
2726 dv = NULL;
2727 } else {
2728 /*
2729 * Wait for the device to stabilize, if attaching or
2730 * detaching. Either way we must wait for *_attach or
2731 * *_detach to complete, and either way we must retry:
2732 * even if detaching, *_detach might fail (EBUSY) so
2733 * the device may still be there.
2734 */
2735 if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
2736 dv->dv_detaching != NULL) {
2737 mutex_exit(&alldevs_lock);
2738 cv_wait(&config_misc_cv, &config_misc_lock);
2739 mutex_enter(&alldevs_lock);
2740 goto retry;
2741 }
2742 localcount_acquire(dv->dv_localcount);
2743 }
2744 mutex_exit(&alldevs_lock);
2745 mutex_exit(&config_misc_lock);
2746
2747 return dv;
2748 }
2749
2750 /*
2751 * device_release:
2752 *
2753 * Release a reference to a device acquired with
2754 * device_lookup_acquire.
2755 */
2756 void
2757 device_release(device_t dv)
2758 {
2759
2760 localcount_release(dv->dv_localcount,
2761 &config_misc_cv, &config_misc_lock);
2762 }
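/*
 * Illustrative sketch: the reference returned by device_lookup_acquire()
 * pins the device across a sleep and must always be paired with
 * device_release().  The xxfoo names are hypothetical.
 *
 *	device_t dv;
 *
 *	if ((dv = device_lookup_acquire(&xxfoo_cd, unit)) == NULL)
 *		return ENXIO;
 *	error = xxfoo_do_io(device_private(dv), ...);
 *	device_release(dv);
 *	return error;
 */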
2763
2764 /*
2765 * device_find_by_xname:
2766 *
2767 * Returns the device of the given name or NULL if it doesn't exist.
2768 */
2769 device_t
2770 device_find_by_xname(const char *name)
2771 {
2772 device_t dv;
2773 deviter_t di;
2774
2775 for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2776 if (strcmp(device_xname(dv), name) == 0)
2777 break;
2778 }
2779 deviter_release(&di);
2780
2781 return dv;
2782 }
2783
2784 /*
2785 * device_find_by_driver_unit:
2786 *
2787 * Returns the device of the given driver name and unit or
2788 * NULL if it doesn't exist.
2789 */
2790 device_t
2791 device_find_by_driver_unit(const char *name, int unit)
2792 {
2793 struct cfdriver *cd;
2794
2795 if ((cd = config_cfdriver_lookup(name)) == NULL)
2796 return NULL;
2797 return device_lookup(cd, unit);
2798 }
2799
2800 static bool
2801 match_strcmp(const char * const s1, const char * const s2)
2802 {
2803 return strcmp(s1, s2) == 0;
2804 }
2805
2806 static bool
2807 match_pmatch(const char * const s1, const char * const s2)
2808 {
2809 return pmatch(s1, s2, NULL) == 2;
2810 }
2811
2812 static bool
2813 strarray_match_internal(const char ** const strings,
2814 unsigned int const nstrings, const char * const str,
2815 unsigned int * const indexp,
2816 bool (*match_fn)(const char *, const char *))
2817 {
2818 unsigned int i;
2819
2820 if (strings == NULL || nstrings == 0) {
2821 return false;
2822 }
2823
2824 for (i = 0; i < nstrings; i++) {
2825 if ((*match_fn)(strings[i], str)) {
2826 *indexp = i;
2827 return true;
2828 }
2829 }
2830
2831 return false;
2832 }
2833
2834 static int
2835 strarray_match(const char ** const strings, unsigned int const nstrings,
2836 const char * const str)
2837 {
2838 unsigned int idx;
2839
2840 if (strarray_match_internal(strings, nstrings, str, &idx,
2841 match_strcmp)) {
2842 return (int)(nstrings - idx);
2843 }
2844 return 0;
2845 }
2846
2847 static int
2848 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2849 const char * const pattern)
2850 {
2851 unsigned int idx;
2852
2853 if (strarray_match_internal(strings, nstrings, pattern, &idx,
2854 match_pmatch)) {
2855 return (int)(nstrings - idx);
2856 }
2857 return 0;
2858 }
2859
2860 static int
2861 device_compatible_match_strarray_internal(
2862 const char **device_compats, int ndevice_compats,
2863 const struct device_compatible_entry *driver_compats,
2864 const struct device_compatible_entry **matching_entryp,
2865 int (*match_fn)(const char **, unsigned int, const char *))
2866 {
2867 const struct device_compatible_entry *dce = NULL;
2868 int rv;
2869
2870 if (ndevice_compats == 0 || device_compats == NULL ||
2871 driver_compats == NULL)
2872 return 0;
2873
2874 for (dce = driver_compats; dce->compat != NULL; dce++) {
2875 rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2876 if (rv != 0) {
2877 if (matching_entryp != NULL) {
2878 *matching_entryp = dce;
2879 }
2880 return rv;
2881 }
2882 }
2883 return 0;
2884 }
2885
2886 /*
2887 * device_compatible_match:
2888 *
2889 * Match a driver's "compatible" data against a device's
2890  *	"compatible" strings.  Returns a result weighted by
2891 * which device "compatible" string was matched.
2892 */
2893 int
2894 device_compatible_match(const char **device_compats, int ndevice_compats,
2895 const struct device_compatible_entry *driver_compats)
2896 {
2897 return device_compatible_match_strarray_internal(device_compats,
2898 ndevice_compats, driver_compats, NULL, strarray_match);
2899 }
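/*
 * Illustrative sketch (hypothetical xxfdt driver): the driver keeps a
 * table of acceptable "compatible" strings terminated by a NULL entry
 * (DEVICE_COMPAT_EOL) and scores the device's strings against it in its
 * match routine; the device_compats array would come from the bus
 * attach arguments.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "vendor,xxfdt-v2" },
 *		{ .compat = "vendor,xxfdt" },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	...
 *	return device_compatible_match(device_compats, ndevice_compats,
 *	    compat_data);
 */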
2900
2901 /*
2902 * device_compatible_pmatch:
2903 *
2904 * Like device_compatible_match(), but uses pmatch(9) to compare
2905 * the device "compatible" strings against patterns in the
2906 * driver's "compatible" data.
2907 */
2908 int
2909 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
2910 const struct device_compatible_entry *driver_compats)
2911 {
2912 return device_compatible_match_strarray_internal(device_compats,
2913 ndevice_compats, driver_compats, NULL, strarray_pmatch);
2914 }
2915
2916 static int
2917 device_compatible_match_strlist_internal(
2918 const char * const device_compats, size_t const device_compatsize,
2919 const struct device_compatible_entry *driver_compats,
2920 const struct device_compatible_entry **matching_entryp,
2921 int (*match_fn)(const char *, size_t, const char *))
2922 {
2923 const struct device_compatible_entry *dce = NULL;
2924 int rv;
2925
2926 if (device_compats == NULL || device_compatsize == 0 ||
2927 driver_compats == NULL)
2928 return 0;
2929
2930 for (dce = driver_compats; dce->compat != NULL; dce++) {
2931 rv = (*match_fn)(device_compats, device_compatsize,
2932 dce->compat);
2933 if (rv != 0) {
2934 if (matching_entryp != NULL) {
2935 *matching_entryp = dce;
2936 }
2937 return rv;
2938 }
2939 }
2940 return 0;
2941 }
2942
2943 /*
2944 * device_compatible_match_strlist:
2945 *
2946  *	Like device_compatible_match(), but takes the device
2947 * "compatible" strings as an OpenFirmware-style string
2948 * list.
2949 */
2950 int
2951 device_compatible_match_strlist(
2952 const char * const device_compats, size_t const device_compatsize,
2953 const struct device_compatible_entry *driver_compats)
2954 {
2955 return device_compatible_match_strlist_internal(device_compats,
2956 device_compatsize, driver_compats, NULL, strlist_match);
2957 }
2958
2959 /*
2960 * device_compatible_pmatch_strlist:
2961 *
2962  *	Like device_compatible_pmatch(), but takes the device
2963 * "compatible" strings as an OpenFirmware-style string
2964 * list.
2965 */
2966 int
2967 device_compatible_pmatch_strlist(
2968 const char * const device_compats, size_t const device_compatsize,
2969 const struct device_compatible_entry *driver_compats)
2970 {
2971 return device_compatible_match_strlist_internal(device_compats,
2972 device_compatsize, driver_compats, NULL, strlist_pmatch);
2973 }
2974
2975 static int
2976 device_compatible_match_id_internal(
2977 uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
2978 const struct device_compatible_entry *driver_compats,
2979 const struct device_compatible_entry **matching_entryp)
2980 {
2981 const struct device_compatible_entry *dce = NULL;
2982
2983 if (mask == 0)
2984 return 0;
2985
2986 for (dce = driver_compats; dce->id != sentinel_id; dce++) {
2987 if ((id & mask) == dce->id) {
2988 if (matching_entryp != NULL) {
2989 *matching_entryp = dce;
2990 }
2991 return 1;
2992 }
2993 }
2994 return 0;
2995 }
2996
2997 /*
2998 * device_compatible_match_id:
2999 *
3000 * Like device_compatible_match(), but takes a single
3001 * unsigned integer device ID.
3002 */
3003 int
3004 device_compatible_match_id(
3005 uintptr_t const id, uintptr_t const sentinel_id,
3006 const struct device_compatible_entry *driver_compats)
3007 {
3008 return device_compatible_match_id_internal(id, (uintptr_t)-1,
3009 sentinel_id, driver_compats, NULL);
3010 }
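/*
 * Illustrative sketch: integer-ID matching walks a table terminated by a
 * caller-chosen sentinel ID instead of a NULL string.  All values here
 * are hypothetical.
 *
 *	#define XX_ID_SENTINEL	0
 *
 *	static const struct device_compatible_entry xx_ids[] = {
 *		{ .id = 0x1234 },
 *		{ .id = 0x5678 },
 *		{ .id = XX_ID_SENTINEL }
 *	};
 *
 *	...
 *	if (device_compatible_match_id(chip_id, XX_ID_SENTINEL, xx_ids))
 *		... supported chip ...
 */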
3011
3012 /*
3013 * device_compatible_lookup:
3014 *
3015 * Look up and return the device_compatible_entry, using the
3016 * same matching criteria used by device_compatible_match().
3017 */
3018 const struct device_compatible_entry *
3019 device_compatible_lookup(const char **device_compats, int ndevice_compats,
3020 const struct device_compatible_entry *driver_compats)
3021 {
3022 const struct device_compatible_entry *dce;
3023
3024 if (device_compatible_match_strarray_internal(device_compats,
3025 ndevice_compats, driver_compats, &dce, strarray_match)) {
3026 return dce;
3027 }
3028 return NULL;
3029 }
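/*
 * Illustrative sketch: device_compatible_lookup() is useful when each
 * entry carries per-variant driver data in its payload.  The xxfoo
 * variant structures are hypothetical; the entry's .data pointer is
 * assumed to hold them.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "vendor,xxfoo-v2", .data = &xxfoo_v2_variant },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	...
 *	dce = device_compatible_lookup(device_compats, ndevice_compats,
 *	    compat_data);
 *	if (dce != NULL)
 *		sc->sc_variant = dce->data;
 */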
3030
3031 /*
3032 * device_compatible_plookup:
3033 *
3034 * Look up and return the device_compatible_entry, using the
3035 * same matching criteria used by device_compatible_pmatch().
3036 */
3037 const struct device_compatible_entry *
3038 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3039 const struct device_compatible_entry *driver_compats)
3040 {
3041 const struct device_compatible_entry *dce;
3042
3043 if (device_compatible_match_strarray_internal(device_compats,
3044 ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3045 return dce;
3046 }
3047 return NULL;
3048 }
3049
3050 /*
3051 * device_compatible_lookup_strlist:
3052 *
3053  *	Like device_compatible_lookup(), but takes the device
3054 * "compatible" strings as an OpenFirmware-style string
3055 * list.
3056 */
3057 const struct device_compatible_entry *
3058 device_compatible_lookup_strlist(
3059 const char * const device_compats, size_t const device_compatsize,
3060 const struct device_compatible_entry *driver_compats)
3061 {
3062 const struct device_compatible_entry *dce;
3063
3064 if (device_compatible_match_strlist_internal(device_compats,
3065 device_compatsize, driver_compats, &dce, strlist_match)) {
3066 return dce;
3067 }
3068 return NULL;
3069 }
3070
3071 /*
3072 * device_compatible_plookup_strlist:
3073 *
3074  *	Like device_compatible_plookup(), but takes the device
3075 * "compatible" strings as an OpenFirmware-style string
3076 * list.
3077 */
3078 const struct device_compatible_entry *
3079 device_compatible_plookup_strlist(
3080 const char * const device_compats, size_t const device_compatsize,
3081 const struct device_compatible_entry *driver_compats)
3082 {
3083 const struct device_compatible_entry *dce;
3084
3085 if (device_compatible_match_strlist_internal(device_compats,
3086 device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3087 return dce;
3088 }
3089 return NULL;
3090 }
3091
3092 /*
3093 * device_compatible_lookup_id:
3094 *
3095 * Like device_compatible_lookup(), but takes a single
3096 * unsigned integer device ID.
3097 */
3098 const struct device_compatible_entry *
3099 device_compatible_lookup_id(
3100 uintptr_t const id, uintptr_t const sentinel_id,
3101 const struct device_compatible_entry *driver_compats)
3102 {
3103 const struct device_compatible_entry *dce;
3104
3105 if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3106 sentinel_id, driver_compats, &dce)) {
3107 return dce;
3108 }
3109 return NULL;
3110 }
3111
3112 /*
3113 * Power management related functions.
3114 */
3115
3116 bool
3117 device_pmf_is_registered(device_t dev)
3118 {
3119 return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3120 }
3121
3122 bool
3123 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
3124 {
3125 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3126 return true;
3127 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3128 return false;
3129 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3130 dev->dv_driver_suspend != NULL &&
3131 !(*dev->dv_driver_suspend)(dev, qual))
3132 return false;
3133
3134 dev->dv_flags |= DVF_DRIVER_SUSPENDED;
3135 return true;
3136 }
3137
3138 bool
3139 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3140 {
3141 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3142 return true;
3143 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3144 return false;
3145 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3146 dev->dv_driver_resume != NULL &&
3147 !(*dev->dv_driver_resume)(dev, qual))
3148 return false;
3149
3150 dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3151 return true;
3152 }
3153
3154 bool
3155 device_pmf_driver_shutdown(device_t dev, int how)
3156 {
3157
3158 	if (dev->dv_driver_shutdown != NULL &&
3159 !(*dev->dv_driver_shutdown)(dev, how))
3160 return false;
3161 return true;
3162 }
3163
3164 void
3165 device_pmf_driver_register(device_t dev,
3166 bool (*suspend)(device_t, const pmf_qual_t *),
3167 bool (*resume)(device_t, const pmf_qual_t *),
3168 bool (*shutdown)(device_t, int))
3169 {
3170
3171 dev->dv_driver_suspend = suspend;
3172 dev->dv_driver_resume = resume;
3173 dev->dv_driver_shutdown = shutdown;
3174 dev->dv_flags |= DVF_POWER_HANDLERS;
3175 }
3176
3177 void
3178 device_pmf_driver_deregister(device_t dev)
3179 {
3180 device_lock_t dvl = device_getlock(dev);
3181
3182 dev->dv_driver_suspend = NULL;
3183 dev->dv_driver_resume = NULL;
3184
3185 mutex_enter(&dvl->dvl_mtx);
3186 dev->dv_flags &= ~DVF_POWER_HANDLERS;
3187 while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
3188 /* Wake a thread that waits for the lock. That
3189 * thread will fail to acquire the lock, and then
3190 * it will wake the next thread that waits for the
3191 * lock, or else it will wake us.
3192 */
3193 cv_signal(&dvl->dvl_cv);
3194 pmflock_debug(dev, __func__, __LINE__);
3195 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3196 pmflock_debug(dev, __func__, __LINE__);
3197 }
3198 mutex_exit(&dvl->dvl_mtx);
3199 }
3200
3201 void
3202 device_pmf_driver_child_register(device_t dev)
3203 {
3204 device_t parent = device_parent(dev);
3205
3206 if (parent == NULL || parent->dv_driver_child_register == NULL)
3207 return;
3208 (*parent->dv_driver_child_register)(dev);
3209 }
3210
3211 void
3212 device_pmf_driver_set_child_register(device_t dev,
3213 void (*child_register)(device_t))
3214 {
3215 dev->dv_driver_child_register = child_register;
3216 }
3217
3218 static void
3219 pmflock_debug(device_t dev, const char *func, int line)
3220 {
3221 #ifdef PMFLOCK_DEBUG
3222 device_lock_t dvl = device_getlock(dev);
3223 const char *curlwp_name;
3224
3225 if (curlwp->l_name != NULL)
3226 curlwp_name = curlwp->l_name;
3227 else
3228 curlwp_name = curlwp->l_proc->p_comm;
3229
3230 aprint_debug_dev(dev,
3231 "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3232 curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3233 #endif /* PMFLOCK_DEBUG */
3234 }
3235
3236 static bool
3237 device_pmf_lock1(device_t dev)
3238 {
3239 device_lock_t dvl = device_getlock(dev);
3240
3241 while (device_pmf_is_registered(dev) &&
3242 dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3243 dvl->dvl_nwait++;
3244 pmflock_debug(dev, __func__, __LINE__);
3245 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3246 pmflock_debug(dev, __func__, __LINE__);
3247 dvl->dvl_nwait--;
3248 }
3249 if (!device_pmf_is_registered(dev)) {
3250 pmflock_debug(dev, __func__, __LINE__);
3251 /* We could not acquire the lock, but some other thread may
3252 	 * also be waiting for it.  Wake that thread.
3253 */
3254 cv_signal(&dvl->dvl_cv);
3255 return false;
3256 }
3257 dvl->dvl_nlock++;
3258 dvl->dvl_holder = curlwp;
3259 pmflock_debug(dev, __func__, __LINE__);
3260 return true;
3261 }
3262
3263 bool
3264 device_pmf_lock(device_t dev)
3265 {
3266 bool rc;
3267 device_lock_t dvl = device_getlock(dev);
3268
3269 mutex_enter(&dvl->dvl_mtx);
3270 rc = device_pmf_lock1(dev);
3271 mutex_exit(&dvl->dvl_mtx);
3272
3273 return rc;
3274 }
3275
3276 void
3277 device_pmf_unlock(device_t dev)
3278 {
3279 device_lock_t dvl = device_getlock(dev);
3280
3281 KASSERT(dvl->dvl_nlock > 0);
3282 mutex_enter(&dvl->dvl_mtx);
3283 if (--dvl->dvl_nlock == 0)
3284 dvl->dvl_holder = NULL;
3285 cv_signal(&dvl->dvl_cv);
3286 pmflock_debug(dev, __func__, __LINE__);
3287 mutex_exit(&dvl->dvl_mtx);
3288 }
3289
3290 device_lock_t
3291 device_getlock(device_t dev)
3292 {
3293 return &dev->dv_lock;
3294 }
3295
3296 void *
3297 device_pmf_bus_private(device_t dev)
3298 {
3299 return dev->dv_bus_private;
3300 }
3301
3302 bool
3303 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3304 {
3305 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3306 return true;
3307 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3308 (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3309 return false;
3310 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3311 dev->dv_bus_suspend != NULL &&
3312 !(*dev->dv_bus_suspend)(dev, qual))
3313 return false;
3314
3315 dev->dv_flags |= DVF_BUS_SUSPENDED;
3316 return true;
3317 }
3318
3319 bool
3320 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3321 {
3322 if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3323 return true;
3324 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3325 dev->dv_bus_resume != NULL &&
3326 !(*dev->dv_bus_resume)(dev, qual))
3327 return false;
3328
3329 dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3330 return true;
3331 }
3332
3333 bool
3334 device_pmf_bus_shutdown(device_t dev, int how)
3335 {
3336
3337 	if (dev->dv_bus_shutdown != NULL &&
3338 !(*dev->dv_bus_shutdown)(dev, how))
3339 return false;
3340 return true;
3341 }
3342
3343 void
3344 device_pmf_bus_register(device_t dev, void *priv,
3345 bool (*suspend)(device_t, const pmf_qual_t *),
3346 bool (*resume)(device_t, const pmf_qual_t *),
3347 bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3348 {
3349 dev->dv_bus_private = priv;
3350 dev->dv_bus_resume = resume;
3351 dev->dv_bus_suspend = suspend;
3352 dev->dv_bus_shutdown = shutdown;
3353 dev->dv_bus_deregister = deregister;
3354 }
3355
3356 void
3357 device_pmf_bus_deregister(device_t dev)
3358 {
3359 if (dev->dv_bus_deregister == NULL)
3360 return;
3361 (*dev->dv_bus_deregister)(dev);
3362 dev->dv_bus_private = NULL;
3363 dev->dv_bus_suspend = NULL;
3364 dev->dv_bus_resume = NULL;
3365 dev->dv_bus_deregister = NULL;
3366 }
3367
3368 void *
3369 device_pmf_class_private(device_t dev)
3370 {
3371 return dev->dv_class_private;
3372 }
3373
3374 bool
3375 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3376 {
3377 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3378 return true;
3379 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3380 dev->dv_class_suspend != NULL &&
3381 !(*dev->dv_class_suspend)(dev, qual))
3382 return false;
3383
3384 dev->dv_flags |= DVF_CLASS_SUSPENDED;
3385 return true;
3386 }
3387
3388 bool
3389 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3390 {
3391 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3392 return true;
3393 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3394 (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3395 return false;
3396 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3397 dev->dv_class_resume != NULL &&
3398 !(*dev->dv_class_resume)(dev, qual))
3399 return false;
3400
3401 dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3402 return true;
3403 }
3404
3405 void
3406 device_pmf_class_register(device_t dev, void *priv,
3407 bool (*suspend)(device_t, const pmf_qual_t *),
3408 bool (*resume)(device_t, const pmf_qual_t *),
3409 void (*deregister)(device_t))
3410 {
3411 dev->dv_class_private = priv;
3412 dev->dv_class_suspend = suspend;
3413 dev->dv_class_resume = resume;
3414 dev->dv_class_deregister = deregister;
3415 }
3416
3417 void
3418 device_pmf_class_deregister(device_t dev)
3419 {
3420 if (dev->dv_class_deregister == NULL)
3421 return;
3422 (*dev->dv_class_deregister)(dev);
3423 dev->dv_class_private = NULL;
3424 dev->dv_class_suspend = NULL;
3425 dev->dv_class_resume = NULL;
3426 dev->dv_class_deregister = NULL;
3427 }
3428
3429 bool
3430 device_active(device_t dev, devactive_t type)
3431 {
3432 size_t i;
3433
3434 if (dev->dv_activity_count == 0)
3435 return false;
3436
3437 for (i = 0; i < dev->dv_activity_count; ++i) {
3438 if (dev->dv_activity_handlers[i] == NULL)
3439 break;
3440 (*dev->dv_activity_handlers[i])(dev, type);
3441 }
3442
3443 return true;
3444 }
3445
3446 bool
3447 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3448 {
3449 void (**new_handlers)(device_t, devactive_t);
3450 void (**old_handlers)(device_t, devactive_t);
3451 size_t i, old_size, new_size;
3452 int s;
3453
3454 old_handlers = dev->dv_activity_handlers;
3455 old_size = dev->dv_activity_count;
3456
3457 KASSERT(old_size == 0 || old_handlers != NULL);
3458
3459 for (i = 0; i < old_size; ++i) {
3460 KASSERT(old_handlers[i] != handler);
3461 if (old_handlers[i] == NULL) {
3462 old_handlers[i] = handler;
3463 return true;
3464 }
3465 }
3466
3467 new_size = old_size + 4;
3468 new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3469
3470 for (i = 0; i < old_size; ++i)
3471 new_handlers[i] = old_handlers[i];
3472 new_handlers[old_size] = handler;
3473 for (i = old_size+1; i < new_size; ++i)
3474 new_handlers[i] = NULL;
3475
3476 s = splhigh();
3477 dev->dv_activity_count = new_size;
3478 dev->dv_activity_handlers = new_handlers;
3479 splx(s);
3480
3481 if (old_size > 0)
3482 kmem_free(old_handlers, sizeof(void *) * old_size);
3483
3484 return true;
3485 }
3486
3487 void
3488 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3489 {
3490 void (**old_handlers)(device_t, devactive_t);
3491 size_t i, old_size;
3492 int s;
3493
3494 old_handlers = dev->dv_activity_handlers;
3495 old_size = dev->dv_activity_count;
3496
3497 for (i = 0; i < old_size; ++i) {
3498 if (old_handlers[i] == handler)
3499 break;
3500 if (old_handlers[i] == NULL)
3501 return; /* XXX panic? */
3502 }
3503
3504 if (i == old_size)
3505 return; /* XXX panic? */
3506
3507 for (; i < old_size - 1; ++i) {
3508 if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3509 continue;
3510
3511 if (i == 0) {
3512 s = splhigh();
3513 dev->dv_activity_count = 0;
3514 dev->dv_activity_handlers = NULL;
3515 splx(s);
3516 kmem_free(old_handlers, sizeof(void *) * old_size);
3517 }
3518 return;
3519 }
3520 old_handlers[i] = NULL;
3521 }
3522
3523 /* Return true iff the device_t `dv' exists at generation `gen'. */
3524 static bool
3525 device_exists_at(device_t dv, devgen_t gen)
3526 {
3527 return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3528 dv->dv_add_gen <= gen;
3529 }
3530
3531 static bool
3532 deviter_visits(const deviter_t *di, device_t dv)
3533 {
3534 return device_exists_at(dv, di->di_gen);
3535 }
3536
3537 /*
3538 * Device Iteration
3539 *
3540 * deviter_t: a device iterator. Holds state for a "walk" visiting
3541 * each device_t's in the device tree.
3542 *
3543 * deviter_init(di, flags): initialize the device iterator `di'
3544 * to "walk" the device tree. deviter_next(di) will return
3545 * the first device_t in the device tree, or NULL if there are
3546 * no devices.
3547 *
3548 * `flags' is one or more of DEVITER_F_RW, indicating that the
3549 * caller intends to modify the device tree by calling
3550 * config_detach(9) on devices in the order that the iterator
3551 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3552 * nearest the "root" of the device tree to be returned, first;
3553 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3554 * the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3555 * indicating both that deviter_init() should not respect any
3556 * locks on the device tree, and that deviter_next(di) may run
3557 * in more than one LWP before the walk has finished.
3558 *
3559 * Only one DEVITER_F_RW iterator may be in the device tree at
3560 * once.
3561 *
3562 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3563 *
3564 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3565 * DEVITER_F_LEAVES_FIRST are used in combination.
3566 *
3567 * deviter_first(di, flags): initialize the device iterator `di'
3568 * and return the first device_t in the device tree, or NULL
3569 * if there are no devices. The statement
3570 *
3571 * dv = deviter_first(di);
3572 *
3573 * is shorthand for
3574 *
3575 * deviter_init(di);
3576 * dv = deviter_next(di);
3577 *
3578 * deviter_next(di): return the next device_t in the device tree,
3579 * or NULL if there are no more devices. deviter_next(di)
3580 * is undefined if `di' was not initialized with deviter_init() or
3581 * deviter_first().
3582 *
3583 * deviter_release(di): stops iteration (subsequent calls to
3584 * deviter_next() will return NULL), releases any locks and
3585 * resources held by the device iterator.
3586 *
3587 * Device iteration does not return device_t's in any particular
3588 * order. An iterator will never return the same device_t twice.
3589 * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3590 * is called repeatedly on the same `di', it will eventually return
3591 * NULL. It is ok to attach/detach devices during device iteration.
3592 */
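/*
 * Illustrative sketch of the pattern described above: walk the tree
 * read-only and count attached instances of a (hypothetical) xxfoo
 * driver.
 *
 *	deviter_t di;
 *	device_t dv;
 *	int n = 0;
 *
 *	for (dv = deviter_first(&di, 0); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		if (device_is_a(dv, "xxfoo"))
 *			n++;
 *	}
 *	deviter_release(&di);
 */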
3593 void
3594 deviter_init(deviter_t *di, deviter_flags_t flags)
3595 {
3596 device_t dv;
3597
3598 memset(di, 0, sizeof(*di));
3599
3600 if ((flags & DEVITER_F_SHUTDOWN) != 0)
3601 flags |= DEVITER_F_RW;
3602
3603 mutex_enter(&alldevs_lock);
3604 if ((flags & DEVITER_F_RW) != 0)
3605 alldevs_nwrite++;
3606 else
3607 alldevs_nread++;
3608 di->di_gen = alldevs_gen++;
3609 di->di_flags = flags;
3610
3611 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3612 case DEVITER_F_LEAVES_FIRST:
3613 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3614 if (!deviter_visits(di, dv))
3615 continue;
3616 di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3617 }
3618 break;
3619 case DEVITER_F_ROOT_FIRST:
3620 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3621 if (!deviter_visits(di, dv))
3622 continue;
3623 di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3624 }
3625 break;
3626 default:
3627 break;
3628 }
3629
3630 deviter_reinit(di);
3631 mutex_exit(&alldevs_lock);
3632 }
3633
3634 static void
3635 deviter_reinit(deviter_t *di)
3636 {
3637
3638 KASSERT(mutex_owned(&alldevs_lock));
3639 if ((di->di_flags & DEVITER_F_RW) != 0)
3640 di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3641 else
3642 di->di_prev = TAILQ_FIRST(&alldevs);
3643 }
3644
3645 device_t
3646 deviter_first(deviter_t *di, deviter_flags_t flags)
3647 {
3648
3649 deviter_init(di, flags);
3650 return deviter_next(di);
3651 }
3652
3653 static device_t
3654 deviter_next2(deviter_t *di)
3655 {
3656 device_t dv;
3657
3658 KASSERT(mutex_owned(&alldevs_lock));
3659
3660 dv = di->di_prev;
3661
3662 if (dv == NULL)
3663 return NULL;
3664
3665 if ((di->di_flags & DEVITER_F_RW) != 0)
3666 di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3667 else
3668 di->di_prev = TAILQ_NEXT(dv, dv_list);
3669
3670 return dv;
3671 }
3672
3673 static device_t
3674 deviter_next1(deviter_t *di)
3675 {
3676 device_t dv;
3677
3678 KASSERT(mutex_owned(&alldevs_lock));
3679
3680 do {
3681 dv = deviter_next2(di);
3682 } while (dv != NULL && !deviter_visits(di, dv));
3683
3684 return dv;
3685 }
3686
3687 device_t
3688 deviter_next(deviter_t *di)
3689 {
3690 device_t dv = NULL;
3691
3692 mutex_enter(&alldevs_lock);
3693 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3694 case 0:
3695 dv = deviter_next1(di);
3696 break;
3697 case DEVITER_F_LEAVES_FIRST:
3698 while (di->di_curdepth >= 0) {
3699 if ((dv = deviter_next1(di)) == NULL) {
3700 di->di_curdepth--;
3701 deviter_reinit(di);
3702 } else if (dv->dv_depth == di->di_curdepth)
3703 break;
3704 }
3705 break;
3706 case DEVITER_F_ROOT_FIRST:
3707 while (di->di_curdepth <= di->di_maxdepth) {
3708 if ((dv = deviter_next1(di)) == NULL) {
3709 di->di_curdepth++;
3710 deviter_reinit(di);
3711 } else if (dv->dv_depth == di->di_curdepth)
3712 break;
3713 }
3714 break;
3715 default:
3716 break;
3717 }
3718 mutex_exit(&alldevs_lock);
3719
3720 return dv;
3721 }
3722
3723 void
3724 deviter_release(deviter_t *di)
3725 {
3726 bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3727
3728 mutex_enter(&alldevs_lock);
3729 if (rw)
3730 --alldevs_nwrite;
3731 else
3732 --alldevs_nread;
3733 /* XXX wake a garbage-collection thread */
3734 mutex_exit(&alldevs_lock);
3735 }
3736
3737 const char *
3738 cfdata_ifattr(const struct cfdata *cf)
3739 {
3740 return cf->cf_pspec->cfp_iattr;
3741 }
3742
3743 bool
3744 ifattr_match(const char *snull, const char *t)
3745 {
3746 return (snull == NULL) || strcmp(snull, t) == 0;
3747 }
3748
3749 void
3750 null_childdetached(device_t self, device_t child)
3751 {
3752 /* do nothing */
3753 }
3754
3755 static void
3756 sysctl_detach_setup(struct sysctllog **clog)
3757 {
3758
3759 sysctl_createv(clog, 0, NULL, NULL,
3760 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
3761 CTLTYPE_BOOL, "detachall",
3762 SYSCTL_DESCR("Detach all devices at shutdown"),
3763 NULL, 0, &detachall, 0,
3764 CTL_KERN, CTL_CREATE, CTL_EOL);
3765 }
3766