1 /* $NetBSD: subr_autoconf.c,v 1.305 2022/09/13 09:40:38 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1996, 2000 Christopher G. Demetriou
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the
18 * NetBSD Project. See http://www.NetBSD.org/ for
19 * information about NetBSD.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35 */
36
37 /*
38 * Copyright (c) 1992, 1993
39 * The Regents of the University of California. All rights reserved.
40 *
41 * This software was developed by the Computer Systems Engineering group
42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43 * contributed to Berkeley.
44 *
45 * All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Lawrence Berkeley Laboratories.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. Neither the name of the University nor the names of its contributors
59 * may be used to endorse or promote products derived from this software
60 * without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL)
75 *
76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.305 2022/09/13 09:40:38 riastradh Exp $");
81
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113
114 #include <sys/disk.h>
115
116 #include <sys/rndsource.h>
117
118 #include <machine/limits.h>
119
120 /*
121 * Autoconfiguration subroutines.
122 */
123
124 /*
125 * Device autoconfiguration timings are mixed into the entropy pool.
126 */
127 static krndsource_t rnd_autoconf_source;
128
129 /*
130 * ioconf.c exports exactly two names: cfdata and cfroots. All system
131 * devices and drivers are found via these tables.
132 */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135
136 /*
137 * List of all cfdriver structures. We use this to detect duplicates
138 * when other cfdrivers are loaded.
139 */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142
143 /*
144 * Initial list of cfattach's.
145 */
146 extern const struct cfattachinit cfattachinit[];
147
148 /*
149  * List of cfdata tables. We always have one such table -- the one
150 * built statically when the kernel was configured.
151 */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154
155 #define ROOT ((device_t)NULL)
156
157 struct matchinfo {
158 cfsubmatch_t fn;
159 device_t parent;
160 const int *locs;
161 void *aux;
162 struct cfdata *match;
163 int pri;
164 };
165
166 struct alldevs_foray {
167 int af_s;
168 struct devicelist af_garbage;
169 };
170
171 /*
172 * Internal version of the cfargs structure; all versions are
173 * canonicalized to this.
174 */
175 struct cfargs_internal {
176 union {
177 cfsubmatch_t submatch;/* submatch function (direct config) */
178 cfsearch_t search; /* search function (indirect config) */
179 };
180 const char * iattr; /* interface attribute */
181 const int * locators; /* locators array */
182 devhandle_t devhandle; /* devhandle_t (by value) */
183 };
184
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t config_attach_internal(device_t, cfdata_t, void *,
195 cfprint_t, const struct cfargs_internal *);
196
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199
200 static void pmflock_debug(device_t, const char *, int);
201
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204
205 struct deferred_config {
206 TAILQ_ENTRY(deferred_config) dc_queue;
207 device_t dc_dev;
208 void (*dc_func)(device_t);
209 };
210
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212
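/*
 * Deferred-configuration queues: config_defer(9), config_interrupts(9)
 * and config_mountroot(9) record actions here to be run once the parent
 * has finished attaching, once interrupts are running, or once the root
 * file system has been mounted, respectively.
 */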
213 static struct deferred_config_head deferred_config_queue =
214 TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 TAILQ_ENTRY(finalize_hook) f_list;
230 int (*f_func)(device_t);
231 device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244
245 static struct devicelist config_pending =
246 TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249
250 static bool detachall = false;
251
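/*
 * Fast string-equality test: compare the first characters inline and
 * fall back to a full strcmp() only when they match.
 */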
252 #define STREQ(s1, s2) \
253 (*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254
255 static bool config_initialized = false; /* config_init() has been called. */
256
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259
260 static void sysctl_detach_setup(struct sysctllog **);
261
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264
265 typedef int (*cfdriver_fn)(struct cfdriver *);
266 static int
267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
268 cfdriver_fn drv_do, cfdriver_fn drv_undo,
269 const char *style, bool dopanic)
270 {
271 void (*pr)(const char *, ...) __printflike(1, 2) =
272 dopanic ? panic : printf;
273 int i, error = 0, e2 __diagused;
274
275 for (i = 0; cfdriverv[i] != NULL; i++) {
276 if ((error = drv_do(cfdriverv[i])) != 0) {
277 pr("configure: `%s' driver %s failed: %d",
278 cfdriverv[i]->cd_name, style, error);
279 goto bad;
280 }
281 }
282
283 KASSERT(error == 0);
284 return 0;
285
286 bad:
287 printf("\n");
288 for (i--; i >= 0; i--) {
289 e2 = drv_undo(cfdriverv[i]);
290 KASSERT(e2 == 0);
291 }
292
293 return error;
294 }
295
296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
297 static int
298 frob_cfattachvec(const struct cfattachinit *cfattachv,
299 cfattach_fn att_do, cfattach_fn att_undo,
300 const char *style, bool dopanic)
301 {
302 const struct cfattachinit *cfai = NULL;
303 void (*pr)(const char *, ...) __printflike(1, 2) =
304 dopanic ? panic : printf;
305 int j = 0, error = 0, e2 __diagused;
306
307 for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
308 for (j = 0; cfai->cfai_list[j] != NULL; j++) {
309 if ((error = att_do(cfai->cfai_name,
310 cfai->cfai_list[j])) != 0) {
311 pr("configure: attachment `%s' "
312 "of `%s' driver %s failed: %d",
313 cfai->cfai_list[j]->ca_name,
314 cfai->cfai_name, style, error);
315 goto bad;
316 }
317 }
318 }
319
320 KASSERT(error == 0);
321 return 0;
322
323 bad:
324 /*
325 	 * Roll back in reverse order. Not sure it's essential, but do
326 	 * that anyway. Although the code looks a little like someone
327 	 * did a little integration (in the math sense).
328 */
329 printf("\n");
330 if (cfai) {
331 bool last;
332
333 for (last = false; last == false; ) {
334 if (cfai == &cfattachv[0])
335 last = true;
336 for (j--; j >= 0; j--) {
337 e2 = att_undo(cfai->cfai_name,
338 cfai->cfai_list[j]);
339 KASSERT(e2 == 0);
340 }
341 if (!last) {
342 cfai--;
343 for (j = 0; cfai->cfai_list[j] != NULL; j++)
344 ;
345 }
346 }
347 }
348
349 return error;
350 }
351
352 /*
353 * Initialize the autoconfiguration data structures. Normally this
354 * is done by configure(), but some platforms need to do this very
355  * early (e.g., to initialize the console).
356 */
357 void
358 config_init(void)
359 {
360
361 KASSERT(config_initialized == false);
362
363 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
364
365 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
366 cv_init(&config_misc_cv, "cfgmisc");
367
368 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
369
370 frob_cfdrivervec(cfdriver_list_initial,
371 config_cfdriver_attach, NULL, "bootstrap", true);
372 frob_cfattachvec(cfattachinit,
373 config_cfattach_attach, NULL, "bootstrap", true);
374
375 initcftable.ct_cfdata = cfdata;
376 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
377
378 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
379 RND_FLAG_COLLECT_TIME);
380
381 config_initialized = true;
382 }
383
384 /*
385 * Init or fini drivers and attachments. Either all or none
386 * are processed (via rollback). It would be nice if this were
387 * atomic to outside consumers, but with the current state of
388 * locking ...
389 */
390 int
391 config_init_component(struct cfdriver * const *cfdriverv,
392 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
393 {
394 int error;
395
396 KERNEL_LOCK(1, NULL);
397
398 if ((error = frob_cfdrivervec(cfdriverv,
399 config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
400 goto out;
401 if ((error = frob_cfattachvec(cfattachv,
402 config_cfattach_attach, config_cfattach_detach,
403 "init", false)) != 0) {
404 frob_cfdrivervec(cfdriverv,
405 config_cfdriver_detach, NULL, "init rollback", true);
406 goto out;
407 }
408 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
409 frob_cfattachvec(cfattachv,
410 config_cfattach_detach, NULL, "init rollback", true);
411 frob_cfdrivervec(cfdriverv,
412 config_cfdriver_detach, NULL, "init rollback", true);
413 goto out;
414 }
415
416 /* Success! */
417 error = 0;
418
419 out: KERNEL_UNLOCK_ONE(NULL);
420 return error;
421 }
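
/*
 * Usage sketch (names illustrative, not defined here): a device module
 * built with ioconf typically calls this from its MODULE_CMD_INIT
 * handler with the generated tables, e.g.
 *
 *	error = config_init_component(cfdriver_ioconf_foo,
 *	    cfattach_ioconf_foo, cfdata_ioconf_foo);
 *
 * and undoes it from MODULE_CMD_FINI via config_fini_component().
 */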
422
423 int
424 config_fini_component(struct cfdriver * const *cfdriverv,
425 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
426 {
427 int error;
428
429 KERNEL_LOCK(1, NULL);
430
431 if ((error = config_cfdata_detach(cfdatav)) != 0)
432 goto out;
433 if ((error = frob_cfattachvec(cfattachv,
434 config_cfattach_detach, config_cfattach_attach,
435 "fini", false)) != 0) {
436 if (config_cfdata_attach(cfdatav, 0) != 0)
437 panic("config_cfdata fini rollback failed");
438 goto out;
439 }
440 if ((error = frob_cfdrivervec(cfdriverv,
441 config_cfdriver_detach, config_cfdriver_attach,
442 "fini", false)) != 0) {
443 frob_cfattachvec(cfattachv,
444 config_cfattach_attach, NULL, "fini rollback", true);
445 if (config_cfdata_attach(cfdatav, 0) != 0)
446 panic("config_cfdata fini rollback failed");
447 goto out;
448 }
449
450 /* Success! */
451 error = 0;
452
453 out: KERNEL_UNLOCK_ONE(NULL);
454 return error;
455 }
456
457 void
458 config_init_mi(void)
459 {
460
461 if (!config_initialized)
462 config_init();
463
464 sysctl_detach_setup(NULL);
465 }
466
467 void
468 config_deferred(device_t dev)
469 {
470
471 KASSERT(KERNEL_LOCKED_P());
472
473 config_process_deferred(&deferred_config_queue, dev);
474 config_process_deferred(&interrupt_config_queue, dev);
475 config_process_deferred(&mountroot_config_queue, dev);
476 }
477
478 static void
479 config_interrupts_thread(void *cookie)
480 {
481 struct deferred_config *dc;
482 device_t dev;
483
484 mutex_enter(&config_misc_lock);
485 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
486 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
487 mutex_exit(&config_misc_lock);
488
489 dev = dc->dc_dev;
490 (*dc->dc_func)(dev);
491 if (!device_pmf_is_registered(dev))
492 aprint_debug_dev(dev,
493 "WARNING: power management not supported\n");
494 config_pending_decr(dev);
495 kmem_free(dc, sizeof(*dc));
496
497 mutex_enter(&config_misc_lock);
498 }
499 mutex_exit(&config_misc_lock);
500
501 kthread_exit(0);
502 }
503
504 void
505 config_create_interruptthreads(void)
506 {
507 int i;
508
509 for (i = 0; i < interrupt_config_threads; i++) {
510 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 config_interrupts_thread, NULL, NULL, "configintr");
512 }
513 }
514
515 static void
516 config_mountroot_thread(void *cookie)
517 {
518 struct deferred_config *dc;
519
520 mutex_enter(&config_misc_lock);
521 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
522 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
523 mutex_exit(&config_misc_lock);
524
525 (*dc->dc_func)(dc->dc_dev);
526 kmem_free(dc, sizeof(*dc));
527
528 mutex_enter(&config_misc_lock);
529 }
530 mutex_exit(&config_misc_lock);
531
532 kthread_exit(0);
533 }
534
535 void
536 config_create_mountrootthreads(void)
537 {
538 int i;
539
540 if (!root_is_mounted)
541 root_is_mounted = true;
542
543 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 mountroot_config_threads;
545 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 KM_NOSLEEP);
547 KASSERT(mountroot_config_lwpids);
548 for (i = 0; i < mountroot_config_threads; i++) {
549 mountroot_config_lwpids[i] = 0;
550 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 NULL, config_mountroot_thread, NULL,
552 &mountroot_config_lwpids[i],
553 "configroot");
554 }
555 }
556
557 void
558 config_finalize_mountroot(void)
559 {
560 int i, error;
561
562 for (i = 0; i < mountroot_config_threads; i++) {
563 if (mountroot_config_lwpids[i] == 0)
564 continue;
565
566 error = kthread_join(mountroot_config_lwpids[i]);
567 if (error)
568 printf("%s: thread %x joined with error %d\n",
569 __func__, i, error);
570 }
571 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
572 }
573
574 /*
575 * Announce device attach/detach to userland listeners.
576 */
577
578 int
579 no_devmon_insert(const char *name, prop_dictionary_t p)
580 {
581
582 return ENODEV;
583 }
584
585 static void
586 devmon_report_device(device_t dev, bool isattach)
587 {
588 prop_dictionary_t ev, dict = device_properties(dev);
589 const char *parent;
590 const char *what;
591 const char *where;
592 device_t pdev = device_parent(dev);
593
594 /* If currently no drvctl device, just return */
595 if (devmon_insert_vec == no_devmon_insert)
596 return;
597
598 ev = prop_dictionary_create();
599 if (ev == NULL)
600 return;
601
602 what = (isattach ? "device-attach" : "device-detach");
603 parent = (pdev == NULL ? "root" : device_xname(pdev));
604 if (prop_dictionary_get_string(dict, "location", &where)) {
605 prop_dictionary_set_string(ev, "location", where);
606 aprint_debug("ev: %s %s at %s in [%s]\n",
607 what, device_xname(dev), parent, where);
608 }
609 if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
610 !prop_dictionary_set_string(ev, "parent", parent)) {
611 prop_object_release(ev);
612 return;
613 }
614
615 if ((*devmon_insert_vec)(what, ev) != 0)
616 prop_object_release(ev);
617 }
618
619 /*
620 * Add a cfdriver to the system.
621 */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 struct cfdriver *lcd;
626
627 /* Make sure this driver isn't already in the system. */
628 LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 if (STREQ(lcd->cd_name, cd->cd_name))
630 return EEXIST;
631 }
632
633 LIST_INIT(&cd->cd_attach);
634 LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635
636 return 0;
637 }
638
639 /*
640 * Remove a cfdriver from the system.
641 */
642 int
643 config_cfdriver_detach(struct cfdriver *cd)
644 {
645 struct alldevs_foray af;
646 int i, rc = 0;
647
648 config_alldevs_enter(&af);
649 /* Make sure there are no active instances. */
650 for (i = 0; i < cd->cd_ndevs; i++) {
651 if (cd->cd_devs[i] != NULL) {
652 rc = EBUSY;
653 break;
654 }
655 }
656 config_alldevs_exit(&af);
657
658 if (rc != 0)
659 return rc;
660
661 /* ...and no attachments loaded. */
662 if (LIST_EMPTY(&cd->cd_attach) == 0)
663 return EBUSY;
664
665 LIST_REMOVE(cd, cd_list);
666
667 KASSERT(cd->cd_devs == NULL);
668
669 return 0;
670 }
671
672 /*
673 * Look up a cfdriver by name.
674 */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 struct cfdriver *cd;
679
680 LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 if (STREQ(cd->cd_name, name))
682 return cd;
683 }
684
685 return NULL;
686 }
687
688 /*
689 * Add a cfattach to the specified driver.
690 */
691 int
692 config_cfattach_attach(const char *driver, struct cfattach *ca)
693 {
694 struct cfattach *lca;
695 struct cfdriver *cd;
696
697 cd = config_cfdriver_lookup(driver);
698 if (cd == NULL)
699 return ESRCH;
700
701 /* Make sure this attachment isn't already on this driver. */
702 LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
703 if (STREQ(lca->ca_name, ca->ca_name))
704 return EEXIST;
705 }
706
707 LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
708
709 return 0;
710 }
711
712 /*
713 * Remove a cfattach from the specified driver.
714 */
715 int
716 config_cfattach_detach(const char *driver, struct cfattach *ca)
717 {
718 struct alldevs_foray af;
719 struct cfdriver *cd;
720 device_t dev;
721 int i, rc = 0;
722
723 cd = config_cfdriver_lookup(driver);
724 if (cd == NULL)
725 return ESRCH;
726
727 config_alldevs_enter(&af);
728 /* Make sure there are no active instances. */
729 for (i = 0; i < cd->cd_ndevs; i++) {
730 if ((dev = cd->cd_devs[i]) == NULL)
731 continue;
732 if (dev->dv_cfattach == ca) {
733 rc = EBUSY;
734 break;
735 }
736 }
737 config_alldevs_exit(&af);
738
739 if (rc != 0)
740 return rc;
741
742 LIST_REMOVE(ca, ca_list);
743
744 return 0;
745 }
746
747 /*
748 * Look up a cfattach by name.
749 */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 struct cfattach *ca;
754
755 LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 if (STREQ(ca->ca_name, atname))
757 return ca;
758 }
759
760 return NULL;
761 }
762
763 /*
764 * Look up a cfattach by driver/attachment name.
765 */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 struct cfdriver *cd;
770
771 cd = config_cfdriver_lookup(name);
772 if (cd == NULL)
773 return NULL;
774
775 return config_cfattach_lookup_cd(cd, atname);
776 }
777
778 /*
779 * Apply the matching function and choose the best. This is used
780 * a few times and we want to keep the code small.
781 */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 int pri;
786
787 if (m->fn != NULL) {
788 pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 } else {
790 pri = config_match(m->parent, cf, m->aux);
791 }
792 if (pri > m->pri) {
793 m->match = cf;
794 m->pri = pri;
795 }
796 }
797
798 int
799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
800 {
801 const struct cfiattrdata *ci;
802 const struct cflocdesc *cl;
803 int nlocs, i;
804
805 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
806 KASSERT(ci);
807 nlocs = ci->ci_loclen;
808 KASSERT(!nlocs || locs);
809 for (i = 0; i < nlocs; i++) {
810 cl = &ci->ci_locdesc[i];
811 if (cl->cld_defaultstr != NULL &&
812 cf->cf_loc[i] == cl->cld_default)
813 continue;
814 if (cf->cf_loc[i] == locs[i])
815 continue;
816 return 0;
817 }
818
819 return config_match(parent, cf, aux);
820 }
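
/*
 * Example (sketch): for a config(5) entry such as "foo0 at bar? slot 3",
 * each locator value in cf->cf_loc is compared against the value the
 * bus supplied in locs[]; a locator left at its default (when a default
 * string exists) matches anything.
 */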
821
822 /*
823 * Helper function: check whether the driver supports the interface attribute
824 * and return its descriptor structure.
825 */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 const struct cfiattrdata * const *cpp;
830
831 if (cd->cd_attrs == NULL)
832 return 0;
833
834 for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 if (STREQ((*cpp)->ci_name, ia)) {
836 /* Match. */
837 return *cpp;
838 }
839 }
840 return 0;
841 }
842
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 const struct cfiattrdata * const *cpp;
847 int i;
848
849 if (cd->cd_attrs == NULL)
850 return 0;
851
852 for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 i++;
854 }
855 return i;
856 }
857
858 /*
859  * Look up an interface attribute description by name.
860 * If the driver is given, consider only its supported attributes.
861 */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 const struct cfdriver *d;
866 const struct cfiattrdata *ia;
867
868 if (cd)
869 return cfdriver_get_iattr(cd, name);
870
871 LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 ia = cfdriver_get_iattr(d, name);
873 if (ia)
874 return ia;
875 }
876 return 0;
877 }
878
879 /*
880 * Determine if `parent' is a potential parent for a device spec based
881 * on `cfp'.
882 */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 struct cfdriver *pcd;
887
888 /* We don't match root nodes here. */
889 if (cfp == NULL)
890 return 0;
891
892 pcd = parent->dv_cfdriver;
893 KASSERT(pcd != NULL);
894
895 /*
896 * First, ensure this parent has the correct interface
897 * attribute.
898 */
899 if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 return 0;
901
902 /*
903 * If no specific parent device instance was specified (i.e.
904 * we're attaching to the attribute only), we're done!
905 */
906 if (cfp->cfp_parent == NULL)
907 return 1;
908
909 /*
910 * Check the parent device's name.
911 */
912 if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 return 0; /* not the same parent */
914
915 /*
916 * Make sure the unit number matches.
917 */
918 if (cfp->cfp_unit == DVUNIT_ANY || /* wildcard */
919 cfp->cfp_unit == parent->dv_unit)
920 return 1;
921
922 /* Unit numbers don't match. */
923 return 0;
924 }
925
926 /*
927  * Helper for config_cfdata_attach(): check whether each device could be
928  * the parent of any attachment in the config data table passed, and rescan.
929 */
930 static void
931 rescan_with_cfdata(const struct cfdata *cf)
932 {
933 device_t d;
934 const struct cfdata *cf1;
935 deviter_t di;
936
937 KASSERT(KERNEL_LOCKED_P());
938
939 /*
940 * "alldevs" is likely longer than a modules's cfdata, so make it
941 * the outer loop.
942 */
943 for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
944
945 if (!(d->dv_cfattach->ca_rescan))
946 continue;
947
948 for (cf1 = cf; cf1->cf_name; cf1++) {
949
950 if (!cfparent_match(d, cf1->cf_pspec))
951 continue;
952
953 (*d->dv_cfattach->ca_rescan)(d,
954 cfdata_ifattr(cf1), cf1->cf_loc);
955
956 config_deferred(d);
957 }
958 }
959 deviter_release(&di);
960 }
961
962 /*
963 * Attach a supplemental config data table and rescan potential
964 * parent devices if required.
965 */
966 int
967 config_cfdata_attach(cfdata_t cf, int scannow)
968 {
969 struct cftable *ct;
970
971 KERNEL_LOCK(1, NULL);
972
973 ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
974 ct->ct_cfdata = cf;
975 TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
976
977 if (scannow)
978 rescan_with_cfdata(cf);
979
980 KERNEL_UNLOCK_ONE(NULL);
981
982 return 0;
983 }
984
985 /*
986 * Helper for config_cfdata_detach: check whether a device is
987 * found through any attachment in the config data table.
988 */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 const struct cfdata *cf1;
993
994 for (cf1 = cf; cf1->cf_name; cf1++)
995 if (d->dv_cfdata == cf1)
996 return 1;
997
998 return 0;
999 }
1000
1001 /*
1002 * Detach a supplemental config data table. Detach all devices found
1003  * through that table (and thus holding references to it) first.
1004 */
1005 int
1006 config_cfdata_detach(cfdata_t cf)
1007 {
1008 device_t d;
1009 int error = 0;
1010 struct cftable *ct;
1011 deviter_t di;
1012
1013 KERNEL_LOCK(1, NULL);
1014
1015 for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1016 d = deviter_next(&di)) {
1017 if (!dev_in_cfdata(d, cf))
1018 continue;
1019 if ((error = config_detach(d, 0)) != 0)
1020 break;
1021 }
1022 deviter_release(&di);
1023 if (error) {
1024 aprint_error_dev(d, "unable to detach instance\n");
1025 goto out;
1026 }
1027
1028 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1029 if (ct->ct_cfdata == cf) {
1030 TAILQ_REMOVE(&allcftables, ct, ct_list);
1031 kmem_free(ct, sizeof(*ct));
1032 error = 0;
1033 goto out;
1034 }
1035 }
1036
1037 /* not found -- shouldn't happen */
1038 error = EINVAL;
1039
1040 out: KERNEL_UNLOCK_ONE(NULL);
1041 return error;
1042 }
1043
1044 /*
1045 * Invoke the "match" routine for a cfdata entry on behalf of
1046 * an external caller, usually a direct config "submatch" routine.
1047 */
1048 int
1049 config_match(device_t parent, cfdata_t cf, void *aux)
1050 {
1051 struct cfattach *ca;
1052
1053 KASSERT(KERNEL_LOCKED_P());
1054
1055 ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1056 if (ca == NULL) {
1057 /* No attachment for this entry, oh well. */
1058 return 0;
1059 }
1060
1061 return (*ca->ca_match)(parent, cf, aux);
1062 }
1063
1064 /*
1065 * Invoke the "probe" routine for a cfdata entry on behalf of
1066 * an external caller, usually an indirect config "search" routine.
1067 */
1068 int
1069 config_probe(device_t parent, cfdata_t cf, void *aux)
1070 {
1071 /*
1072 * This is currently a synonym for config_match(), but this
1073 * is an implementation detail; "match" and "probe" routines
1074 * have different behaviors.
1075 *
1076 * XXX config_probe() should return a bool, because there is
1077 * XXX no match score for probe -- it's either there or it's
1078 * XXX not, but some ports abuse the return value as a way
1079 * XXX to attach "critical" devices before "non-critical"
1080 * XXX devices.
1081 */
1082 return config_match(parent, cf, aux);
1083 }
1084
1085 static struct cfargs_internal *
1086 cfargs_canonicalize(const struct cfargs * const cfargs,
1087 struct cfargs_internal * const store)
1088 {
1089 struct cfargs_internal *args = store;
1090
1091 memset(args, 0, sizeof(*args));
1092
1093 	/* If none specified, all-NULL pointers are good. */
1094 if (cfargs == NULL) {
1095 return args;
1096 }
1097
1098 /*
1099 	 * Only one version of the cfargs structure is recognized at this time.
1100 */
1101 if (cfargs->cfargs_version != CFARGS_VERSION) {
1102 panic("cfargs_canonicalize: unknown version %lu\n",
1103 (unsigned long)cfargs->cfargs_version);
1104 }
1105
1106 /*
1107 * submatch and search are mutually-exclusive.
1108 */
1109 if (cfargs->submatch != NULL && cfargs->search != NULL) {
1110 panic("cfargs_canonicalize: submatch and search are "
1111 "mutually-exclusive");
1112 }
1113 if (cfargs->submatch != NULL) {
1114 args->submatch = cfargs->submatch;
1115 } else if (cfargs->search != NULL) {
1116 args->search = cfargs->search;
1117 }
1118
1119 args->iattr = cfargs->iattr;
1120 args->locators = cfargs->locators;
1121 args->devhandle = cfargs->devhandle;
1122
1123 return args;
1124 }
1125
1126 /*
1127 * Iterate over all potential children of some device, calling the given
1128 * function (default being the child's match function) for each one.
1129 * Nonzero returns are matches; the highest value returned is considered
1130 * the best match. Return the `found child' if we got a match, or NULL
1131 * otherwise. The `aux' pointer is simply passed on through.
1132 *
1133 * Note that this function is designed so that it can be used to apply
1134 * an arbitrary function to all potential children (its return value
1135 * can be ignored).
1136 */
1137 static cfdata_t
1138 config_search_internal(device_t parent, void *aux,
1139 const struct cfargs_internal * const args)
1140 {
1141 struct cftable *ct;
1142 cfdata_t cf;
1143 struct matchinfo m;
1144
1145 KASSERT(config_initialized);
1146 KASSERT(!args->iattr ||
1147 cfdriver_get_iattr(parent->dv_cfdriver, args->iattr));
1148 KASSERT(args->iattr ||
1149 cfdriver_iattr_count(parent->dv_cfdriver) < 2);
1150
1151 m.fn = args->submatch; /* N.B. union */
1152 m.parent = parent;
1153 m.locs = args->locators;
1154 m.aux = aux;
1155 m.match = NULL;
1156 m.pri = 0;
1157
1158 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1159 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1160
1161 /* We don't match root nodes here. */
1162 if (!cf->cf_pspec)
1163 continue;
1164
1165 /*
1166 * Skip cf if no longer eligible, otherwise scan
1167 * through parents for one matching `parent', and
1168 * try match function.
1169 */
1170 if (cf->cf_fstate == FSTATE_FOUND)
1171 continue;
1172 if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1173 cf->cf_fstate == FSTATE_DSTAR)
1174 continue;
1175
1176 /*
1177 * If an interface attribute was specified,
1178 * consider only children which attach to
1179 * that attribute.
1180 */
1181 if (args->iattr != NULL &&
1182 !STREQ(args->iattr, cfdata_ifattr(cf)))
1183 continue;
1184
1185 if (cfparent_match(parent, cf->cf_pspec))
1186 mapply(&m, cf);
1187 }
1188 }
1189 rnd_add_uint32(&rnd_autoconf_source, 0);
1190 return m.match;
1191 }
1192
1193 cfdata_t
1194 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1195 {
1196 cfdata_t cf;
1197 struct cfargs_internal store;
1198
1199 cf = config_search_internal(parent, aux,
1200 cfargs_canonicalize(cfargs, &store));
1201
1202 return cf;
1203 }
1204
1205 /*
1206 * Find the given root device.
1207 * This is much like config_search, but there is no parent.
1208 * Don't bother with multiple cfdata tables; the root node
1209 * must always be in the initial table.
1210 */
1211 cfdata_t
1212 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1213 {
1214 cfdata_t cf;
1215 const short *p;
1216 struct matchinfo m;
1217
1218 m.fn = fn;
1219 m.parent = ROOT;
1220 m.aux = aux;
1221 m.match = NULL;
1222 m.pri = 0;
1223 m.locs = 0;
1224 /*
1225 * Look at root entries for matching name. We do not bother
1226 * with found-state here since only one root should ever be
1227 * searched (and it must be done first).
1228 */
1229 for (p = cfroots; *p >= 0; p++) {
1230 cf = &cfdata[*p];
1231 if (strcmp(cf->cf_name, rootname) == 0)
1232 mapply(&m, cf);
1233 }
1234 return m.match;
1235 }
1236
1237 static const char * const msgs[] = {
1238 [QUIET] = "",
1239 [UNCONF] = " not configured\n",
1240 [UNSUPP] = " unsupported\n",
1241 };
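
/*
 * A cfprint_t routine returns one of the indices above (QUIET, UNCONF
 * or UNSUPP); config_found() uses that value to choose the message
 * printed when no driver attaches.
 */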
1242
1243 /*
1244 * The given `aux' argument describes a device that has been found
1245 * on the given parent, but not necessarily configured. Locate the
1246 * configuration data for that device (using the submatch function
1247  * provided, or using the candidates' ca_match attachment functions)
1248  * and attach it, and return its device_t. If the device was
1249 * not configured, call the given `print' function and return NULL.
1250 */
1251 device_t
1252 config_found(device_t parent, void *aux, cfprint_t print,
1253 const struct cfargs * const cfargs)
1254 {
1255 cfdata_t cf;
1256 struct cfargs_internal store;
1257 const struct cfargs_internal * const args =
1258 cfargs_canonicalize(cfargs, &store);
1259
1260 cf = config_search_internal(parent, aux, args);
1261 if (cf != NULL) {
1262 return config_attach_internal(parent, cf, aux, print, args);
1263 }
1264
1265 if (print) {
1266 if (config_do_twiddle && cold)
1267 twiddle();
1268
1269 const int pret = (*print)(aux, device_xname(parent));
1270 KASSERT(pret >= 0);
1271 KASSERT(pret < __arraycount(msgs));
1272 KASSERT(msgs[pret] != NULL);
1273 aprint_normal("%s", msgs[pret]);
1274 }
1275
1276 return NULL;
1277 }
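
/*
 * Usage sketch (hypothetical "mybus" driver; all names illustrative):
 * a bus attach or rescan routine fills in attach arguments for each
 * child it discovers and calls, e.g.
 *
 *	child = config_found(self, &maa, mybus_print,
 *	    CFARGS(.submatch = config_stdsubmatch,
 *		   .locators = locs));
 *
 * A NULL return means no cfdata entry matched the child.
 */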
1278
1279 /*
1280 * As above, but for root devices.
1281 */
1282 device_t
1283 config_rootfound(const char *rootname, void *aux)
1284 {
1285 cfdata_t cf;
1286 device_t dev = NULL;
1287
1288 KERNEL_LOCK(1, NULL);
1289 if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1290 dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1291 else
1292 aprint_error("root device %s not configured\n", rootname);
1293 KERNEL_UNLOCK_ONE(NULL);
1294 return dev;
1295 }
1296
1297 /* just like sprintf(buf, "%d") except that it works from the end */
1298 static char *
1299 number(char *ep, int n)
1300 {
1301
1302 *--ep = 0;
1303 while (n >= 10) {
1304 *--ep = (n % 10) + '0';
1305 n /= 10;
1306 }
1307 *--ep = n + '0';
1308 return ep;
1309 }
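
/*
 * For example, with char num[10], number(&num[sizeof(num)], 42) stores
 * the NUL-terminated string "42" at the end of num and returns a
 * pointer to its first character.
 */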
1310
1311 /*
1312 * Expand the size of the cd_devs array if necessary.
1313 *
1314 * The caller must hold alldevs_lock. config_makeroom() may release and
1315 * re-acquire alldevs_lock, so callers should re-check conditions such
1316 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1317 * returns.
1318 */
1319 static void
1320 config_makeroom(int n, struct cfdriver *cd)
1321 {
1322 int ondevs, nndevs;
1323 device_t *osp, *nsp;
1324
1325 KASSERT(mutex_owned(&alldevs_lock));
1326 alldevs_nwrite++;
1327
1328 for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1329 ;
1330
1331 while (n >= cd->cd_ndevs) {
1332 /*
1333 * Need to expand the array.
1334 */
1335 ondevs = cd->cd_ndevs;
1336 osp = cd->cd_devs;
1337
1338 /*
1339 * Release alldevs_lock around allocation, which may
1340 * sleep.
1341 */
1342 mutex_exit(&alldevs_lock);
1343 nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1344 mutex_enter(&alldevs_lock);
1345
1346 /*
1347 * If another thread moved the array while we did
1348 * not hold alldevs_lock, try again.
1349 */
1350 if (cd->cd_devs != osp) {
1351 mutex_exit(&alldevs_lock);
1352 kmem_free(nsp, sizeof(device_t) * nndevs);
1353 mutex_enter(&alldevs_lock);
1354 continue;
1355 }
1356
1357 memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1358 if (ondevs != 0)
1359 memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1360
1361 cd->cd_ndevs = nndevs;
1362 cd->cd_devs = nsp;
1363 if (ondevs != 0) {
1364 mutex_exit(&alldevs_lock);
1365 kmem_free(osp, sizeof(device_t) * ondevs);
1366 mutex_enter(&alldevs_lock);
1367 }
1368 }
1369 KASSERT(mutex_owned(&alldevs_lock));
1370 alldevs_nwrite--;
1371 }
1372
1373 /*
1374 * Put dev into the devices list.
1375 */
1376 static void
1377 config_devlink(device_t dev)
1378 {
1379
1380 mutex_enter(&alldevs_lock);
1381
1382 KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1383
1384 dev->dv_add_gen = alldevs_gen;
1385 /* It is safe to add a device to the tail of the list while
1386 * readers and writers are in the list.
1387 */
1388 TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1389 mutex_exit(&alldevs_lock);
1390 }
1391
1392 static void
1393 config_devfree(device_t dev)
1394 {
1395
1396 KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1397 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1398
1399 if (dev->dv_cfattach->ca_devsize > 0)
1400 kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1401 kmem_free(dev, sizeof(*dev));
1402 }
1403
1404 /*
1405 * Caller must hold alldevs_lock.
1406 */
1407 static void
1408 config_devunlink(device_t dev, struct devicelist *garbage)
1409 {
1410 struct device_garbage *dg = &dev->dv_garbage;
1411 cfdriver_t cd = device_cfdriver(dev);
1412 int i;
1413
1414 KASSERT(mutex_owned(&alldevs_lock));
1415 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1416
1417 /* Unlink from device list. Link to garbage list. */
1418 TAILQ_REMOVE(&alldevs, dev, dv_list);
1419 TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1420
1421 /* Remove from cfdriver's array. */
1422 cd->cd_devs[dev->dv_unit] = NULL;
1423
1424 /*
1425 	 * If the driver now has no units in use, unlink its device array.
1426 */
1427 for (i = 0; i < cd->cd_ndevs; i++) {
1428 if (cd->cd_devs[i] != NULL)
1429 break;
1430 }
1431 /* Nothing found. Unlink, now. Deallocate, later. */
1432 if (i == cd->cd_ndevs) {
1433 dg->dg_ndevs = cd->cd_ndevs;
1434 dg->dg_devs = cd->cd_devs;
1435 cd->cd_devs = NULL;
1436 cd->cd_ndevs = 0;
1437 }
1438 }
1439
1440 static void
1441 config_devdelete(device_t dev)
1442 {
1443 struct device_garbage *dg = &dev->dv_garbage;
1444 device_lock_t dvl = device_getlock(dev);
1445
1446 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1447
1448 if (dg->dg_devs != NULL)
1449 kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1450
1451 localcount_fini(dev->dv_localcount);
1452 kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
1453
1454 cv_destroy(&dvl->dvl_cv);
1455 mutex_destroy(&dvl->dvl_mtx);
1456
1457 KASSERT(dev->dv_properties != NULL);
1458 prop_object_release(dev->dv_properties);
1459
1460 if (dev->dv_activity_handlers)
1461 panic("%s with registered handlers", __func__);
1462
1463 if (dev->dv_locators) {
1464 size_t amount = *--dev->dv_locators;
1465 kmem_free(dev->dv_locators, amount);
1466 }
1467
1468 config_devfree(dev);
1469 }
1470
1471 static int
1472 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1473 {
1474 int unit = cf->cf_unit;
1475
1476 if (unit < 0)
1477 return -1;
1478 if (cf->cf_fstate == FSTATE_STAR) {
1479 for (; unit < cd->cd_ndevs; unit++)
1480 if (cd->cd_devs[unit] == NULL)
1481 break;
1482 /*
1483 * unit is now the unit of the first NULL device pointer,
1484 * or max(cd->cd_ndevs,cf->cf_unit).
1485 */
1486 } else {
1487 if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1488 unit = -1;
1489 }
1490 return unit;
1491 }
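
/*
 * For example, a wildcarded entry ("foo* at ...", FSTATE_STAR) yields
 * the first free unit number at or above cf_unit, while a fixed entry
 * ("foo0 at ...") yields cf_unit only if that unit is not already
 * attached, and -1 otherwise.
 */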
1492
1493 static int
1494 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1495 {
1496 struct alldevs_foray af;
1497 int unit;
1498
1499 config_alldevs_enter(&af);
1500 for (;;) {
1501 unit = config_unit_nextfree(cd, cf);
1502 if (unit == -1)
1503 break;
1504 if (unit < cd->cd_ndevs) {
1505 cd->cd_devs[unit] = dev;
1506 dev->dv_unit = unit;
1507 break;
1508 }
1509 config_makeroom(unit, cd);
1510 }
1511 config_alldevs_exit(&af);
1512
1513 return unit;
1514 }
1515
1516 static device_t
1517 config_devalloc(const device_t parent, const cfdata_t cf,
1518 const struct cfargs_internal * const args)
1519 {
1520 cfdriver_t cd;
1521 cfattach_t ca;
1522 size_t lname, lunit;
1523 const char *xunit;
1524 int myunit;
1525 char num[10];
1526 device_t dev;
1527 void *dev_private;
1528 const struct cfiattrdata *ia;
1529 device_lock_t dvl;
1530
1531 cd = config_cfdriver_lookup(cf->cf_name);
1532 if (cd == NULL)
1533 return NULL;
1534
1535 ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1536 if (ca == NULL)
1537 return NULL;
1538
1539 /* get memory for all device vars */
1540 KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1541 if (ca->ca_devsize > 0) {
1542 dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1543 } else {
1544 dev_private = NULL;
1545 }
1546 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1547
1548 dev->dv_handle = args->devhandle;
1549
1550 dev->dv_class = cd->cd_class;
1551 dev->dv_cfdata = cf;
1552 dev->dv_cfdriver = cd;
1553 dev->dv_cfattach = ca;
1554 dev->dv_activity_count = 0;
1555 dev->dv_activity_handlers = NULL;
1556 dev->dv_private = dev_private;
1557 dev->dv_flags = ca->ca_flags; /* inherit flags from class */
1558 dev->dv_attaching = curlwp;
1559
1560 myunit = config_unit_alloc(dev, cd, cf);
1561 if (myunit == -1) {
1562 config_devfree(dev);
1563 return NULL;
1564 }
1565
1566 /* compute length of name and decimal expansion of unit number */
1567 lname = strlen(cd->cd_name);
1568 xunit = number(&num[sizeof(num)], myunit);
1569 lunit = &num[sizeof(num)] - xunit;
1570 if (lname + lunit > sizeof(dev->dv_xname))
1571 panic("config_devalloc: device name too long");
1572
1573 dvl = device_getlock(dev);
1574
1575 mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1576 cv_init(&dvl->dvl_cv, "pmfsusp");
1577
1578 memcpy(dev->dv_xname, cd->cd_name, lname);
1579 memcpy(dev->dv_xname + lname, xunit, lunit);
1580 dev->dv_parent = parent;
1581 if (parent != NULL)
1582 dev->dv_depth = parent->dv_depth + 1;
1583 else
1584 dev->dv_depth = 0;
1585 dev->dv_flags |= DVF_ACTIVE; /* always initially active */
1586 if (args->locators) {
1587 KASSERT(parent); /* no locators at root */
1588 ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1589 dev->dv_locators =
1590 kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1591 *dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1592 memcpy(dev->dv_locators, args->locators,
1593 sizeof(int) * ia->ci_loclen);
1594 }
1595 dev->dv_properties = prop_dictionary_create();
1596 KASSERT(dev->dv_properties != NULL);
1597
1598 prop_dictionary_set_string_nocopy(dev->dv_properties,
1599 "device-driver", dev->dv_cfdriver->cd_name);
1600 prop_dictionary_set_uint16(dev->dv_properties,
1601 "device-unit", dev->dv_unit);
1602 if (parent != NULL) {
1603 prop_dictionary_set_string(dev->dv_properties,
1604 "device-parent", device_xname(parent));
1605 }
1606
1607 dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
1608 KM_SLEEP);
1609 localcount_init(dev->dv_localcount);
1610
1611 if (dev->dv_cfdriver->cd_attrs != NULL)
1612 config_add_attrib_dict(dev);
1613
1614 return dev;
1615 }
1616
1617 /*
1618 * Create an array of device attach attributes and add it
1619 * to the device's dv_properties dictionary.
1620 *
1621 * <key>interface-attributes</key>
1622 * <array>
1623 * <dict>
1624 * <key>attribute-name</key>
1625 * <string>foo</string>
1626 * <key>locators</key>
1627 * <array>
1628 * <dict>
1629 * <key>loc-name</key>
1630 * <string>foo-loc1</string>
1631 * </dict>
1632 * <dict>
1633 * <key>loc-name</key>
1634 * <string>foo-loc2</string>
1635 * <key>default</key>
1636 * <string>foo-loc2-default</string>
1637 * </dict>
1638 * ...
1639 * </array>
1640 * </dict>
1641 * ...
1642 * </array>
1643 */
1644
1645 static void
1646 config_add_attrib_dict(device_t dev)
1647 {
1648 int i, j;
1649 const struct cfiattrdata *ci;
1650 prop_dictionary_t attr_dict, loc_dict;
1651 prop_array_t attr_array, loc_array;
1652
1653 if ((attr_array = prop_array_create()) == NULL)
1654 return;
1655
1656 for (i = 0; ; i++) {
1657 if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1658 break;
1659 if ((attr_dict = prop_dictionary_create()) == NULL)
1660 break;
1661 prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1662 ci->ci_name);
1663
1664 /* Create an array of the locator names and defaults */
1665
1666 if (ci->ci_loclen != 0 &&
1667 (loc_array = prop_array_create()) != NULL) {
1668 for (j = 0; j < ci->ci_loclen; j++) {
1669 loc_dict = prop_dictionary_create();
1670 if (loc_dict == NULL)
1671 continue;
1672 prop_dictionary_set_string_nocopy(loc_dict,
1673 "loc-name", ci->ci_locdesc[j].cld_name);
1674 if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1675 prop_dictionary_set_string_nocopy(
1676 loc_dict, "default",
1677 ci->ci_locdesc[j].cld_defaultstr);
1678 prop_array_set(loc_array, j, loc_dict);
1679 prop_object_release(loc_dict);
1680 }
1681 prop_dictionary_set_and_rel(attr_dict, "locators",
1682 loc_array);
1683 }
1684 prop_array_add(attr_array, attr_dict);
1685 prop_object_release(attr_dict);
1686 }
1687 if (i == 0)
1688 prop_object_release(attr_array);
1689 else
1690 prop_dictionary_set_and_rel(dev->dv_properties,
1691 "interface-attributes", attr_array);
1692
1693 return;
1694 }
1695
1696 /*
1697 * Attach a found device.
1698 */
1699 static device_t
1700 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1701 const struct cfargs_internal * const args)
1702 {
1703 device_t dev;
1704 struct cftable *ct;
1705 const char *drvname;
1706 bool deferred;
1707
1708 KASSERT(KERNEL_LOCKED_P());
1709
1710 dev = config_devalloc(parent, cf, args);
1711 if (!dev)
1712 panic("config_attach: allocation of device softc failed");
1713
1714 /* XXX redundant - see below? */
1715 if (cf->cf_fstate != FSTATE_STAR) {
1716 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1717 cf->cf_fstate = FSTATE_FOUND;
1718 }
1719
1720 config_devlink(dev);
1721
1722 if (config_do_twiddle && cold)
1723 twiddle();
1724 else
1725 aprint_naive("Found ");
1726 /*
1727 * We want the next two printfs for normal, verbose, and quiet,
1728 * but not silent (in which case, we're twiddling, instead).
1729 */
1730 if (parent == ROOT) {
1731 aprint_naive("%s (root)", device_xname(dev));
1732 aprint_normal("%s (root)", device_xname(dev));
1733 } else {
1734 aprint_naive("%s at %s", device_xname(dev),
1735 device_xname(parent));
1736 aprint_normal("%s at %s", device_xname(dev),
1737 device_xname(parent));
1738 if (print)
1739 (void) (*print)(aux, NULL);
1740 }
1741
1742 /*
1743 * Before attaching, clobber any unfound devices that are
1744 * otherwise identical.
1745 * XXX code above is redundant?
1746 */
1747 drvname = dev->dv_cfdriver->cd_name;
1748 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1749 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1750 if (STREQ(cf->cf_name, drvname) &&
1751 cf->cf_unit == dev->dv_unit) {
1752 if (cf->cf_fstate == FSTATE_NOTFOUND)
1753 cf->cf_fstate = FSTATE_FOUND;
1754 }
1755 }
1756 }
1757 device_register(dev, aux);
1758
1759 /* Let userland know */
1760 devmon_report_device(dev, true);
1761
1762 /*
1763 * Prevent detach until the driver's attach function, and all
1764 * deferred actions, have finished.
1765 */
1766 config_pending_incr(dev);
1767
1768 /* Call the driver's attach function. */
1769 (*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1770
1771 /*
1772 * Allow other threads to acquire references to the device now
1773 * that the driver's attach function is done.
1774 */
1775 mutex_enter(&config_misc_lock);
1776 KASSERT(dev->dv_attaching == curlwp);
1777 dev->dv_attaching = NULL;
1778 cv_broadcast(&config_misc_cv);
1779 mutex_exit(&config_misc_lock);
1780
1781 /*
1782 * Synchronous parts of attach are done. Allow detach, unless
1783 * the driver's attach function scheduled deferred actions.
1784 */
1785 config_pending_decr(dev);
1786
1787 mutex_enter(&config_misc_lock);
1788 deferred = (dev->dv_pending != 0);
1789 mutex_exit(&config_misc_lock);
1790
1791 if (!deferred && !device_pmf_is_registered(dev))
1792 aprint_debug_dev(dev,
1793 "WARNING: power management not supported\n");
1794
1795 config_process_deferred(&deferred_config_queue, dev);
1796
1797 device_register_post_config(dev, aux);
1798 rnd_add_uint32(&rnd_autoconf_source, 0);
1799 return dev;
1800 }
1801
1802 device_t
1803 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1804 const struct cfargs *cfargs)
1805 {
1806 struct cfargs_internal store;
1807
1808 KASSERT(KERNEL_LOCKED_P());
1809
1810 return config_attach_internal(parent, cf, aux, print,
1811 cfargs_canonicalize(cfargs, &store));
1812 }
1813
1814 /*
1815 * As above, but for pseudo-devices. Pseudo-devices attached in this
1816 * way are silently inserted into the device tree, and their children
1817 * attached.
1818 *
1819 * Note that because pseudo-devices are attached silently, any information
1820 * the attach routine wishes to print should be prefixed with the device
1821 * name by the attach routine.
1822 */
1823 device_t
1824 config_attach_pseudo(cfdata_t cf)
1825 {
1826 device_t dev;
1827
1828 KERNEL_LOCK(1, NULL);
1829
1830 struct cfargs_internal args = { };
1831 dev = config_devalloc(ROOT, cf, &args);
1832 if (!dev)
1833 goto out;
1834
1835 /* XXX mark busy in cfdata */
1836
1837 if (cf->cf_fstate != FSTATE_STAR) {
1838 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1839 cf->cf_fstate = FSTATE_FOUND;
1840 }
1841
1842 config_devlink(dev);
1843
1844 #if 0 /* XXXJRT not yet */
1845 device_register(dev, NULL); /* like a root node */
1846 #endif
1847
1848 /* Let userland know */
1849 devmon_report_device(dev, true);
1850
1851 /*
1852 * Prevent detach until the driver's attach function, and all
1853 * deferred actions, have finished.
1854 */
1855 config_pending_incr(dev);
1856
1857 /* Call the driver's attach function. */
1858 (*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL);
1859
1860 /*
1861 * Allow other threads to acquire references to the device now
1862 * that the driver's attach function is done.
1863 */
1864 mutex_enter(&config_misc_lock);
1865 KASSERT(dev->dv_attaching == curlwp);
1866 dev->dv_attaching = NULL;
1867 cv_broadcast(&config_misc_cv);
1868 mutex_exit(&config_misc_lock);
1869
1870 /*
1871 * Synchronous parts of attach are done. Allow detach, unless
1872 * the driver's attach function scheduled deferred actions.
1873 */
1874 config_pending_decr(dev);
1875
1876 config_process_deferred(&deferred_config_queue, dev);
1877
1878 out: KERNEL_UNLOCK_ONE(NULL);
1879 return dev;
1880 }
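
/*
 * Usage sketch (illustrative only): pseudo-device attach code typically
 * builds a wildcarded cfdata entry by hand and passes it here, e.g.
 *
 *	static struct cfdata foo_cfdata = {
 *		.cf_name = "foo",
 *		.cf_atname = "foo",
 *		.cf_unit = 0,
 *		.cf_fstate = FSTATE_STAR,
 *	};
 *	...
 *	dev = config_attach_pseudo(&foo_cfdata);
 */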
1881
1882 /*
1883 * Caller must hold alldevs_lock.
1884 */
1885 static void
1886 config_collect_garbage(struct devicelist *garbage)
1887 {
1888 device_t dv;
1889
1890 KASSERT(!cpu_intr_p());
1891 KASSERT(!cpu_softintr_p());
1892 KASSERT(mutex_owned(&alldevs_lock));
1893
1894 while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
1895 TAILQ_FOREACH(dv, &alldevs, dv_list) {
1896 if (dv->dv_del_gen != 0)
1897 break;
1898 }
1899 if (dv == NULL) {
1900 alldevs_garbage = false;
1901 break;
1902 }
1903 config_devunlink(dv, garbage);
1904 }
1905 KASSERT(mutex_owned(&alldevs_lock));
1906 }
1907
1908 static void
1909 config_dump_garbage(struct devicelist *garbage)
1910 {
1911 device_t dv;
1912
1913 while ((dv = TAILQ_FIRST(garbage)) != NULL) {
1914 TAILQ_REMOVE(garbage, dv, dv_list);
1915 config_devdelete(dv);
1916 }
1917 }
1918
1919 static int
1920 config_detach_enter(device_t dev)
1921 {
1922 struct lwp *l __diagused;
1923 int error = 0;
1924
1925 mutex_enter(&config_misc_lock);
1926
1927 /*
1928 * Wait until attach has fully completed, and until any
1929 * concurrent detach (e.g., drvctl racing with USB event
1930 * thread) has completed.
1931 *
1932 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
1933 * deviter) to ensure the winner of the race doesn't free the
1934 * device leading the loser of the race into use-after-free.
1935 *
1936 * XXX Not all callers do this!
1937 */
1938 while (dev->dv_pending || dev->dv_detaching) {
1939 KASSERTMSG(dev->dv_detaching != curlwp,
1940 "recursively detaching %s", device_xname(dev));
1941 error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
1942 if (error)
1943 goto out;
1944 }
1945
1946 /*
1947 * Attach has completed, and no other concurrent detach is
1948 * running. Claim the device for detaching. This will cause
1949 * all new attempts to acquire references to block.
1950 */
1951 KASSERTMSG((l = dev->dv_attaching) == NULL,
1952 "lwp %ld [%s] @ %p attaching %s",
1953 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1954 device_xname(dev));
1955 KASSERTMSG((l = dev->dv_detaching) == NULL,
1956 "lwp %ld [%s] @ %p detaching %s",
1957 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1958 device_xname(dev));
1959 dev->dv_detaching = curlwp;
1960
1961 out: mutex_exit(&config_misc_lock);
1962 return error;
1963 }
1964
1965 static void
1966 config_detach_exit(device_t dev)
1967 {
1968 struct lwp *l __diagused;
1969
1970 mutex_enter(&config_misc_lock);
1971 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
1972 device_xname(dev));
1973 KASSERTMSG((l = dev->dv_detaching) == curlwp,
1974 "lwp %ld [%s] @ %p detaching %s",
1975 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
1976 device_xname(dev));
1977 dev->dv_detaching = NULL;
1978 cv_broadcast(&config_misc_cv);
1979 mutex_exit(&config_misc_lock);
1980 }
1981
1982 /*
1983 * Detach a device. Optionally forced (e.g. because of hardware
1984 * removal) and quiet. Returns zero if successful, non-zero
1985 * (an error code) otherwise.
1986 *
1987 * Note that this code wants to be run from a process context, so
1988 * that the detach can sleep to allow processes which have a device
1989 * open to run and unwind their stacks.
1990 */
1991 int
1992 config_detach(device_t dev, int flags)
1993 {
1994 struct alldevs_foray af;
1995 struct cftable *ct;
1996 cfdata_t cf;
1997 const struct cfattach *ca;
1998 struct cfdriver *cd;
1999 device_t d __diagused;
2000 int rv = 0;
2001
2002 KERNEL_LOCK(1, NULL);
2003
2004 cf = dev->dv_cfdata;
2005 KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
2006 cf->cf_fstate == FSTATE_STAR),
2007 "config_detach: %s: bad device fstate: %d",
2008 device_xname(dev), cf ? cf->cf_fstate : -1);
2009
2010 cd = dev->dv_cfdriver;
2011 KASSERT(cd != NULL);
2012
2013 ca = dev->dv_cfattach;
2014 KASSERT(ca != NULL);
2015
2016 /*
2017 * Only one detach at a time, please -- and not until fully
2018 * attached.
2019 */
2020 rv = config_detach_enter(dev);
2021 if (rv) {
2022 KERNEL_UNLOCK_ONE(NULL);
2023 return rv;
2024 }
2025
2026 mutex_enter(&alldevs_lock);
2027 if (dev->dv_del_gen != 0) {
2028 mutex_exit(&alldevs_lock);
2029 #ifdef DIAGNOSTIC
2030 printf("%s: %s is already detached\n", __func__,
2031 device_xname(dev));
2032 #endif /* DIAGNOSTIC */
2033 config_detach_exit(dev);
2034 KERNEL_UNLOCK_ONE(NULL);
2035 return ENOENT;
2036 }
2037 alldevs_nwrite++;
2038 mutex_exit(&alldevs_lock);
2039
2040 /*
2041 * Call the driver's .ca_detach function, unless it has none or
2042 * we are skipping it because it's unforced shutdown time and
2043 * the driver didn't ask to detach on shutdown.
2044 */
2045 if (!detachall &&
2046 (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2047 (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2048 rv = EOPNOTSUPP;
2049 } else if (ca->ca_detach != NULL) {
2050 rv = (*ca->ca_detach)(dev, flags);
2051 } else
2052 rv = EOPNOTSUPP;
2053
2054 /*
2055 * If it was not possible to detach the device, then we either
2056 * panic() (for the forced but failed case), or return an error.
2057 */
2058 if (rv) {
2059 /*
2060 * Detach failed -- likely EOPNOTSUPP or EBUSY. Driver
2061 * must not have called config_detach_commit.
2062 */
2063 KASSERTMSG(!dev->dv_detached,
2064 "%s committed to detaching and then backed out",
2065 device_xname(dev));
2066 if (flags & DETACH_FORCE) {
2067 panic("config_detach: forced detach of %s failed (%d)",
2068 device_xname(dev), rv);
2069 }
2070 goto out;
2071 }
2072
2073 /*
2074 * The device has now been successfully detached.
2075 */
2076
2077 /*
2078 * If .ca_detach didn't commit to detach, then do that for it.
2079 * This wakes any pending device_lookup_acquire calls so they
2080 * will fail.
2081 */
2082 config_detach_commit(dev);
2083
2084 /*
2085 * If it was possible to detach the device, ensure that the
2086 * device is deactivated.
2087 */
2088 dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
2089
2090 /*
2091 * Wait for all device_lookup_acquire references -- mostly, for
2092 * all attempts to open the device -- to drain. It is the
2093 * responsibility of .ca_detach to ensure anything with open
2094 * references will be interrupted and release them promptly,
2095 * not block indefinitely. All new attempts to acquire
2096 * references will fail, as config_detach_commit has arranged
2097 * by now.
2098 */
2099 mutex_enter(&config_misc_lock);
2100 localcount_drain(dev->dv_localcount,
2101 &config_misc_cv, &config_misc_lock);
2102 mutex_exit(&config_misc_lock);
2103
2104 /* Let userland know */
2105 devmon_report_device(dev, false);
2106
2107 #ifdef DIAGNOSTIC
2108 /*
2109 * Sanity: If you're successfully detached, you should have no
2110 * children. (Note that because children must be attached
2111 * after parents, we only need to search the latter part of
2112 * the list.)
2113 */
2114 mutex_enter(&alldevs_lock);
2115 for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2116 d = TAILQ_NEXT(d, dv_list)) {
2117 if (d->dv_parent == dev && d->dv_del_gen == 0) {
2118 printf("config_detach: detached device %s"
2119 " has children %s\n", device_xname(dev),
2120 device_xname(d));
2121 panic("config_detach");
2122 }
2123 }
2124 mutex_exit(&alldevs_lock);
2125 #endif
2126
2127 /* notify the parent that the child is gone */
2128 if (dev->dv_parent) {
2129 device_t p = dev->dv_parent;
2130 if (p->dv_cfattach->ca_childdetached)
2131 (*p->dv_cfattach->ca_childdetached)(p, dev);
2132 }
2133
2134 /*
2135 * Mark cfdata to show that the unit can be reused, if possible.
2136 */
2137 TAILQ_FOREACH(ct, &allcftables, ct_list) {
2138 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2139 if (STREQ(cf->cf_name, cd->cd_name)) {
2140 if (cf->cf_fstate == FSTATE_FOUND &&
2141 cf->cf_unit == dev->dv_unit)
2142 cf->cf_fstate = FSTATE_NOTFOUND;
2143 }
2144 }
2145 }
2146
2147 if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2148 aprint_normal_dev(dev, "detached\n");
2149
2150 out:
2151 config_detach_exit(dev);
2152
2153 config_alldevs_enter(&af);
2154 KASSERT(alldevs_nwrite != 0);
2155 --alldevs_nwrite;
2156 if (rv == 0 && dev->dv_del_gen == 0) {
2157 if (alldevs_nwrite == 0 && alldevs_nread == 0)
2158 config_devunlink(dev, &af.af_garbage);
2159 else {
2160 dev->dv_del_gen = alldevs_gen;
2161 alldevs_garbage = true;
2162 }
2163 }
2164 config_alldevs_exit(&af);
2165
2166 KERNEL_UNLOCK_ONE(NULL);
2167
2168 return rv;
2169 }
2170
2171 /*
2172 * config_detach_commit(dev)
2173 *
2174 * Issued by a driver's .ca_detach routine to notify anyone
2175 * waiting in device_lookup_acquire that the driver is committed
2176 * to detaching the device, which allows device_lookup_acquire to
2177 * wake up and fail immediately.
2178 *
2179 * Safe to call multiple times -- idempotent. Must be called
2180 * during config_detach_enter/exit. Safe to use with
2181 * device_lookup because the device is not actually removed from
2182 * the table until after config_detach_exit.
2183 */
2184 void
2185 config_detach_commit(device_t dev)
2186 {
2187 struct lwp *l __diagused;
2188
2189 mutex_enter(&config_misc_lock);
2190 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
2191 device_xname(dev));
2192 KASSERTMSG((l = dev->dv_detaching) == curlwp,
2193 "lwp %ld [%s] @ %p detaching %s",
2194 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2195 device_xname(dev));
2196 dev->dv_detached = true;
2197 cv_broadcast(&config_misc_cv);
2198 mutex_exit(&config_misc_lock);
2199 }
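
/*
 * Example (sketch): a driver's .ca_detach routine typically refuses
 * to detach while it still has users (unless forced), and calls
 * config_detach_commit once it can no longer back out, so that any
 * threads blocked in device_lookup_acquire fail promptly.  The
 * foo(4) driver and its softc fields below are hypothetical.
 *
 *	static int
 *	foo_detach(device_t self, int flags)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *
 *		if (sc->sc_inuse && (flags & DETACH_FORCE) == 0)
 *			return EBUSY;	// backing out is still allowed
 *
 *		config_detach_commit(self);	// past the point of no return
 *
 *		// interrupt any openers, tear down state, free resources
 *		return 0;
 *	}
 */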
2200
2201 int
2202 config_detach_children(device_t parent, int flags)
2203 {
2204 device_t dv;
2205 deviter_t di;
2206 int error = 0;
2207
2208 KASSERT(KERNEL_LOCKED_P());
2209
2210 for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2211 dv = deviter_next(&di)) {
2212 if (device_parent(dv) != parent)
2213 continue;
2214 if ((error = config_detach(dv, flags)) != 0)
2215 break;
2216 }
2217 deviter_release(&di);
2218 return error;
2219 }
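
/*
 * Example (sketch): a bus driver's .ca_detach commonly detaches its
 * children first and only then tears down its own state.  The
 * mybus(4) driver below is hypothetical.
 *
 *	static int
 *	mybus_detach(device_t self, int flags)
 *	{
 *		int error;
 *
 *		if ((error = config_detach_children(self, flags)) != 0)
 *			return error;
 *
 *		// children are gone; release bus resources here
 *		return 0;
 *	}
 */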
2220
2221 device_t
2222 shutdown_first(struct shutdown_state *s)
2223 {
2224 if (!s->initialized) {
2225 deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2226 s->initialized = true;
2227 }
2228 return shutdown_next(s);
2229 }
2230
2231 device_t
2232 shutdown_next(struct shutdown_state *s)
2233 {
2234 device_t dv;
2235
2236 while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2237 ;
2238
2239 if (dv == NULL)
2240 s->initialized = false;
2241
2242 return dv;
2243 }
2244
2245 bool
2246 config_detach_all(int how)
2247 {
2248 static struct shutdown_state s;
2249 device_t curdev;
2250 bool progress = false;
2251 int flags;
2252
2253 KERNEL_LOCK(1, NULL);
2254
2255 if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2256 goto out;
2257
2258 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2259 flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2260 else
2261 flags = DETACH_SHUTDOWN;
2262
2263 for (curdev = shutdown_first(&s); curdev != NULL;
2264 curdev = shutdown_next(&s)) {
2265 aprint_debug(" detaching %s, ", device_xname(curdev));
2266 if (config_detach(curdev, flags) == 0) {
2267 progress = true;
2268 aprint_debug("success.");
2269 } else
2270 aprint_debug("failed.");
2271 }
2272
2273 out: KERNEL_UNLOCK_ONE(NULL);
2274 return progress;
2275 }
2276
2277 static bool
2278 device_is_ancestor_of(device_t ancestor, device_t descendant)
2279 {
2280 device_t dv;
2281
2282 for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2283 if (device_parent(dv) == ancestor)
2284 return true;
2285 }
2286 return false;
2287 }
2288
2289 int
2290 config_deactivate(device_t dev)
2291 {
2292 deviter_t di;
2293 const struct cfattach *ca;
2294 device_t descendant;
2295 int s, rv = 0, oflags;
2296
2297 for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2298 descendant != NULL;
2299 descendant = deviter_next(&di)) {
2300 if (dev != descendant &&
2301 !device_is_ancestor_of(dev, descendant))
2302 continue;
2303
2304 if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2305 continue;
2306
2307 ca = descendant->dv_cfattach;
2308 oflags = descendant->dv_flags;
2309
2310 descendant->dv_flags &= ~DVF_ACTIVE;
2311 if (ca->ca_activate == NULL)
2312 continue;
2313 s = splhigh();
2314 rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2315 splx(s);
2316 if (rv != 0)
2317 descendant->dv_flags = oflags;
2318 }
2319 deviter_release(&di);
2320 return rv;
2321 }
2322
2323 /*
2324 * Defer the configuration of the specified device until all
2325 * of its parent's devices have been attached.
2326 */
2327 void
2328 config_defer(device_t dev, void (*func)(device_t))
2329 {
2330 struct deferred_config *dc;
2331
2332 if (dev->dv_parent == NULL)
2333 panic("config_defer: can't defer config of a root device");
2334
2335 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2336
2337 config_pending_incr(dev);
2338
2339 mutex_enter(&config_misc_lock);
2340 #ifdef DIAGNOSTIC
2341 struct deferred_config *odc;
2342 TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2343 if (odc->dc_dev == dev)
2344 panic("config_defer: deferred twice");
2345 }
2346 #endif
2347 dc->dc_dev = dev;
2348 dc->dc_func = func;
2349 TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2350 mutex_exit(&config_misc_lock);
2351 }
2352
2353 /*
2354 * Defer some autoconfiguration for a device until after interrupts
2355 * are enabled.
2356 */
2357 void
2358 config_interrupts(device_t dev, void (*func)(device_t))
2359 {
2360 struct deferred_config *dc;
2361
2362 /*
2363 	 * If interrupts are already enabled, call back now.
2364 */
2365 if (cold == 0) {
2366 (*func)(dev);
2367 return;
2368 }
2369
2370 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2371
2372 config_pending_incr(dev);
2373
2374 mutex_enter(&config_misc_lock);
2375 #ifdef DIAGNOSTIC
2376 struct deferred_config *odc;
2377 TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2378 if (odc->dc_dev == dev)
2379 panic("config_interrupts: deferred twice");
2380 }
2381 #endif
2382 dc->dc_dev = dev;
2383 dc->dc_func = func;
2384 TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2385 mutex_exit(&config_misc_lock);
2386 }
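
/*
 * Example (sketch): an attach routine that needs working interrupts
 * (e.g. to finish probing attached hardware) defers that part of its
 * setup; config_interrupts handles config_pending_incr/decr
 * internally, so config_finalize waits for the callback.  foo(4) is
 * hypothetical.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		// register interrupt handler, do cold-safe setup ...
 *		config_interrupts(self, foo_attach_deferred);
 *	}
 *
 *	static void
 *	foo_attach_deferred(device_t self)
 *	{
 *		// runs once cold is cleared; interrupts now work
 *	}
 */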
2387
2388 /*
2389 * Defer some autoconfiguration for a device until after root file system
2390  * is mounted (to load firmware, etc.).
2391 */
2392 void
2393 config_mountroot(device_t dev, void (*func)(device_t))
2394 {
2395 struct deferred_config *dc;
2396
2397 /*
2398 	 * If the root file system is already mounted, call back now.
2399 */
2400 if (root_is_mounted) {
2401 (*func)(dev);
2402 return;
2403 }
2404
2405 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2406
2407 mutex_enter(&config_misc_lock);
2408 #ifdef DIAGNOSTIC
2409 struct deferred_config *odc;
2410 TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2411 if (odc->dc_dev == dev)
2412 panic("%s: deferred twice", __func__);
2413 }
2414 #endif
2415
2416 dc->dc_dev = dev;
2417 dc->dc_func = func;
2418 TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2419 mutex_exit(&config_misc_lock);
2420 }
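
/*
 * Example (sketch): deferring the part of attach that needs the root
 * file system, typically loading a firmware image with firmload(9).
 * foo(4) and foo_load_firmware() are hypothetical.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		// firmware-independent setup ...
 *		config_mountroot(self, foo_mountroot);
 *	}
 *
 *	static void
 *	foo_mountroot(device_t self)
 *	{
 *		// root is mounted; firmware files are now reachable
 *		foo_load_firmware(device_private(self));
 *	}
 */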
2421
2422 /*
2423 * Process a deferred configuration queue.
2424 */
2425 static void
2426 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2427 {
2428 struct deferred_config *dc;
2429
2430 KASSERT(KERNEL_LOCKED_P());
2431
2432 mutex_enter(&config_misc_lock);
2433 dc = TAILQ_FIRST(queue);
2434 while (dc) {
2435 if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2436 TAILQ_REMOVE(queue, dc, dc_queue);
2437 mutex_exit(&config_misc_lock);
2438
2439 (*dc->dc_func)(dc->dc_dev);
2440 config_pending_decr(dc->dc_dev);
2441 kmem_free(dc, sizeof(*dc));
2442
2443 mutex_enter(&config_misc_lock);
2444 /* Restart, queue might have changed */
2445 dc = TAILQ_FIRST(queue);
2446 } else {
2447 dc = TAILQ_NEXT(dc, dc_queue);
2448 }
2449 }
2450 mutex_exit(&config_misc_lock);
2451 }
2452
2453 /*
2454 * Manipulate the config_pending semaphore.
2455 */
2456 void
2457 config_pending_incr(device_t dev)
2458 {
2459
2460 mutex_enter(&config_misc_lock);
2461 KASSERTMSG(dev->dv_pending < INT_MAX,
2462 "%s: excess config_pending_incr", device_xname(dev));
2463 if (dev->dv_pending++ == 0)
2464 TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2465 #ifdef DEBUG_AUTOCONF
2466 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2467 #endif
2468 mutex_exit(&config_misc_lock);
2469 }
2470
2471 void
2472 config_pending_decr(device_t dev)
2473 {
2474
2475 mutex_enter(&config_misc_lock);
2476 KASSERTMSG(dev->dv_pending > 0,
2477 "%s: excess config_pending_decr", device_xname(dev));
2478 if (--dev->dv_pending == 0) {
2479 TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2480 cv_broadcast(&config_misc_cv);
2481 }
2482 #ifdef DEBUG_AUTOCONF
2483 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2484 #endif
2485 mutex_exit(&config_misc_lock);
2486 }
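
/*
 * Example (sketch): a driver that finishes its attach asynchronously
 * on its own (outside of config_interrupts/config_defer, which manage
 * the semaphore themselves) brackets that work with
 * config_pending_incr/decr so that config_finalize waits for it.
 * foo(4) and foo_attach_done() are hypothetical.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		config_pending_incr(self);
 *		// start asynchronous setup (e.g. a driver thread)
 *		// that calls foo_attach_done(self) when finished
 *	}
 *
 *	static void
 *	foo_attach_done(device_t self)
 *	{
 *		config_pending_decr(self);
 *	}
 */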
2487
2488 /*
2489 * Register a "finalization" routine. Finalization routines are
2490 * called iteratively once all real devices have been found during
2491 * autoconfiguration, for as long as any one finalizer has done
2492 * any work.
2493 */
2494 int
2495 config_finalize_register(device_t dev, int (*fn)(device_t))
2496 {
2497 struct finalize_hook *f;
2498 int error = 0;
2499
2500 KERNEL_LOCK(1, NULL);
2501
2502 /*
2503 * If finalization has already been done, invoke the
2504 * callback function now.
2505 */
2506 if (config_finalize_done) {
2507 while ((*fn)(dev) != 0)
2508 /* loop */ ;
2509 goto out;
2510 }
2511
2512 /* Ensure this isn't already on the list. */
2513 TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2514 if (f->f_func == fn && f->f_dev == dev) {
2515 error = EEXIST;
2516 goto out;
2517 }
2518 }
2519
2520 f = kmem_alloc(sizeof(*f), KM_SLEEP);
2521 f->f_func = fn;
2522 f->f_dev = dev;
2523 TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2524
2525 /* Success! */
2526 error = 0;
2527
2528 out: KERNEL_UNLOCK_ONE(NULL);
2529 return error;
2530 }
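
/*
 * Example (sketch): a finalizer returns nonzero as long as it did
 * some work, so config_finalize keeps looping until every hook is
 * idle.  foo(4), its softc field and foo_try_config() are
 * hypothetical.
 *
 *	static int
 *	foo_finalize(device_t self)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *
 *		if (sc->sc_needs_config && foo_try_config(sc)) {
 *			sc->sc_needs_config = false;
 *			return 1;	// did work, run the hooks again
 *		}
 *		return 0;		// nothing left to do
 *	}
 *
 *	// in foo_attach():  config_finalize_register(self, foo_finalize);
 */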
2531
2532 void
2533 config_finalize(void)
2534 {
2535 struct finalize_hook *f;
2536 struct pdevinit *pdev;
2537 extern struct pdevinit pdevinit[];
2538 int errcnt, rv;
2539
2540 /*
2541 * Now that device driver threads have been created, wait for
2542 * them to finish any deferred autoconfiguration.
2543 */
2544 mutex_enter(&config_misc_lock);
2545 while (!TAILQ_EMPTY(&config_pending)) {
2546 device_t dev;
2547 int error;
2548
2549 error = cv_timedwait(&config_misc_cv, &config_misc_lock,
2550 mstohz(1000));
2551 if (error == EWOULDBLOCK) {
2552 aprint_debug("waiting for devices:");
2553 TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2554 aprint_debug(" %s", device_xname(dev));
2555 aprint_debug("\n");
2556 }
2557 }
2558 mutex_exit(&config_misc_lock);
2559
2560 KERNEL_LOCK(1, NULL);
2561
2562 /* Attach pseudo-devices. */
2563 for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2564 (*pdev->pdev_attach)(pdev->pdev_count);
2565
2566 /* Run the hooks until none of them does any work. */
2567 do {
2568 rv = 0;
2569 TAILQ_FOREACH(f, &config_finalize_list, f_list)
2570 rv |= (*f->f_func)(f->f_dev);
2571 } while (rv != 0);
2572
2573 config_finalize_done = 1;
2574
2575 /* Now free all the hooks. */
2576 while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2577 TAILQ_REMOVE(&config_finalize_list, f, f_list);
2578 kmem_free(f, sizeof(*f));
2579 }
2580
2581 KERNEL_UNLOCK_ONE(NULL);
2582
2583 errcnt = aprint_get_error_count();
2584 if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2585 (boothowto & AB_VERBOSE) == 0) {
2586 mutex_enter(&config_misc_lock);
2587 if (config_do_twiddle) {
2588 config_do_twiddle = 0;
2589 printf_nolog(" done.\n");
2590 }
2591 mutex_exit(&config_misc_lock);
2592 }
2593 if (errcnt != 0) {
2594 printf("WARNING: %d error%s while detecting hardware; "
2595 "check system log.\n", errcnt,
2596 errcnt == 1 ? "" : "s");
2597 }
2598 }
2599
2600 void
2601 config_twiddle_init(void)
2602 {
2603
2604 if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2605 config_do_twiddle = 1;
2606 }
2607 callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2608 }
2609
2610 void
2611 config_twiddle_fn(void *cookie)
2612 {
2613
2614 mutex_enter(&config_misc_lock);
2615 if (config_do_twiddle) {
2616 twiddle();
2617 callout_schedule(&config_twiddle_ch, mstohz(100));
2618 }
2619 mutex_exit(&config_misc_lock);
2620 }
2621
2622 static void
2623 config_alldevs_enter(struct alldevs_foray *af)
2624 {
2625 TAILQ_INIT(&af->af_garbage);
2626 mutex_enter(&alldevs_lock);
2627 config_collect_garbage(&af->af_garbage);
2628 }
2629
2630 static void
2631 config_alldevs_exit(struct alldevs_foray *af)
2632 {
2633 mutex_exit(&alldevs_lock);
2634 config_dump_garbage(&af->af_garbage);
2635 }
2636
2637 /*
2638 * device_lookup:
2639 *
2640 * Look up a device instance for a given driver.
2641 *
2642 * Caller is responsible for ensuring the device's state is
2643 * stable, either by holding a reference already obtained with
2644 * device_lookup_acquire or by otherwise ensuring the device is
2645 * attached and can't be detached (e.g., holding an open device
2646 * node and ensuring *_detach calls vdevgone).
2647 *
2648 * XXX Find a way to assert this.
2649 *
2650 * Safe for use up to and including interrupt context at IPL_VM.
2651 * Never sleeps.
2652 */
2653 device_t
2654 device_lookup(cfdriver_t cd, int unit)
2655 {
2656 device_t dv;
2657
2658 mutex_enter(&alldevs_lock);
2659 if (unit < 0 || unit >= cd->cd_ndevs)
2660 dv = NULL;
2661 else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2662 dv = NULL;
2663 mutex_exit(&alldevs_lock);
2664
2665 return dv;
2666 }
2667
2668 /*
2669 * device_lookup_private:
2670 *
2671 * Look up a softc instance for a given driver.
2672 */
2673 void *
2674 device_lookup_private(cfdriver_t cd, int unit)
2675 {
2676
2677 return device_private(device_lookup(cd, unit));
2678 }
2679
2680 /*
2681 * device_lookup_acquire:
2682 *
2683 * Look up a device instance for a given driver, and return a
2684 * reference to it that must be released by device_release.
2685 *
2686 * => If the device is still attaching, blocks until *_attach has
2687 * returned.
2688 *
2689 * => If the device is detaching, blocks until *_detach has
2690 * returned. May succeed or fail in that case, depending on
2691 * whether *_detach has backed out (EBUSY) or committed to
2692 * detaching.
2693 *
2694 * May sleep.
2695 */
2696 device_t
2697 device_lookup_acquire(cfdriver_t cd, int unit)
2698 {
2699 device_t dv;
2700
2701 ASSERT_SLEEPABLE();
2702
2703 /* XXX This should have a pserialized fast path -- TBD. */
2704 mutex_enter(&config_misc_lock);
2705 mutex_enter(&alldevs_lock);
2706 retry: if (unit < 0 || unit >= cd->cd_ndevs ||
2707 (dv = cd->cd_devs[unit]) == NULL ||
2708 dv->dv_del_gen != 0 ||
2709 dv->dv_detached) {
2710 dv = NULL;
2711 } else {
2712 /*
2713 * Wait for the device to stabilize, if attaching or
2714 * detaching. Either way we must wait for *_attach or
2715 * *_detach to complete, and either way we must retry:
2716 * even if detaching, *_detach might fail (EBUSY) so
2717 * the device may still be there.
2718 */
2719 if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
2720 dv->dv_detaching != NULL) {
2721 mutex_exit(&alldevs_lock);
2722 cv_wait(&config_misc_cv, &config_misc_lock);
2723 mutex_enter(&alldevs_lock);
2724 goto retry;
2725 }
2726 localcount_acquire(dv->dv_localcount);
2727 }
2728 mutex_exit(&alldevs_lock);
2729 mutex_exit(&config_misc_lock);
2730
2731 return dv;
2732 }
2733
2734 /*
2735 * device_release:
2736 *
2737 * Release a reference to a device acquired with
2738 * device_lookup_acquire.
2739 */
2740 void
2741 device_release(device_t dv)
2742 {
2743
2744 localcount_release(dv->dv_localcount,
2745 &config_misc_cv, &config_misc_lock);
2746 }
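
/*
 * Example (sketch): a character device open routine that takes a
 * stable reference for the duration of the call instead of using a
 * bare device_lookup.  foo_cd is the cfdriver generated by config(1)
 * for the hypothetical foo(4) driver; foo_do_open() is hypothetical
 * as well.
 *
 *	static int
 *	foo_open(dev_t dev, int flag, int mode, struct lwp *l)
 *	{
 *		device_t self;
 *		int error;
 *
 *		self = device_lookup_acquire(&foo_cd, minor(dev));
 *		if (self == NULL)
 *			return ENXIO;
 *		error = foo_do_open(device_private(self), flag, mode, l);
 *		device_release(self);
 *		return error;
 *	}
 */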
2747
2748 /*
2749 * device_find_by_xname:
2750 *
2751 * Returns the device of the given name or NULL if it doesn't exist.
2752 */
2753 device_t
2754 device_find_by_xname(const char *name)
2755 {
2756 device_t dv;
2757 deviter_t di;
2758
2759 for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2760 if (strcmp(device_xname(dv), name) == 0)
2761 break;
2762 }
2763 deviter_release(&di);
2764
2765 return dv;
2766 }
2767
2768 /*
2769 * device_find_by_driver_unit:
2770 *
2771 * Returns the device of the given driver name and unit or
2772 * NULL if it doesn't exist.
2773 */
2774 device_t
2775 device_find_by_driver_unit(const char *name, int unit)
2776 {
2777 struct cfdriver *cd;
2778
2779 if ((cd = config_cfdriver_lookup(name)) == NULL)
2780 return NULL;
2781 return device_lookup(cd, unit);
2782 }
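
/*
 * Example (sketch): both lookups by name are occasionally useful in
 * machine-dependent or pseudo-device code, e.g.:
 *
 *	device_t dv1 = device_find_by_xname("sd0");
 *	device_t dv2 = device_find_by_driver_unit("sd", 0);
 *
 * Both return NULL if no such device is attached; as with
 * device_lookup above, the caller must ensure the device cannot be
 * detached while the pointer is in use.
 */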
2783
2784 static bool
2785 match_strcmp(const char * const s1, const char * const s2)
2786 {
2787 return strcmp(s1, s2) == 0;
2788 }
2789
2790 static bool
2791 match_pmatch(const char * const s1, const char * const s2)
2792 {
2793 return pmatch(s1, s2, NULL) == 2;
2794 }
2795
2796 static bool
2797 strarray_match_internal(const char ** const strings,
2798 unsigned int const nstrings, const char * const str,
2799 unsigned int * const indexp,
2800 bool (*match_fn)(const char *, const char *))
2801 {
2802 unsigned int i;
2803
2804 if (strings == NULL || nstrings == 0) {
2805 return false;
2806 }
2807
2808 for (i = 0; i < nstrings; i++) {
2809 if ((*match_fn)(strings[i], str)) {
2810 *indexp = i;
2811 return true;
2812 }
2813 }
2814
2815 return false;
2816 }
2817
2818 static int
2819 strarray_match(const char ** const strings, unsigned int const nstrings,
2820 const char * const str)
2821 {
2822 unsigned int idx;
2823
2824 if (strarray_match_internal(strings, nstrings, str, &idx,
2825 match_strcmp)) {
2826 return (int)(nstrings - idx);
2827 }
2828 return 0;
2829 }
2830
2831 static int
2832 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2833 const char * const pattern)
2834 {
2835 unsigned int idx;
2836
2837 if (strarray_match_internal(strings, nstrings, pattern, &idx,
2838 match_pmatch)) {
2839 return (int)(nstrings - idx);
2840 }
2841 return 0;
2842 }
2843
2844 static int
2845 device_compatible_match_strarray_internal(
2846 const char **device_compats, int ndevice_compats,
2847 const struct device_compatible_entry *driver_compats,
2848 const struct device_compatible_entry **matching_entryp,
2849 int (*match_fn)(const char **, unsigned int, const char *))
2850 {
2851 const struct device_compatible_entry *dce = NULL;
2852 int rv;
2853
2854 if (ndevice_compats == 0 || device_compats == NULL ||
2855 driver_compats == NULL)
2856 return 0;
2857
2858 for (dce = driver_compats; dce->compat != NULL; dce++) {
2859 rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2860 if (rv != 0) {
2861 if (matching_entryp != NULL) {
2862 *matching_entryp = dce;
2863 }
2864 return rv;
2865 }
2866 }
2867 return 0;
2868 }
2869
2870 /*
2871 * device_compatible_match:
2872 *
2873 * Match a driver's "compatible" data against a device's
2874 * "compatible" strings. Returns resulted weighted by
2875 * which device "compatible" string was matched.
2876 */
2877 int
2878 device_compatible_match(const char **device_compats, int ndevice_compats,
2879 const struct device_compatible_entry *driver_compats)
2880 {
2881 return device_compatible_match_strarray_internal(device_compats,
2882 ndevice_compats, driver_compats, NULL, strarray_match);
2883 }
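
/*
 * Example (sketch): a driver's "compatible" table ends with a NULL
 * .compat sentinel and is matched against the strings the device
 * reports (e.g. from firmware properties).  The "vendor,foo" strings
 * below are hypothetical.
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "vendor,foo-v2" },
 *		{ .compat = "vendor,foo" },
 *		{ .compat = NULL }
 *	};
 *
 *	const char *devcompat[] = { "vendor,foo-v2", "vendor,foo" };
 *	int score = device_compatible_match(devcompat,
 *	    __arraycount(devcompat), compat_data);
 *	// score > 0 on a match; a match on an earlier device
 *	// "compatible" string yields a higher score
 */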
2884
2885 /*
2886 * device_compatible_pmatch:
2887 *
2888 * Like device_compatible_match(), but uses pmatch(9) to compare
2889 * the device "compatible" strings against patterns in the
2890 * driver's "compatible" data.
2891 */
2892 int
2893 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
2894 const struct device_compatible_entry *driver_compats)
2895 {
2896 return device_compatible_match_strarray_internal(device_compats,
2897 ndevice_compats, driver_compats, NULL, strarray_pmatch);
2898 }
2899
2900 static int
2901 device_compatible_match_strlist_internal(
2902 const char * const device_compats, size_t const device_compatsize,
2903 const struct device_compatible_entry *driver_compats,
2904 const struct device_compatible_entry **matching_entryp,
2905 int (*match_fn)(const char *, size_t, const char *))
2906 {
2907 const struct device_compatible_entry *dce = NULL;
2908 int rv;
2909
2910 if (device_compats == NULL || device_compatsize == 0 ||
2911 driver_compats == NULL)
2912 return 0;
2913
2914 for (dce = driver_compats; dce->compat != NULL; dce++) {
2915 rv = (*match_fn)(device_compats, device_compatsize,
2916 dce->compat);
2917 if (rv != 0) {
2918 if (matching_entryp != NULL) {
2919 *matching_entryp = dce;
2920 }
2921 return rv;
2922 }
2923 }
2924 return 0;
2925 }
2926
2927 /*
2928 * device_compatible_match_strlist:
2929 *
2930  * Like device_compatible_match(), but takes the device
2931 * "compatible" strings as an OpenFirmware-style string
2932 * list.
2933 */
2934 int
2935 device_compatible_match_strlist(
2936 const char * const device_compats, size_t const device_compatsize,
2937 const struct device_compatible_entry *driver_compats)
2938 {
2939 return device_compatible_match_strlist_internal(device_compats,
2940 device_compatsize, driver_compats, NULL, strlist_match);
2941 }
2942
2943 /*
2944 * device_compatible_pmatch_strlist:
2945 *
2946  * Like device_compatible_pmatch(), but takes the device
2947 * "compatible" strings as an OpenFirmware-style string
2948 * list.
2949 */
2950 int
2951 device_compatible_pmatch_strlist(
2952 const char * const device_compats, size_t const device_compatsize,
2953 const struct device_compatible_entry *driver_compats)
2954 {
2955 return device_compatible_match_strlist_internal(device_compats,
2956 device_compatsize, driver_compats, NULL, strlist_pmatch);
2957 }
2958
2959 static int
2960 device_compatible_match_id_internal(
2961 uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
2962 const struct device_compatible_entry *driver_compats,
2963 const struct device_compatible_entry **matching_entryp)
2964 {
2965 const struct device_compatible_entry *dce = NULL;
2966
2967 if (mask == 0)
2968 return 0;
2969
2970 for (dce = driver_compats; dce->id != sentinel_id; dce++) {
2971 if ((id & mask) == dce->id) {
2972 if (matching_entryp != NULL) {
2973 *matching_entryp = dce;
2974 }
2975 return 1;
2976 }
2977 }
2978 return 0;
2979 }
2980
2981 /*
2982 * device_compatible_match_id:
2983 *
2984 * Like device_compatible_match(), but takes a single
2985 * unsigned integer device ID.
2986 */
2987 int
2988 device_compatible_match_id(
2989 uintptr_t const id, uintptr_t const sentinel_id,
2990 const struct device_compatible_entry *driver_compats)
2991 {
2992 return device_compatible_match_id_internal(id, (uintptr_t)-1,
2993 sentinel_id, driver_compats, NULL);
2994 }
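
/*
 * Example (sketch): matching by numeric ID (e.g. a PCI- or
 * virtio-style device ID).  The table is terminated by a
 * caller-chosen sentinel ID, which is also passed to the match
 * routine.  The IDs below are hypothetical.
 *
 *	#define FOO_ID_SENTINEL	0
 *
 *	static const struct device_compatible_entry foo_ids[] = {
 *		{ .id = 0x1234 },
 *		{ .id = 0x5678 },
 *		{ .id = FOO_ID_SENTINEL }
 *	};
 *
 *	// returns 1 if devid is in the table, 0 otherwise
 *	int found = device_compatible_match_id(devid, FOO_ID_SENTINEL,
 *	    foo_ids);
 */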
2995
2996 /*
2997 * device_compatible_lookup:
2998 *
2999 * Look up and return the device_compatible_entry, using the
3000 * same matching criteria used by device_compatible_match().
3001 */
3002 const struct device_compatible_entry *
3003 device_compatible_lookup(const char **device_compats, int ndevice_compats,
3004 const struct device_compatible_entry *driver_compats)
3005 {
3006 const struct device_compatible_entry *dce;
3007
3008 if (device_compatible_match_strarray_internal(device_compats,
3009 ndevice_compats, driver_compats, &dce, strarray_match)) {
3010 return dce;
3011 }
3012 return NULL;
3013 }
3014
3015 /*
3016 * device_compatible_plookup:
3017 *
3018 * Look up and return the device_compatible_entry, using the
3019 * same matching criteria used by device_compatible_pmatch().
3020 */
3021 const struct device_compatible_entry *
3022 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3023 const struct device_compatible_entry *driver_compats)
3024 {
3025 const struct device_compatible_entry *dce;
3026
3027 if (device_compatible_match_strarray_internal(device_compats,
3028 ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3029 return dce;
3030 }
3031 return NULL;
3032 }
3033
3034 /*
3035 * device_compatible_lookup_strlist:
3036 *
3037  * Like device_compatible_lookup(), but takes the device
3038 * "compatible" strings as an OpenFirmware-style string
3039 * list.
3040 */
3041 const struct device_compatible_entry *
3042 device_compatible_lookup_strlist(
3043 const char * const device_compats, size_t const device_compatsize,
3044 const struct device_compatible_entry *driver_compats)
3045 {
3046 const struct device_compatible_entry *dce;
3047
3048 if (device_compatible_match_strlist_internal(device_compats,
3049 device_compatsize, driver_compats, &dce, strlist_match)) {
3050 return dce;
3051 }
3052 return NULL;
3053 }
3054
3055 /*
3056 * device_compatible_plookup_strlist:
3057 *
3058  * Like device_compatible_plookup(), but takes the device
3059 * "compatible" strings as an OpenFirmware-style string
3060 * list.
3061 */
3062 const struct device_compatible_entry *
3063 device_compatible_plookup_strlist(
3064 const char * const device_compats, size_t const device_compatsize,
3065 const struct device_compatible_entry *driver_compats)
3066 {
3067 const struct device_compatible_entry *dce;
3068
3069 if (device_compatible_match_strlist_internal(device_compats,
3070 device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3071 return dce;
3072 }
3073 return NULL;
3074 }
3075
3076 /*
3077 * device_compatible_lookup_id:
3078 *
3079 * Like device_compatible_lookup(), but takes a single
3080 * unsigned integer device ID.
3081 */
3082 const struct device_compatible_entry *
3083 device_compatible_lookup_id(
3084 uintptr_t const id, uintptr_t const sentinel_id,
3085 const struct device_compatible_entry *driver_compats)
3086 {
3087 const struct device_compatible_entry *dce;
3088
3089 if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3090 sentinel_id, driver_compats, &dce)) {
3091 return dce;
3092 }
3093 return NULL;
3094 }
3095
3096 /*
3097 * Power management related functions.
3098 */
3099
3100 bool
3101 device_pmf_is_registered(device_t dev)
3102 {
3103 return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3104 }
3105
3106 bool
3107 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
3108 {
3109 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3110 return true;
3111 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3112 return false;
3113 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3114 dev->dv_driver_suspend != NULL &&
3115 !(*dev->dv_driver_suspend)(dev, qual))
3116 return false;
3117
3118 dev->dv_flags |= DVF_DRIVER_SUSPENDED;
3119 return true;
3120 }
3121
3122 bool
3123 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3124 {
3125 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3126 return true;
3127 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3128 return false;
3129 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3130 dev->dv_driver_resume != NULL &&
3131 !(*dev->dv_driver_resume)(dev, qual))
3132 return false;
3133
3134 dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3135 return true;
3136 }
3137
3138 bool
3139 device_pmf_driver_shutdown(device_t dev, int how)
3140 {
3141
3142 	if (dev->dv_driver_shutdown != NULL &&
3143 !(*dev->dv_driver_shutdown)(dev, how))
3144 return false;
3145 return true;
3146 }
3147
3148 void
3149 device_pmf_driver_register(device_t dev,
3150 bool (*suspend)(device_t, const pmf_qual_t *),
3151 bool (*resume)(device_t, const pmf_qual_t *),
3152 bool (*shutdown)(device_t, int))
3153 {
3154
3155 dev->dv_driver_suspend = suspend;
3156 dev->dv_driver_resume = resume;
3157 dev->dv_driver_shutdown = shutdown;
3158 dev->dv_flags |= DVF_POWER_HANDLERS;
3159 }
3160
3161 void
3162 device_pmf_driver_deregister(device_t dev)
3163 {
3164 device_lock_t dvl = device_getlock(dev);
3165
3166 dev->dv_driver_suspend = NULL;
3167 dev->dv_driver_resume = NULL;
3168
3169 mutex_enter(&dvl->dvl_mtx);
3170 dev->dv_flags &= ~DVF_POWER_HANDLERS;
3171 while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
3172 /* Wake a thread that waits for the lock. That
3173 * thread will fail to acquire the lock, and then
3174 * it will wake the next thread that waits for the
3175 * lock, or else it will wake us.
3176 */
3177 cv_signal(&dvl->dvl_cv);
3178 pmflock_debug(dev, __func__, __LINE__);
3179 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3180 pmflock_debug(dev, __func__, __LINE__);
3181 }
3182 mutex_exit(&dvl->dvl_mtx);
3183 }
3184
3185 void
3186 device_pmf_driver_child_register(device_t dev)
3187 {
3188 device_t parent = device_parent(dev);
3189
3190 if (parent == NULL || parent->dv_driver_child_register == NULL)
3191 return;
3192 (*parent->dv_driver_child_register)(dev);
3193 }
3194
3195 void
3196 device_pmf_driver_set_child_register(device_t dev,
3197 void (*child_register)(device_t))
3198 {
3199 dev->dv_driver_child_register = child_register;
3200 }
3201
3202 static void
3203 pmflock_debug(device_t dev, const char *func, int line)
3204 {
3205 #ifdef PMFLOCK_DEBUG
3206 device_lock_t dvl = device_getlock(dev);
3207 const char *curlwp_name;
3208
3209 if (curlwp->l_name != NULL)
3210 curlwp_name = curlwp->l_name;
3211 else
3212 curlwp_name = curlwp->l_proc->p_comm;
3213
3214 aprint_debug_dev(dev,
3215 "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3216 curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3217 #endif /* PMFLOCK_DEBUG */
3218 }
3219
3220 static bool
3221 device_pmf_lock1(device_t dev)
3222 {
3223 device_lock_t dvl = device_getlock(dev);
3224
3225 while (device_pmf_is_registered(dev) &&
3226 dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3227 dvl->dvl_nwait++;
3228 pmflock_debug(dev, __func__, __LINE__);
3229 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3230 pmflock_debug(dev, __func__, __LINE__);
3231 dvl->dvl_nwait--;
3232 }
3233 if (!device_pmf_is_registered(dev)) {
3234 pmflock_debug(dev, __func__, __LINE__);
3235 /* We could not acquire the lock, but some other thread may
3236 * wait for it, also. Wake that thread.
3237 */
3238 cv_signal(&dvl->dvl_cv);
3239 return false;
3240 }
3241 dvl->dvl_nlock++;
3242 dvl->dvl_holder = curlwp;
3243 pmflock_debug(dev, __func__, __LINE__);
3244 return true;
3245 }
3246
3247 bool
3248 device_pmf_lock(device_t dev)
3249 {
3250 bool rc;
3251 device_lock_t dvl = device_getlock(dev);
3252
3253 mutex_enter(&dvl->dvl_mtx);
3254 rc = device_pmf_lock1(dev);
3255 mutex_exit(&dvl->dvl_mtx);
3256
3257 return rc;
3258 }
3259
3260 void
3261 device_pmf_unlock(device_t dev)
3262 {
3263 device_lock_t dvl = device_getlock(dev);
3264
3265 KASSERT(dvl->dvl_nlock > 0);
3266 mutex_enter(&dvl->dvl_mtx);
3267 if (--dvl->dvl_nlock == 0)
3268 dvl->dvl_holder = NULL;
3269 cv_signal(&dvl->dvl_cv);
3270 pmflock_debug(dev, __func__, __LINE__);
3271 mutex_exit(&dvl->dvl_mtx);
3272 }
3273
3274 device_lock_t
3275 device_getlock(device_t dev)
3276 {
3277 return &dev->dv_lock;
3278 }
3279
3280 void *
3281 device_pmf_bus_private(device_t dev)
3282 {
3283 return dev->dv_bus_private;
3284 }
3285
3286 bool
3287 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3288 {
3289 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3290 return true;
3291 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3292 (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3293 return false;
3294 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3295 dev->dv_bus_suspend != NULL &&
3296 !(*dev->dv_bus_suspend)(dev, qual))
3297 return false;
3298
3299 dev->dv_flags |= DVF_BUS_SUSPENDED;
3300 return true;
3301 }
3302
3303 bool
3304 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3305 {
3306 if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3307 return true;
3308 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3309 dev->dv_bus_resume != NULL &&
3310 !(*dev->dv_bus_resume)(dev, qual))
3311 return false;
3312
3313 dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3314 return true;
3315 }
3316
3317 bool
3318 device_pmf_bus_shutdown(device_t dev, int how)
3319 {
3320
3321 	if (dev->dv_bus_shutdown != NULL &&
3322 !(*dev->dv_bus_shutdown)(dev, how))
3323 return false;
3324 return true;
3325 }
3326
3327 void
3328 device_pmf_bus_register(device_t dev, void *priv,
3329 bool (*suspend)(device_t, const pmf_qual_t *),
3330 bool (*resume)(device_t, const pmf_qual_t *),
3331 bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3332 {
3333 dev->dv_bus_private = priv;
3334 dev->dv_bus_resume = resume;
3335 dev->dv_bus_suspend = suspend;
3336 dev->dv_bus_shutdown = shutdown;
3337 dev->dv_bus_deregister = deregister;
3338 }
3339
3340 void
3341 device_pmf_bus_deregister(device_t dev)
3342 {
3343 if (dev->dv_bus_deregister == NULL)
3344 return;
3345 (*dev->dv_bus_deregister)(dev);
3346 dev->dv_bus_private = NULL;
3347 dev->dv_bus_suspend = NULL;
3348 dev->dv_bus_resume = NULL;
3349 dev->dv_bus_deregister = NULL;
3350 }
3351
3352 void *
3353 device_pmf_class_private(device_t dev)
3354 {
3355 return dev->dv_class_private;
3356 }
3357
3358 bool
3359 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3360 {
3361 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3362 return true;
3363 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3364 dev->dv_class_suspend != NULL &&
3365 !(*dev->dv_class_suspend)(dev, qual))
3366 return false;
3367
3368 dev->dv_flags |= DVF_CLASS_SUSPENDED;
3369 return true;
3370 }
3371
3372 bool
3373 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3374 {
3375 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3376 return true;
3377 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3378 (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3379 return false;
3380 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3381 dev->dv_class_resume != NULL &&
3382 !(*dev->dv_class_resume)(dev, qual))
3383 return false;
3384
3385 dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3386 return true;
3387 }
3388
3389 void
3390 device_pmf_class_register(device_t dev, void *priv,
3391 bool (*suspend)(device_t, const pmf_qual_t *),
3392 bool (*resume)(device_t, const pmf_qual_t *),
3393 void (*deregister)(device_t))
3394 {
3395 dev->dv_class_private = priv;
3396 dev->dv_class_suspend = suspend;
3397 dev->dv_class_resume = resume;
3398 dev->dv_class_deregister = deregister;
3399 }
3400
3401 void
3402 device_pmf_class_deregister(device_t dev)
3403 {
3404 if (dev->dv_class_deregister == NULL)
3405 return;
3406 (*dev->dv_class_deregister)(dev);
3407 dev->dv_class_private = NULL;
3408 dev->dv_class_suspend = NULL;
3409 dev->dv_class_resume = NULL;
3410 dev->dv_class_deregister = NULL;
3411 }
3412
3413 bool
3414 device_active(device_t dev, devactive_t type)
3415 {
3416 size_t i;
3417
3418 if (dev->dv_activity_count == 0)
3419 return false;
3420
3421 for (i = 0; i < dev->dv_activity_count; ++i) {
3422 if (dev->dv_activity_handlers[i] == NULL)
3423 break;
3424 (*dev->dv_activity_handlers[i])(dev, type);
3425 }
3426
3427 return true;
3428 }
3429
3430 bool
3431 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3432 {
3433 void (**new_handlers)(device_t, devactive_t);
3434 void (**old_handlers)(device_t, devactive_t);
3435 size_t i, old_size, new_size;
3436 int s;
3437
3438 old_handlers = dev->dv_activity_handlers;
3439 old_size = dev->dv_activity_count;
3440
3441 KASSERT(old_size == 0 || old_handlers != NULL);
3442
3443 for (i = 0; i < old_size; ++i) {
3444 KASSERT(old_handlers[i] != handler);
3445 if (old_handlers[i] == NULL) {
3446 old_handlers[i] = handler;
3447 return true;
3448 }
3449 }
3450
3451 new_size = old_size + 4;
3452 new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3453
3454 for (i = 0; i < old_size; ++i)
3455 new_handlers[i] = old_handlers[i];
3456 new_handlers[old_size] = handler;
3457 for (i = old_size+1; i < new_size; ++i)
3458 new_handlers[i] = NULL;
3459
3460 s = splhigh();
3461 dev->dv_activity_count = new_size;
3462 dev->dv_activity_handlers = new_handlers;
3463 splx(s);
3464
3465 if (old_size > 0)
3466 kmem_free(old_handlers, sizeof(void *) * old_size);
3467
3468 return true;
3469 }
3470
3471 void
3472 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3473 {
3474 void (**old_handlers)(device_t, devactive_t);
3475 size_t i, old_size;
3476 int s;
3477
3478 old_handlers = dev->dv_activity_handlers;
3479 old_size = dev->dv_activity_count;
3480
3481 for (i = 0; i < old_size; ++i) {
3482 if (old_handlers[i] == handler)
3483 break;
3484 if (old_handlers[i] == NULL)
3485 return; /* XXX panic? */
3486 }
3487
3488 if (i == old_size)
3489 return; /* XXX panic? */
3490
3491 for (; i < old_size - 1; ++i) {
3492 if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3493 continue;
3494
3495 if (i == 0) {
3496 s = splhigh();
3497 dev->dv_activity_count = 0;
3498 dev->dv_activity_handlers = NULL;
3499 splx(s);
3500 kmem_free(old_handlers, sizeof(void *) * old_size);
3501 }
3502 return;
3503 }
3504 old_handlers[i] = NULL;
3505 }
3506
3507 /* Return true iff the device_t `dv' exists at generation `gen'. */
3508 static bool
3509 device_exists_at(device_t dv, devgen_t gen)
3510 {
3511 return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3512 dv->dv_add_gen <= gen;
3513 }
3514
3515 static bool
3516 deviter_visits(const deviter_t *di, device_t dv)
3517 {
3518 return device_exists_at(dv, di->di_gen);
3519 }
3520
3521 /*
3522 * Device Iteration
3523 *
3524 * deviter_t: a device iterator. Holds state for a "walk" visiting
3525 * each device_t's in the device tree.
3526 *
3527 * deviter_init(di, flags): initialize the device iterator `di'
3528 * to "walk" the device tree. deviter_next(di) will return
3529 * the first device_t in the device tree, or NULL if there are
3530 * no devices.
3531 *
3532 * `flags' is one or more of DEVITER_F_RW, indicating that the
3533 * caller intends to modify the device tree by calling
3534 * config_detach(9) on devices in the order that the iterator
3535 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3536 * nearest the "root" of the device tree to be returned, first;
3537 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3538 * the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3539 * indicating both that deviter_init() should not respect any
3540 * locks on the device tree, and that deviter_next(di) may run
3541 * in more than one LWP before the walk has finished.
3542 *
3543 * Only one DEVITER_F_RW iterator may be in the device tree at
3544 * once.
3545 *
3546 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3547 *
3548 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3549 * DEVITER_F_LEAVES_FIRST are used in combination.
3550 *
3551 * deviter_first(di, flags): initialize the device iterator `di'
3552 * and return the first device_t in the device tree, or NULL
3553 * if there are no devices. The statement
3554 *
3555  *		dv = deviter_first(di, flags);
3556 *
3557 * is shorthand for
3558 *
3559  *		deviter_init(di, flags);
3560 * dv = deviter_next(di);
3561 *
3562 * deviter_next(di): return the next device_t in the device tree,
3563 * or NULL if there are no more devices. deviter_next(di)
3564 * is undefined if `di' was not initialized with deviter_init() or
3565 * deviter_first().
3566 *
3567 * deviter_release(di): stops iteration (subsequent calls to
3568 * deviter_next() will return NULL), releases any locks and
3569 * resources held by the device iterator.
3570 *
3571 * Device iteration does not return device_t's in any particular
3572 * order. An iterator will never return the same device_t twice.
3573 * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3574 * is called repeatedly on the same `di', it will eventually return
3575 * NULL. It is ok to attach/detach devices during device iteration.
3576 */
3577 void
3578 deviter_init(deviter_t *di, deviter_flags_t flags)
3579 {
3580 device_t dv;
3581
3582 memset(di, 0, sizeof(*di));
3583
3584 if ((flags & DEVITER_F_SHUTDOWN) != 0)
3585 flags |= DEVITER_F_RW;
3586
3587 mutex_enter(&alldevs_lock);
3588 if ((flags & DEVITER_F_RW) != 0)
3589 alldevs_nwrite++;
3590 else
3591 alldevs_nread++;
3592 di->di_gen = alldevs_gen++;
3593 di->di_flags = flags;
3594
3595 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3596 case DEVITER_F_LEAVES_FIRST:
3597 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3598 if (!deviter_visits(di, dv))
3599 continue;
3600 di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3601 }
3602 break;
3603 case DEVITER_F_ROOT_FIRST:
3604 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3605 if (!deviter_visits(di, dv))
3606 continue;
3607 di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3608 }
3609 break;
3610 default:
3611 break;
3612 }
3613
3614 deviter_reinit(di);
3615 mutex_exit(&alldevs_lock);
3616 }
3617
3618 static void
3619 deviter_reinit(deviter_t *di)
3620 {
3621
3622 KASSERT(mutex_owned(&alldevs_lock));
3623 if ((di->di_flags & DEVITER_F_RW) != 0)
3624 di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3625 else
3626 di->di_prev = TAILQ_FIRST(&alldevs);
3627 }
3628
3629 device_t
3630 deviter_first(deviter_t *di, deviter_flags_t flags)
3631 {
3632
3633 deviter_init(di, flags);
3634 return deviter_next(di);
3635 }
3636
3637 static device_t
3638 deviter_next2(deviter_t *di)
3639 {
3640 device_t dv;
3641
3642 KASSERT(mutex_owned(&alldevs_lock));
3643
3644 dv = di->di_prev;
3645
3646 if (dv == NULL)
3647 return NULL;
3648
3649 if ((di->di_flags & DEVITER_F_RW) != 0)
3650 di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3651 else
3652 di->di_prev = TAILQ_NEXT(dv, dv_list);
3653
3654 return dv;
3655 }
3656
3657 static device_t
3658 deviter_next1(deviter_t *di)
3659 {
3660 device_t dv;
3661
3662 KASSERT(mutex_owned(&alldevs_lock));
3663
3664 do {
3665 dv = deviter_next2(di);
3666 } while (dv != NULL && !deviter_visits(di, dv));
3667
3668 return dv;
3669 }
3670
3671 device_t
3672 deviter_next(deviter_t *di)
3673 {
3674 device_t dv = NULL;
3675
3676 mutex_enter(&alldevs_lock);
3677 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3678 case 0:
3679 dv = deviter_next1(di);
3680 break;
3681 case DEVITER_F_LEAVES_FIRST:
3682 while (di->di_curdepth >= 0) {
3683 if ((dv = deviter_next1(di)) == NULL) {
3684 di->di_curdepth--;
3685 deviter_reinit(di);
3686 } else if (dv->dv_depth == di->di_curdepth)
3687 break;
3688 }
3689 break;
3690 case DEVITER_F_ROOT_FIRST:
3691 while (di->di_curdepth <= di->di_maxdepth) {
3692 if ((dv = deviter_next1(di)) == NULL) {
3693 di->di_curdepth++;
3694 deviter_reinit(di);
3695 } else if (dv->dv_depth == di->di_curdepth)
3696 break;
3697 }
3698 break;
3699 default:
3700 break;
3701 }
3702 mutex_exit(&alldevs_lock);
3703
3704 return dv;
3705 }
3706
3707 void
3708 deviter_release(deviter_t *di)
3709 {
3710 bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3711
3712 mutex_enter(&alldevs_lock);
3713 if (rw)
3714 --alldevs_nwrite;
3715 else
3716 --alldevs_nread;
3717 /* XXX wake a garbage-collection thread */
3718 mutex_exit(&alldevs_lock);
3719 }
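
/*
 * Example (sketch): iterating over all attached devices, read-only.
 * Passing DEVITER_F_RW instead would be required if the loop body
 * were going to config_detach the devices it visits, as
 * config_detach_children does above.
 *
 *	deviter_t di;
 *	device_t dv;
 *
 *	for (dv = deviter_first(&di, 0); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		// inspect dv, e.g. device_xname(dv)
 *	}
 *	deviter_release(&di);
 */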
3720
3721 const char *
3722 cfdata_ifattr(const struct cfdata *cf)
3723 {
3724 return cf->cf_pspec->cfp_iattr;
3725 }
3726
3727 bool
3728 ifattr_match(const char *snull, const char *t)
3729 {
3730 return (snull == NULL) || strcmp(snull, t) == 0;
3731 }
3732
3733 void
3734 null_childdetached(device_t self, device_t child)
3735 {
3736 /* do nothing */
3737 }
3738
3739 static void
3740 sysctl_detach_setup(struct sysctllog **clog)
3741 {
3742
3743 sysctl_createv(clog, 0, NULL, NULL,
3744 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
3745 CTLTYPE_BOOL, "detachall",
3746 SYSCTL_DESCR("Detach all devices at shutdown"),
3747 NULL, 0, &detachall, 0,
3748 CTL_KERN, CTL_CREATE, CTL_EOL);
3749 }
3750