1 /* $NetBSD: subr_autoconf.c,v 1.286 2021/06/13 00:11:46 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1996, 2000 Christopher G. Demetriou
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the
18 * NetBSD Project. See http://www.NetBSD.org/ for
19 * information about NetBSD.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35 */
36
37 /*
38 * Copyright (c) 1992, 1993
39 * The Regents of the University of California. All rights reserved.
40 *
41 * This software was developed by the Computer Systems Engineering group
42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43 * contributed to Berkeley.
44 *
45 * All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Lawrence Berkeley Laboratories.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. Neither the name of the University nor the names of its contributors
59 * may be used to endorse or promote products derived from this software
60 * without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL)
75 *
76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94
77 */
78
79 #define __SUBR_AUTOCONF_PRIVATE /* see <sys/device.h> */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.286 2021/06/13 00:11:46 riastradh Exp $");
83
84 #ifdef _KERNEL_OPT
85 #include "opt_ddb.h"
86 #include "drvctl.h"
87 #endif
88
89 #include <sys/param.h>
90 #include <sys/device.h>
91 #include <sys/disklabel.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/kmem.h>
95 #include <sys/systm.h>
96 #include <sys/kernel.h>
97 #include <sys/errno.h>
98 #include <sys/proc.h>
99 #include <sys/reboot.h>
100 #include <sys/kthread.h>
101 #include <sys/buf.h>
102 #include <sys/dirent.h>
103 #include <sys/mount.h>
104 #include <sys/namei.h>
105 #include <sys/unistd.h>
106 #include <sys/fcntl.h>
107 #include <sys/lockf.h>
108 #include <sys/callout.h>
109 #include <sys/devmon.h>
110 #include <sys/cpu.h>
111 #include <sys/sysctl.h>
112 #include <sys/stdarg.h>
113
114 #include <sys/disk.h>
115
116 #include <sys/rndsource.h>
117
118 #include <machine/limits.h>
119
120 /*
121 * Autoconfiguration subroutines.
122 */
123
124 /*
125 * Device autoconfiguration timings are mixed into the entropy pool.
126 */
127 static krndsource_t rnd_autoconf_source;
128
129 /*
130 * ioconf.c exports exactly two names: cfdata and cfroots. All system
131 * devices and drivers are found via these tables.
132 */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135
136 /*
137 * List of all cfdriver structures. We use this to detect duplicates
138 * when other cfdrivers are loaded.
139 */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142
143 /*
144 * Initial list of cfattach's.
145 */
146 extern const struct cfattachinit cfattachinit[];
147
148 /*
149 * List of cfdata tables. We always have one such table -- the one
150 * built statically when the kernel was configured.
151 */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154
155 #define ROOT ((device_t)NULL)
156
157 struct matchinfo {
158 cfsubmatch_t fn;
159 device_t parent;
160 const int *locs;
161 void *aux;
162 struct cfdata *match;
163 int pri;
164 };
165
166 struct alldevs_foray {
167 int af_s;
168 struct devicelist af_garbage;
169 };
170
171 static char *number(char *, int);
172 static void mapply(struct matchinfo *, cfdata_t);
173 static void config_devdelete(device_t);
174 static void config_devunlink(device_t, struct devicelist *);
175 static void config_makeroom(int, struct cfdriver *);
176 static void config_devlink(device_t);
177 static void config_alldevs_enter(struct alldevs_foray *);
178 static void config_alldevs_exit(struct alldevs_foray *);
179 static void config_add_attrib_dict(device_t);
180
181 static void config_collect_garbage(struct devicelist *);
182 static void config_dump_garbage(struct devicelist *);
183
184 static void pmflock_debug(device_t, const char *, int);
185
186 static device_t deviter_next1(deviter_t *);
187 static void deviter_reinit(deviter_t *);
188
189 struct deferred_config {
190 TAILQ_ENTRY(deferred_config) dc_queue;
191 device_t dc_dev;
192 void (*dc_func)(device_t);
193 };
194
195 TAILQ_HEAD(deferred_config_head, deferred_config);
196
197 static struct deferred_config_head deferred_config_queue =
198 TAILQ_HEAD_INITIALIZER(deferred_config_queue);
199 static struct deferred_config_head interrupt_config_queue =
200 TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
201 static int interrupt_config_threads = 8;
202 static struct deferred_config_head mountroot_config_queue =
203 TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
204 static int mountroot_config_threads = 2;
205 static lwp_t **mountroot_config_lwpids;
206 static size_t mountroot_config_lwpids_size;
207 bool root_is_mounted = false;
208
209 static void config_process_deferred(struct deferred_config_head *, device_t);
210
211 /* Hooks to finalize configuration once all real devices have been found. */
212 struct finalize_hook {
213 TAILQ_ENTRY(finalize_hook) f_list;
214 int (*f_func)(device_t);
215 device_t f_dev;
216 };
217 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
218 TAILQ_HEAD_INITIALIZER(config_finalize_list);
219 static int config_finalize_done;
220
221 /* list of all devices */
222 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
223 static kmutex_t alldevs_lock __cacheline_aligned;
224 static devgen_t alldevs_gen = 1;
225 static int alldevs_nread = 0;
226 static int alldevs_nwrite = 0;
227 static bool alldevs_garbage = false;
228
229 static struct devicelist config_pending =
230 TAILQ_HEAD_INITIALIZER(config_pending);
231 static kmutex_t config_misc_lock;
232 static kcondvar_t config_misc_cv;
233
234 static bool detachall = false;
235
236 #define STREQ(s1, s2) \
237 (*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
238
239 static bool config_initialized = false; /* config_init() has been called. */
240
241 static int config_do_twiddle;
242 static callout_t config_twiddle_ch;
243
244 static void sysctl_detach_setup(struct sysctllog **);
245
246 int no_devmon_insert(const char *, prop_dictionary_t);
247 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
248
249 typedef int (*cfdriver_fn)(struct cfdriver *);
250 static int
251 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
252 cfdriver_fn drv_do, cfdriver_fn drv_undo,
253 const char *style, bool dopanic)
254 {
255 void (*pr)(const char *, ...) __printflike(1, 2) =
256 dopanic ? panic : printf;
257 int i, error = 0, e2 __diagused;
258
259 for (i = 0; cfdriverv[i] != NULL; i++) {
260 if ((error = drv_do(cfdriverv[i])) != 0) {
261 pr("configure: `%s' driver %s failed: %d",
262 cfdriverv[i]->cd_name, style, error);
263 goto bad;
264 }
265 }
266
267 KASSERT(error == 0);
268 return 0;
269
270 bad:
271 printf("\n");
272 for (i--; i >= 0; i--) {
273 e2 = drv_undo(cfdriverv[i]);
274 KASSERT(e2 == 0);
275 }
276
277 return error;
278 }
279
280 typedef int (*cfattach_fn)(const char *, struct cfattach *);
281 static int
282 frob_cfattachvec(const struct cfattachinit *cfattachv,
283 cfattach_fn att_do, cfattach_fn att_undo,
284 const char *style, bool dopanic)
285 {
286 const struct cfattachinit *cfai = NULL;
287 void (*pr)(const char *, ...) __printflike(1, 2) =
288 dopanic ? panic : printf;
289 int j = 0, error = 0, e2 __diagused;
290
291 for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
292 for (j = 0; cfai->cfai_list[j] != NULL; j++) {
293 if ((error = att_do(cfai->cfai_name,
294 cfai->cfai_list[j])) != 0) {
295 pr("configure: attachment `%s' "
296 "of `%s' driver %s failed: %d",
297 cfai->cfai_list[j]->ca_name,
298 cfai->cfai_name, style, error);
299 goto bad;
300 }
301 }
302 }
303
304 KASSERT(error == 0);
305 return 0;
306
307 bad:
308 /*
309 * Roll back in reverse order.  It is not clear that strict reverse
310 * order matters here, but do it anyway; the nested loop below simply
311 * mirrors the forward pass, run backwards.
312 */
313 printf("\n");
314 if (cfai) {
315 bool last;
316
317 for (last = false; last == false; ) {
318 if (cfai == &cfattachv[0])
319 last = true;
320 for (j--; j >= 0; j--) {
321 e2 = att_undo(cfai->cfai_name,
322 cfai->cfai_list[j]);
323 KASSERT(e2 == 0);
324 }
325 if (!last) {
326 cfai--;
327 for (j = 0; cfai->cfai_list[j] != NULL; j++)
328 ;
329 }
330 }
331 }
332
333 return error;
334 }
335
336 /*
337 * Initialize the autoconfiguration data structures. Normally this
338 * is done by configure(), but some platforms need to do this very
339 * early (to e.g. initialize the console).
340 */
341 void
342 config_init(void)
343 {
344
345 KASSERT(config_initialized == false);
346
347 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
348
349 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
350 cv_init(&config_misc_cv, "cfgmisc");
351
352 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
353
354 frob_cfdrivervec(cfdriver_list_initial,
355 config_cfdriver_attach, NULL, "bootstrap", true);
356 frob_cfattachvec(cfattachinit,
357 config_cfattach_attach, NULL, "bootstrap", true);
358
359 initcftable.ct_cfdata = cfdata;
360 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
361
362 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
363 RND_FLAG_COLLECT_TIME);
364
365 config_initialized = true;
366 }
367
368 /*
369 * Init or fini drivers and attachments. Either all or none
370 * are processed (via rollback). It would be nice if this were
371 * atomic to outside consumers, but with the current state of
372 * locking ...
373 */
374 int
375 config_init_component(struct cfdriver * const *cfdriverv,
376 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
377 {
378 int error;
379
380 KERNEL_LOCK(1, NULL);
381
382 if ((error = frob_cfdrivervec(cfdriverv,
383 config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
384 goto out;
385 if ((error = frob_cfattachvec(cfattachv,
386 config_cfattach_attach, config_cfattach_detach,
387 "init", false)) != 0) {
388 frob_cfdrivervec(cfdriverv,
389 config_cfdriver_detach, NULL, "init rollback", true);
390 goto out;
391 }
392 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
393 frob_cfattachvec(cfattachv,
394 config_cfattach_detach, NULL, "init rollback", true);
395 frob_cfdrivervec(cfdriverv,
396 config_cfdriver_detach, NULL, "init rollback", true);
397 goto out;
398 }
399
400 /* Success! */
401 error = 0;
402
403 out: KERNEL_UNLOCK_ONE(NULL);
404 return error;
405 }
406
407 int
408 config_fini_component(struct cfdriver * const *cfdriverv,
409 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
410 {
411 int error;
412
413 KERNEL_LOCK(1, NULL);
414
415 if ((error = config_cfdata_detach(cfdatav)) != 0)
416 goto out;
417 if ((error = frob_cfattachvec(cfattachv,
418 config_cfattach_detach, config_cfattach_attach,
419 "fini", false)) != 0) {
420 if (config_cfdata_attach(cfdatav, 0) != 0)
421 panic("config_cfdata fini rollback failed");
422 goto out;
423 }
424 if ((error = frob_cfdrivervec(cfdriverv,
425 config_cfdriver_detach, config_cfdriver_attach,
426 "fini", false)) != 0) {
427 frob_cfattachvec(cfattachv,
428 config_cfattach_attach, NULL, "fini rollback", true);
429 if (config_cfdata_attach(cfdatav, 0) != 0)
430 panic("config_cfdata fini rollback failed");
431 goto out;
432 }
433
434 /* Success! */
435 error = 0;
436
437 out: KERNEL_UNLOCK_ONE(NULL);
438 return error;
439 }
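
/*
 * Illustrative sketch: a loadable driver module typically wires
 * config_init_component()/config_fini_component() into its module
 * command handler.  "mydrv" is a hypothetical driver name, and the
 * cfdriver_ioconf_mydrv, cfattach_ioconf_mydrv and cfdata_ioconf_mydrv
 * symbols stand for the tables config(1) generates from the module's
 * ioconf file.
 *
 *	static int
 *	mydrv_modcmd(modcmd_t cmd, void *arg)
 *	{
 *		switch (cmd) {
 *		case MODULE_CMD_INIT:
 *			return config_init_component(cfdriver_ioconf_mydrv,
 *			    cfattach_ioconf_mydrv, cfdata_ioconf_mydrv);
 *		case MODULE_CMD_FINI:
 *			return config_fini_component(cfdriver_ioconf_mydrv,
 *			    cfattach_ioconf_mydrv, cfdata_ioconf_mydrv);
 *		default:
 *			return ENOTTY;
 *		}
 *	}
 */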
440
441 void
442 config_init_mi(void)
443 {
444
445 if (!config_initialized)
446 config_init();
447
448 sysctl_detach_setup(NULL);
449 }
450
451 void
452 config_deferred(device_t dev)
453 {
454
455 KASSERT(KERNEL_LOCKED_P());
456
457 config_process_deferred(&deferred_config_queue, dev);
458 config_process_deferred(&interrupt_config_queue, dev);
459 config_process_deferred(&mountroot_config_queue, dev);
460 }
461
462 static void
463 config_interrupts_thread(void *cookie)
464 {
465 struct deferred_config *dc;
466 device_t dev;
467
468 mutex_enter(&config_misc_lock);
469 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
470 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
471 mutex_exit(&config_misc_lock);
472
473 dev = dc->dc_dev;
474 (*dc->dc_func)(dev);
475 if (!device_pmf_is_registered(dev))
476 aprint_debug_dev(dev,
477 "WARNING: power management not supported\n");
478 config_pending_decr(dev);
479 kmem_free(dc, sizeof(*dc));
480
481 mutex_enter(&config_misc_lock);
482 }
483 mutex_exit(&config_misc_lock);
484
485 kthread_exit(0);
486 }
487
488 void
489 config_create_interruptthreads(void)
490 {
491 int i;
492
493 for (i = 0; i < interrupt_config_threads; i++) {
494 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
495 config_interrupts_thread, NULL, NULL, "configintr");
496 }
497 }
498
499 static void
500 config_mountroot_thread(void *cookie)
501 {
502 struct deferred_config *dc;
503
504 mutex_enter(&config_misc_lock);
505 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
506 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
507 mutex_exit(&config_misc_lock);
508
509 (*dc->dc_func)(dc->dc_dev);
510 kmem_free(dc, sizeof(*dc));
511
512 mutex_enter(&config_misc_lock);
513 }
514 mutex_exit(&config_misc_lock);
515
516 kthread_exit(0);
517 }
518
519 void
520 config_create_mountrootthreads(void)
521 {
522 int i;
523
524 if (!root_is_mounted)
525 root_is_mounted = true;
526
527 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
528 mountroot_config_threads;
529 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
530 KM_NOSLEEP);
531 KASSERT(mountroot_config_lwpids);
532 for (i = 0; i < mountroot_config_threads; i++) {
533 mountroot_config_lwpids[i] = 0;
534 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
535 NULL, config_mountroot_thread, NULL,
536 &mountroot_config_lwpids[i],
537 "configroot");
538 }
539 }
540
541 void
542 config_finalize_mountroot(void)
543 {
544 int i, error;
545
546 for (i = 0; i < mountroot_config_threads; i++) {
547 if (mountroot_config_lwpids[i] == 0)
548 continue;
549
550 error = kthread_join(mountroot_config_lwpids[i]);
551 if (error)
552 printf("%s: thread %x joined with error %d\n",
553 __func__, i, error);
554 }
555 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
556 }
557
558 /*
559 * Announce device attach/detach to userland listeners.
560 */
561
562 int
563 no_devmon_insert(const char *name, prop_dictionary_t p)
564 {
565
566 return ENODEV;
567 }
568
569 static void
570 devmon_report_device(device_t dev, bool isattach)
571 {
572 prop_dictionary_t ev, dict = device_properties(dev);
573 const char *parent;
574 const char *what;
575 const char *where;
576 device_t pdev = device_parent(dev);
577
578 /* If currently no drvctl device, just return */
579 if (devmon_insert_vec == no_devmon_insert)
580 return;
581
582 ev = prop_dictionary_create();
583 if (ev == NULL)
584 return;
585
586 what = (isattach ? "device-attach" : "device-detach");
587 parent = (pdev == NULL ? "root" : device_xname(pdev));
588 if (prop_dictionary_get_string(dict, "location", &where)) {
589 prop_dictionary_set_string(ev, "location", where);
590 aprint_debug("ev: %s %s at %s in [%s]\n",
591 what, device_xname(dev), parent, where);
592 }
593 if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
594 !prop_dictionary_set_string(ev, "parent", parent)) {
595 prop_object_release(ev);
596 return;
597 }
598
599 if ((*devmon_insert_vec)(what, ev) != 0)
600 prop_object_release(ev);
601 }
602
603 /*
604 * Add a cfdriver to the system.
605 */
606 int
607 config_cfdriver_attach(struct cfdriver *cd)
608 {
609 struct cfdriver *lcd;
610
611 /* Make sure this driver isn't already in the system. */
612 LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
613 if (STREQ(lcd->cd_name, cd->cd_name))
614 return EEXIST;
615 }
616
617 LIST_INIT(&cd->cd_attach);
618 LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
619
620 return 0;
621 }
622
623 /*
624 * Remove a cfdriver from the system.
625 */
626 int
627 config_cfdriver_detach(struct cfdriver *cd)
628 {
629 struct alldevs_foray af;
630 int i, rc = 0;
631
632 config_alldevs_enter(&af);
633 /* Make sure there are no active instances. */
634 for (i = 0; i < cd->cd_ndevs; i++) {
635 if (cd->cd_devs[i] != NULL) {
636 rc = EBUSY;
637 break;
638 }
639 }
640 config_alldevs_exit(&af);
641
642 if (rc != 0)
643 return rc;
644
645 /* ...and no attachments loaded. */
646 if (LIST_EMPTY(&cd->cd_attach) == 0)
647 return EBUSY;
648
649 LIST_REMOVE(cd, cd_list);
650
651 KASSERT(cd->cd_devs == NULL);
652
653 return 0;
654 }
655
656 /*
657 * Look up a cfdriver by name.
658 */
659 struct cfdriver *
660 config_cfdriver_lookup(const char *name)
661 {
662 struct cfdriver *cd;
663
664 LIST_FOREACH(cd, &allcfdrivers, cd_list) {
665 if (STREQ(cd->cd_name, name))
666 return cd;
667 }
668
669 return NULL;
670 }
671
672 /*
673 * Add a cfattach to the specified driver.
674 */
675 int
676 config_cfattach_attach(const char *driver, struct cfattach *ca)
677 {
678 struct cfattach *lca;
679 struct cfdriver *cd;
680
681 cd = config_cfdriver_lookup(driver);
682 if (cd == NULL)
683 return ESRCH;
684
685 /* Make sure this attachment isn't already on this driver. */
686 LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
687 if (STREQ(lca->ca_name, ca->ca_name))
688 return EEXIST;
689 }
690
691 LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
692
693 return 0;
694 }
695
696 /*
697 * Remove a cfattach from the specified driver.
698 */
699 int
700 config_cfattach_detach(const char *driver, struct cfattach *ca)
701 {
702 struct alldevs_foray af;
703 struct cfdriver *cd;
704 device_t dev;
705 int i, rc = 0;
706
707 cd = config_cfdriver_lookup(driver);
708 if (cd == NULL)
709 return ESRCH;
710
711 config_alldevs_enter(&af);
712 /* Make sure there are no active instances. */
713 for (i = 0; i < cd->cd_ndevs; i++) {
714 if ((dev = cd->cd_devs[i]) == NULL)
715 continue;
716 if (dev->dv_cfattach == ca) {
717 rc = EBUSY;
718 break;
719 }
720 }
721 config_alldevs_exit(&af);
722
723 if (rc != 0)
724 return rc;
725
726 LIST_REMOVE(ca, ca_list);
727
728 return 0;
729 }
730
731 /*
732 * Look up a cfattach by name.
733 */
734 static struct cfattach *
735 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
736 {
737 struct cfattach *ca;
738
739 LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
740 if (STREQ(ca->ca_name, atname))
741 return ca;
742 }
743
744 return NULL;
745 }
746
747 /*
748 * Look up a cfattach by driver/attachment name.
749 */
750 struct cfattach *
751 config_cfattach_lookup(const char *name, const char *atname)
752 {
753 struct cfdriver *cd;
754
755 cd = config_cfdriver_lookup(name);
756 if (cd == NULL)
757 return NULL;
758
759 return config_cfattach_lookup_cd(cd, atname);
760 }
761
762 /*
763 * Apply the matching function and choose the best. This is used
764 * a few times and we want to keep the code small.
765 */
766 static void
767 mapply(struct matchinfo *m, cfdata_t cf)
768 {
769 int pri;
770
771 if (m->fn != NULL) {
772 pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
773 } else {
774 pri = config_match(m->parent, cf, m->aux);
775 }
776 if (pri > m->pri) {
777 m->match = cf;
778 m->pri = pri;
779 }
780 }
781
782 int
783 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
784 {
785 const struct cfiattrdata *ci;
786 const struct cflocdesc *cl;
787 int nlocs, i;
788
789 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
790 KASSERT(ci);
791 nlocs = ci->ci_loclen;
792 KASSERT(!nlocs || locs);
793 for (i = 0; i < nlocs; i++) {
794 cl = &ci->ci_locdesc[i];
795 if (cl->cld_defaultstr != NULL &&
796 cf->cf_loc[i] == cl->cld_default)
797 continue;
798 if (cf->cf_loc[i] == locs[i])
799 continue;
800 return 0;
801 }
802
803 return config_match(parent, cf, aux);
804 }
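
/*
 * Worked example (illustrative, using the familiar "scsibus" interface
 * attribute whose locators are "target" and "lun"): for a kernel
 * config line such as "sd0 at scsibus0 target 1 lun 0", cf->cf_loc[]
 * holds {1, 0}, so a probe reporting locs[] = {1, 0} matches.  A
 * locator left at its declared default (e.g. "target ?") is treated as
 * a wildcard by the cld_defaultstr test above and matches any reported
 * value.
 */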
805
806 /*
807 * Helper function: check whether the driver supports the interface attribute
808 * and return its descriptor structure.
809 */
810 static const struct cfiattrdata *
811 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
812 {
813 const struct cfiattrdata * const *cpp;
814
815 if (cd->cd_attrs == NULL)
816 return 0;
817
818 for (cpp = cd->cd_attrs; *cpp; cpp++) {
819 if (STREQ((*cpp)->ci_name, ia)) {
820 /* Match. */
821 return *cpp;
822 }
823 }
824 return 0;
825 }
826
827 #if defined(DIAGNOSTIC)
828 static int
829 cfdriver_iattr_count(const struct cfdriver *cd)
830 {
831 const struct cfiattrdata * const *cpp;
832 int i;
833
834 if (cd->cd_attrs == NULL)
835 return 0;
836
837 for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
838 i++;
839 }
840 return i;
841 }
842 #endif /* DIAGNOSTIC */
843
844 /*
845 * Look up an interface attribute description by name.
846 * If the driver is given, consider only its supported attributes.
847 */
848 const struct cfiattrdata *
849 cfiattr_lookup(const char *name, const struct cfdriver *cd)
850 {
851 const struct cfdriver *d;
852 const struct cfiattrdata *ia;
853
854 if (cd)
855 return cfdriver_get_iattr(cd, name);
856
857 LIST_FOREACH(d, &allcfdrivers, cd_list) {
858 ia = cfdriver_get_iattr(d, name);
859 if (ia)
860 return ia;
861 }
862 return 0;
863 }
864
865 /*
866 * Determine if `parent' is a potential parent for a device spec based
867 * on `cfp'.
868 */
869 static int
870 cfparent_match(const device_t parent, const struct cfparent *cfp)
871 {
872 struct cfdriver *pcd;
873
874 /* We don't match root nodes here. */
875 if (cfp == NULL)
876 return 0;
877
878 pcd = parent->dv_cfdriver;
879 KASSERT(pcd != NULL);
880
881 /*
882 * First, ensure this parent has the correct interface
883 * attribute.
884 */
885 if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
886 return 0;
887
888 /*
889 * If no specific parent device instance was specified (i.e.
890 * we're attaching to the attribute only), we're done!
891 */
892 if (cfp->cfp_parent == NULL)
893 return 1;
894
895 /*
896 * Check the parent device's name.
897 */
898 if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
899 return 0; /* not the same parent */
900
901 /*
902 * Make sure the unit number matches.
903 */
904 if (cfp->cfp_unit == DVUNIT_ANY || /* wildcard */
905 cfp->cfp_unit == parent->dv_unit)
906 return 1;
907
908 /* Unit numbers don't match. */
909 return 0;
910 }
911
912 /*
913 * Helper for config_cfdata_attach(): check whether each device could be
914 * the parent of any attachment in the passed config data table, and rescan.
915 */
916 static void
917 rescan_with_cfdata(const struct cfdata *cf)
918 {
919 device_t d;
920 const struct cfdata *cf1;
921 deviter_t di;
922
923 KASSERT(KERNEL_LOCKED_P());
924
925 /*
926 * "alldevs" is likely longer than a modules's cfdata, so make it
927 * the outer loop.
928 */
929 for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
930
931 if (!(d->dv_cfattach->ca_rescan))
932 continue;
933
934 for (cf1 = cf; cf1->cf_name; cf1++) {
935
936 if (!cfparent_match(d, cf1->cf_pspec))
937 continue;
938
939 (*d->dv_cfattach->ca_rescan)(d,
940 cfdata_ifattr(cf1), cf1->cf_loc);
941
942 config_deferred(d);
943 }
944 }
945 deviter_release(&di);
946 }
947
948 /*
949 * Attach a supplemental config data table and rescan potential
950 * parent devices if required.
951 */
952 int
953 config_cfdata_attach(cfdata_t cf, int scannow)
954 {
955 struct cftable *ct;
956
957 KERNEL_LOCK(1, NULL);
958
959 ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
960 ct->ct_cfdata = cf;
961 TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
962
963 if (scannow)
964 rescan_with_cfdata(cf);
965
966 KERNEL_UNLOCK_ONE(NULL);
967
968 return 0;
969 }
970
971 /*
972 * Helper for config_cfdata_detach: check whether a device is
973 * found through any attachment in the config data table.
974 */
975 static int
976 dev_in_cfdata(device_t d, cfdata_t cf)
977 {
978 const struct cfdata *cf1;
979
980 for (cf1 = cf; cf1->cf_name; cf1++)
981 if (d->dv_cfdata == cf1)
982 return 1;
983
984 return 0;
985 }
986
987 /*
988 * Detach a supplemental config data table.  First detach all devices
989 * found through that table (and thus still holding references to it).
990 */
991 int
992 config_cfdata_detach(cfdata_t cf)
993 {
994 device_t d;
995 int error = 0;
996 struct cftable *ct;
997 deviter_t di;
998
999 KERNEL_LOCK(1, NULL);
1000
1001 for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1002 d = deviter_next(&di)) {
1003 if (!dev_in_cfdata(d, cf))
1004 continue;
1005 if ((error = config_detach(d, 0)) != 0)
1006 break;
1007 }
1008 deviter_release(&di);
1009 if (error) {
1010 aprint_error_dev(d, "unable to detach instance\n");
1011 goto out;
1012 }
1013
1014 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1015 if (ct->ct_cfdata == cf) {
1016 TAILQ_REMOVE(&allcftables, ct, ct_list);
1017 kmem_free(ct, sizeof(*ct));
1018 error = 0;
1019 goto out;
1020 }
1021 }
1022
1023 /* not found -- shouldn't happen */
1024 error = EINVAL;
1025
1026 out: KERNEL_UNLOCK_ONE(NULL);
1027 return error;
1028 }
1029
1030 /*
1031 * Invoke the "match" routine for a cfdata entry on behalf of
1032 * an external caller, usually a direct config "submatch" routine.
1033 */
1034 int
1035 config_match(device_t parent, cfdata_t cf, void *aux)
1036 {
1037 struct cfattach *ca;
1038
1039 KASSERT(KERNEL_LOCKED_P());
1040
1041 ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1042 if (ca == NULL) {
1043 /* No attachment for this entry, oh well. */
1044 return 0;
1045 }
1046
1047 return (*ca->ca_match)(parent, cf, aux);
1048 }
1049
1050 /*
1051 * Invoke the "probe" routine for a cfdata entry on behalf of
1052 * an external caller, usually an indirect config "search" routine.
1053 */
1054 int
1055 config_probe(device_t parent, cfdata_t cf, void *aux)
1056 {
1057 /*
1058 * This is currently a synonym for config_match(), but this
1059 * is an implementation detail; "match" and "probe" routines
1060 * have different behaviors.
1061 *
1062 * XXX config_probe() should return a bool, because there is
1063 * XXX no match score for probe -- it's either there or it's
1064 * XXX not, but some ports abuse the return value as a way
1065 * XXX to attach "critical" devices before "non-critical"
1066 * XXX devices.
1067 */
1068 return config_match(parent, cf, aux);
1069 }
1070
1071 static void
1072 config_get_cfargs(cfarg_t tag,
1073 cfsubmatch_t *fnp, /* output */
1074 const char **ifattrp, /* output */
1075 const int **locsp, /* output */
1076 devhandle_t *handlep, /* output */
1077 va_list ap)
1078 {
1079 cfsubmatch_t fn = NULL;
1080 const char *ifattr = NULL;
1081 const int *locs = NULL;
1082 devhandle_t handle;
1083
1084 devhandle_invalidate(&handle);
1085
1086 while (tag != CFARG_EOL) {
1087 switch (tag) {
1088 /*
1089 * CFARG_SUBMATCH and CFARG_SEARCH are synonyms, but this
1090 * is merely an implementation detail. They are distinct
1091 * from the caller's point of view.
1092 */
1093 case CFARG_SUBMATCH:
1094 case CFARG_SEARCH:
1095 /* Only allow one function to be specified. */
1096 if (fn != NULL) {
1097 panic("%s: caller specified both "
1098 "SUBMATCH and SEARCH", __func__);
1099 }
1100 fn = va_arg(ap, cfsubmatch_t);
1101 break;
1102
1103 case CFARG_IATTR:
1104 ifattr = va_arg(ap, const char *);
1105 break;
1106
1107 case CFARG_LOCATORS:
1108 locs = va_arg(ap, const int *);
1109 break;
1110
1111 case CFARG_DEVHANDLE:
1112 handle = va_arg(ap, devhandle_t);
1113 break;
1114
1115 default:
1116 panic("%s: unknown cfarg tag: %d\n",
1117 __func__, tag);
1118 }
1119 tag = va_arg(ap, cfarg_t);
1120 }
1121
1122 if (fnp != NULL)
1123 *fnp = fn;
1124 if (ifattrp != NULL)
1125 *ifattrp = ifattr;
1126 if (locsp != NULL)
1127 *locsp = locs;
1128 if (handlep != NULL)
1129 *handlep = handle;
1130 }
1131
1132 /*
1133 * Iterate over all potential children of some device, calling the given
1134 * function (default being the child's match function) for each one.
1135 * Nonzero returns are matches; the highest value returned is considered
1136 * the best match. Return the `found child' if we got a match, or NULL
1137 * otherwise. The `aux' pointer is simply passed on through.
1138 *
1139 * Note that this function is designed so that it can be used to apply
1140 * an arbitrary function to all potential children (its return value
1141 * can be ignored).
1142 */
1143 cfdata_t
1144 config_vsearch(device_t parent, void *aux, cfarg_t tag, va_list ap)
1145 {
1146 cfsubmatch_t fn;
1147 const char *ifattr;
1148 const int *locs;
1149 struct cftable *ct;
1150 cfdata_t cf;
1151 struct matchinfo m;
1152
1153 config_get_cfargs(tag, &fn, &ifattr, &locs, NULL, ap);
1154
1155 KASSERT(config_initialized);
1156 KASSERT(!ifattr || cfdriver_get_iattr(parent->dv_cfdriver, ifattr));
1157 KASSERT(ifattr || cfdriver_iattr_count(parent->dv_cfdriver) < 2);
1158
1159 m.fn = fn;
1160 m.parent = parent;
1161 m.locs = locs;
1162 m.aux = aux;
1163 m.match = NULL;
1164 m.pri = 0;
1165
1166 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1167 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1168
1169 /* We don't match root nodes here. */
1170 if (!cf->cf_pspec)
1171 continue;
1172
1173 /*
1174 * Skip cf if no longer eligible, otherwise scan
1175 * through parents for one matching `parent', and
1176 * try match function.
1177 */
1178 if (cf->cf_fstate == FSTATE_FOUND)
1179 continue;
1180 if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1181 cf->cf_fstate == FSTATE_DSTAR)
1182 continue;
1183
1184 /*
1185 * If an interface attribute was specified,
1186 * consider only children which attach to
1187 * that attribute.
1188 */
1189 if (ifattr && !STREQ(ifattr, cfdata_ifattr(cf)))
1190 continue;
1191
1192 if (cfparent_match(parent, cf->cf_pspec))
1193 mapply(&m, cf);
1194 }
1195 }
1196 return m.match;
1197 }
1198
1199 cfdata_t
1200 config_search(device_t parent, void *aux, cfarg_t tag, ...)
1201 {
1202 cfdata_t cf;
1203 va_list ap;
1204
1205 va_start(ap, tag);
1206 cf = config_vsearch(parent, aux, tag, ap);
1207 va_end(ap);
1208
1209 return cf;
1210 }
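
/*
 * Illustrative sketch (hypothetical "mybus" driver): an indirectly
 * configured bus can ask for candidate children and then attach a
 * particular match itself.  Here maa, mybus_search and mybus_print
 * are the bus's own attach arguments, search function and print
 * function.
 *
 *	cfdata_t cf;
 *
 *	cf = config_search(self, &maa,
 *	    CFARG_SEARCH, mybus_search,
 *	    CFARG_IATTR, "mybus",
 *	    CFARG_EOL);
 *	if (cf != NULL)
 *		config_attach(self, cf, &maa, mybus_print, CFARG_EOL);
 */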
1211
1212 /*
1213 * Find the given root device.
1214 * This is much like config_search, but there is no parent.
1215 * Don't bother with multiple cfdata tables; the root node
1216 * must always be in the initial table.
1217 */
1218 cfdata_t
1219 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1220 {
1221 cfdata_t cf;
1222 const short *p;
1223 struct matchinfo m;
1224
1225 m.fn = fn;
1226 m.parent = ROOT;
1227 m.aux = aux;
1228 m.match = NULL;
1229 m.pri = 0;
1230 m.locs = 0;
1231 /*
1232 * Look at root entries for matching name. We do not bother
1233 * with found-state here since only one root should ever be
1234 * searched (and it must be done first).
1235 */
1236 for (p = cfroots; *p >= 0; p++) {
1237 cf = &cfdata[*p];
1238 if (strcmp(cf->cf_name, rootname) == 0)
1239 mapply(&m, cf);
1240 }
1241 return m.match;
1242 }
1243
1244 static const char * const msgs[] = {
1245 [QUIET] = "",
1246 [UNCONF] = " not configured\n",
1247 [UNSUPP] = " unsupported\n",
1248 };
1249
1250 /*
1251 * The given `aux' argument describes a device that has been found
1252 * on the given parent, but not necessarily configured. Locate the
1253 * configuration data for that device (using the submatch function
1254 * provided, or using candidates' cd_match configuration driver
1255 * functions) and attach it, and return its device_t. If the device was
1256 * not configured, call the given `print' function and return NULL.
1257 */
1258 device_t
1259 config_vfound(device_t parent, void *aux, cfprint_t print, cfarg_t tag,
1260 va_list ap)
1261 {
1262 cfdata_t cf;
1263 va_list nap;
1264
1265 va_copy(nap, ap);
1266 cf = config_vsearch(parent, aux, tag, nap);
1267 va_end(nap);
1268
1269 if (cf != NULL) {
1270 return config_vattach(parent, cf, aux, print, tag, ap);
1271 }
1272
1273 if (print) {
1274 if (config_do_twiddle && cold)
1275 twiddle();
1276
1277 const int pret = (*print)(aux, device_xname(parent));
1278 KASSERT(pret >= 0);
1279 KASSERT(pret < __arraycount(msgs));
1280 KASSERT(msgs[pret] != NULL);
1281 aprint_normal("%s", msgs[pret]);
1282 }
1283
1284 /*
1285 * This has the effect of mixing in a single timestamp to the
1286 * entropy pool. Experiments indicate the estimator will almost
1287 * always attribute one bit of entropy to this sample; analysis
1288 * of device attach/detach timestamps on FreeBSD indicates 4
1289 * bits of entropy/sample so this seems appropriately conservative.
1290 */
1291 rnd_add_uint32(&rnd_autoconf_source, 0);
1292 return NULL;
1293 }
1294
1295 device_t
1296 config_found(device_t parent, void *aux, cfprint_t print, cfarg_t tag, ...)
1297 {
1298 device_t dev;
1299 va_list ap;
1300
1301 va_start(ap, tag);
1302 dev = config_vfound(parent, aux, print, tag, ap);
1303 va_end(ap);
1304
1305 return dev;
1306 }
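
/*
 * Illustrative sketch (hypothetical names; the MYBUSCF_* constants
 * stand for the locator indices config(1) generates): a directly
 * configured parent announces each child it discovers, passing
 * locators so that config_stdsubmatch() can compare them against the
 * kernel config file.
 *
 *	int locs[MYBUSCF_NLOCS];
 *
 *	locs[MYBUSCF_ADDR] = addr;
 *	config_found(self, &maa, mybus_print,
 *	    CFARG_SUBMATCH, config_stdsubmatch,
 *	    CFARG_IATTR, "mybus",
 *	    CFARG_LOCATORS, locs,
 *	    CFARG_EOL);
 */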
1307
1308 /*
1309 * As above, but for root devices.
1310 */
1311 device_t
1312 config_rootfound(const char *rootname, void *aux)
1313 {
1314 cfdata_t cf;
1315 device_t dev = NULL;
1316
1317 KERNEL_LOCK(1, NULL);
1318 if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1319 dev = config_attach(ROOT, cf, aux, NULL, CFARG_EOL);
1320 else
1321 aprint_error("root device %s not configured\n", rootname);
1322 KERNEL_UNLOCK_ONE(NULL);
1323 return dev;
1324 }
1325
1326 /* just like sprintf(buf, "%d") except that it works from the end */
1327 static char *
1328 number(char *ep, int n)
1329 {
1330
1331 *--ep = 0;
1332 while (n >= 10) {
1333 *--ep = (n % 10) + '0';
1334 n /= 10;
1335 }
1336 *--ep = n + '0';
1337 return ep;
1338 }
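
/*
 * For example, with char num[10], number(&num[sizeof(num)], 42) writes
 * the NUL terminator and then "42" backwards into the end of the
 * buffer and returns a pointer to the '4'.  Note that the length
 * computed by the caller in config_vdevalloc(), &num[sizeof(num)] -
 * xunit, therefore includes the terminating NUL.
 */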
1339
1340 /*
1341 * Expand the size of the cd_devs array if necessary.
1342 *
1343 * The caller must hold alldevs_lock. config_makeroom() may release and
1344 * re-acquire alldevs_lock, so callers should re-check conditions such
1345 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1346 * returns.
1347 */
1348 static void
1349 config_makeroom(int n, struct cfdriver *cd)
1350 {
1351 int ondevs, nndevs;
1352 device_t *osp, *nsp;
1353
1354 KASSERT(mutex_owned(&alldevs_lock));
1355 alldevs_nwrite++;
1356
1357 for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1358 ;
1359
1360 while (n >= cd->cd_ndevs) {
1361 /*
1362 * Need to expand the array.
1363 */
1364 ondevs = cd->cd_ndevs;
1365 osp = cd->cd_devs;
1366
1367 /*
1368 * Release alldevs_lock around allocation, which may
1369 * sleep.
1370 */
1371 mutex_exit(&alldevs_lock);
1372 nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1373 mutex_enter(&alldevs_lock);
1374
1375 /*
1376 * If another thread moved the array while we did
1377 * not hold alldevs_lock, try again.
1378 */
1379 if (cd->cd_devs != osp) {
1380 mutex_exit(&alldevs_lock);
1381 kmem_free(nsp, sizeof(device_t) * nndevs);
1382 mutex_enter(&alldevs_lock);
1383 continue;
1384 }
1385
1386 memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1387 if (ondevs != 0)
1388 memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1389
1390 cd->cd_ndevs = nndevs;
1391 cd->cd_devs = nsp;
1392 if (ondevs != 0) {
1393 mutex_exit(&alldevs_lock);
1394 kmem_free(osp, sizeof(device_t) * ondevs);
1395 mutex_enter(&alldevs_lock);
1396 }
1397 }
1398 KASSERT(mutex_owned(&alldevs_lock));
1399 alldevs_nwrite--;
1400 }
1401
1402 /*
1403 * Put dev into the devices list.
1404 */
1405 static void
1406 config_devlink(device_t dev)
1407 {
1408
1409 mutex_enter(&alldevs_lock);
1410
1411 KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1412
1413 dev->dv_add_gen = alldevs_gen;
1414 /* It is safe to add a device to the tail of the list while
1415 * readers and writers are in the list.
1416 */
1417 TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1418 mutex_exit(&alldevs_lock);
1419 }
1420
1421 static void
1422 config_devfree(device_t dev)
1423 {
1424
1425 KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1426 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1427
1428 if (dev->dv_cfattach->ca_devsize > 0)
1429 kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1430 kmem_free(dev, sizeof(*dev));
1431 }
1432
1433 /*
1434 * Caller must hold alldevs_lock.
1435 */
1436 static void
1437 config_devunlink(device_t dev, struct devicelist *garbage)
1438 {
1439 struct device_garbage *dg = &dev->dv_garbage;
1440 cfdriver_t cd = device_cfdriver(dev);
1441 int i;
1442
1443 KASSERT(mutex_owned(&alldevs_lock));
1444 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1445
1446 /* Unlink from device list. Link to garbage list. */
1447 TAILQ_REMOVE(&alldevs, dev, dv_list);
1448 TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1449
1450 /* Remove from cfdriver's array. */
1451 cd->cd_devs[dev->dv_unit] = NULL;
1452
1453 /*
1454 * If the device now has no units in use, unlink its softc array.
1455 */
1456 for (i = 0; i < cd->cd_ndevs; i++) {
1457 if (cd->cd_devs[i] != NULL)
1458 break;
1459 }
1460 /* Nothing found. Unlink, now. Deallocate, later. */
1461 if (i == cd->cd_ndevs) {
1462 dg->dg_ndevs = cd->cd_ndevs;
1463 dg->dg_devs = cd->cd_devs;
1464 cd->cd_devs = NULL;
1465 cd->cd_ndevs = 0;
1466 }
1467 }
1468
1469 static void
1470 config_devdelete(device_t dev)
1471 {
1472 struct device_garbage *dg = &dev->dv_garbage;
1473 device_lock_t dvl = device_getlock(dev);
1474
1475 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1476
1477 if (dg->dg_devs != NULL)
1478 kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1479
1480 cv_destroy(&dvl->dvl_cv);
1481 mutex_destroy(&dvl->dvl_mtx);
1482
1483 KASSERT(dev->dv_properties != NULL);
1484 prop_object_release(dev->dv_properties);
1485
1486 if (dev->dv_activity_handlers)
1487 panic("%s with registered handlers", __func__);
1488
1489 if (dev->dv_locators) {
1490 size_t amount = *--dev->dv_locators;
1491 kmem_free(dev->dv_locators, amount);
1492 }
1493
1494 config_devfree(dev);
1495 }
1496
1497 static int
1498 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1499 {
1500 int unit;
1501
1502 if (cf->cf_fstate == FSTATE_STAR) {
1503 for (unit = cf->cf_unit; unit < cd->cd_ndevs; unit++)
1504 if (cd->cd_devs[unit] == NULL)
1505 break;
1506 /*
1507 * unit is now the unit of the first NULL device pointer,
1508 * or max(cd->cd_ndevs,cf->cf_unit).
1509 */
1510 } else {
1511 unit = cf->cf_unit;
1512 if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1513 unit = -1;
1514 }
1515 return unit;
1516 }
1517
1518 static int
1519 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1520 {
1521 struct alldevs_foray af;
1522 int unit;
1523
1524 config_alldevs_enter(&af);
1525 for (;;) {
1526 unit = config_unit_nextfree(cd, cf);
1527 if (unit == -1)
1528 break;
1529 if (unit < cd->cd_ndevs) {
1530 cd->cd_devs[unit] = dev;
1531 dev->dv_unit = unit;
1532 break;
1533 }
1534 config_makeroom(unit, cd);
1535 }
1536 config_alldevs_exit(&af);
1537
1538 return unit;
1539 }
1540
1541 static device_t
1542 config_vdevalloc(const device_t parent, const cfdata_t cf, cfarg_t tag,
1543 va_list ap)
1544 {
1545 cfdriver_t cd;
1546 cfattach_t ca;
1547 size_t lname, lunit;
1548 const char *xunit;
1549 int myunit;
1550 char num[10];
1551 device_t dev;
1552 void *dev_private;
1553 const struct cfiattrdata *ia;
1554 device_lock_t dvl;
1555 const int *locs;
1556
1557 cd = config_cfdriver_lookup(cf->cf_name);
1558 if (cd == NULL)
1559 return NULL;
1560
1561 ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1562 if (ca == NULL)
1563 return NULL;
1564
1565 /* get memory for all device vars */
1566 KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1567 if (ca->ca_devsize > 0) {
1568 dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1569 } else {
1570 dev_private = NULL;
1571 }
1572 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1573
1574 /*
1575 * If a handle was supplied to config_attach(), we'll get it
1576 * assigned automatically here. If not, then we'll get the
1577 * default invalid handle.
1578 */
1579 config_get_cfargs(tag, NULL, NULL, &locs, &dev->dv_handle, ap);
1580
1581 dev->dv_class = cd->cd_class;
1582 dev->dv_cfdata = cf;
1583 dev->dv_cfdriver = cd;
1584 dev->dv_cfattach = ca;
1585 dev->dv_activity_count = 0;
1586 dev->dv_activity_handlers = NULL;
1587 dev->dv_private = dev_private;
1588 dev->dv_flags = ca->ca_flags; /* inherit flags from class */
1589
1590 myunit = config_unit_alloc(dev, cd, cf);
1591 if (myunit == -1) {
1592 config_devfree(dev);
1593 return NULL;
1594 }
1595
1596 /* compute length of name and decimal expansion of unit number */
1597 lname = strlen(cd->cd_name);
1598 xunit = number(&num[sizeof(num)], myunit);
1599 lunit = &num[sizeof(num)] - xunit;
1600 if (lname + lunit > sizeof(dev->dv_xname))
1601 panic("config_vdevalloc: device name too long");
1602
1603 dvl = device_getlock(dev);
1604
1605 mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1606 cv_init(&dvl->dvl_cv, "pmfsusp");
1607
1608 memcpy(dev->dv_xname, cd->cd_name, lname);
1609 memcpy(dev->dv_xname + lname, xunit, lunit);
1610 dev->dv_parent = parent;
1611 if (parent != NULL)
1612 dev->dv_depth = parent->dv_depth + 1;
1613 else
1614 dev->dv_depth = 0;
1615 dev->dv_flags |= DVF_ACTIVE; /* always initially active */
1616 if (locs) {
1617 KASSERT(parent); /* no locators at root */
1618 ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1619 dev->dv_locators =
1620 kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1621 *dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1622 memcpy(dev->dv_locators, locs, sizeof(int) * ia->ci_loclen);
1623 }
1624 dev->dv_properties = prop_dictionary_create();
1625 KASSERT(dev->dv_properties != NULL);
1626
1627 prop_dictionary_set_string_nocopy(dev->dv_properties,
1628 "device-driver", dev->dv_cfdriver->cd_name);
1629 prop_dictionary_set_uint16(dev->dv_properties,
1630 "device-unit", dev->dv_unit);
1631 if (parent != NULL) {
1632 prop_dictionary_set_string(dev->dv_properties,
1633 "device-parent", device_xname(parent));
1634 }
1635
1636 if (dev->dv_cfdriver->cd_attrs != NULL)
1637 config_add_attrib_dict(dev);
1638
1639 return dev;
1640 }
1641
1642 static device_t
1643 config_devalloc(const device_t parent, const cfdata_t cf, cfarg_t tag, ...)
1644 {
1645 device_t dev;
1646 va_list ap;
1647
1648 va_start(ap, tag);
1649 dev = config_vdevalloc(parent, cf, tag, ap);
1650 va_end(ap);
1651
1652 return dev;
1653 }
1654
1655 /*
1656 * Create an array of device attach attributes and add it
1657 * to the device's dv_properties dictionary.
1658 *
1659 * <key>interface-attributes</key>
1660 * <array>
1661 * <dict>
1662 * <key>attribute-name</key>
1663 * <string>foo</string>
1664 * <key>locators</key>
1665 * <array>
1666 * <dict>
1667 * <key>loc-name</key>
1668 * <string>foo-loc1</string>
1669 * </dict>
1670 * <dict>
1671 * <key>loc-name</key>
1672 * <string>foo-loc2</string>
1673 * <key>default</key>
1674 * <string>foo-loc2-default</string>
1675 * </dict>
1676 * ...
1677 * </array>
1678 * </dict>
1679 * ...
1680 * </array>
1681 */
1682
1683 static void
1684 config_add_attrib_dict(device_t dev)
1685 {
1686 int i, j;
1687 const struct cfiattrdata *ci;
1688 prop_dictionary_t attr_dict, loc_dict;
1689 prop_array_t attr_array, loc_array;
1690
1691 if ((attr_array = prop_array_create()) == NULL)
1692 return;
1693
1694 for (i = 0; ; i++) {
1695 if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1696 break;
1697 if ((attr_dict = prop_dictionary_create()) == NULL)
1698 break;
1699 prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1700 ci->ci_name);
1701
1702 /* Create an array of the locator names and defaults */
1703
1704 if (ci->ci_loclen != 0 &&
1705 (loc_array = prop_array_create()) != NULL) {
1706 for (j = 0; j < ci->ci_loclen; j++) {
1707 loc_dict = prop_dictionary_create();
1708 if (loc_dict == NULL)
1709 continue;
1710 prop_dictionary_set_string_nocopy(loc_dict,
1711 "loc-name", ci->ci_locdesc[j].cld_name);
1712 if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1713 prop_dictionary_set_string_nocopy(
1714 loc_dict, "default",
1715 ci->ci_locdesc[j].cld_defaultstr);
1716 prop_array_set(loc_array, j, loc_dict);
1717 prop_object_release(loc_dict);
1718 }
1719 prop_dictionary_set_and_rel(attr_dict, "locators",
1720 loc_array);
1721 }
1722 prop_array_add(attr_array, attr_dict);
1723 prop_object_release(attr_dict);
1724 }
1725 if (i == 0)
1726 prop_object_release(attr_array);
1727 else
1728 prop_dictionary_set_and_rel(dev->dv_properties,
1729 "interface-attributes", attr_array);
1730
1731 return;
1732 }
1733
1734 /*
1735 * Attach a found device.
1736 */
1737 device_t
1738 config_vattach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1739 cfarg_t tag, va_list ap)
1740 {
1741 device_t dev;
1742 struct cftable *ct;
1743 const char *drvname;
1744 bool deferred;
1745
1746 KASSERT(KERNEL_LOCKED_P());
1747
1748 dev = config_vdevalloc(parent, cf, tag, ap);
1749 if (!dev)
1750 panic("config_attach: allocation of device softc failed");
1751
1752 /* XXX redundant - see below? */
1753 if (cf->cf_fstate != FSTATE_STAR) {
1754 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1755 cf->cf_fstate = FSTATE_FOUND;
1756 }
1757
1758 config_devlink(dev);
1759
1760 if (config_do_twiddle && cold)
1761 twiddle();
1762 else
1763 aprint_naive("Found ");
1764 /*
1765 * We want the next two printfs for normal, verbose, and quiet,
1766 * but not silent (in which case, we're twiddling, instead).
1767 */
1768 if (parent == ROOT) {
1769 aprint_naive("%s (root)", device_xname(dev));
1770 aprint_normal("%s (root)", device_xname(dev));
1771 } else {
1772 aprint_naive("%s at %s", device_xname(dev),
1773 device_xname(parent));
1774 aprint_normal("%s at %s", device_xname(dev),
1775 device_xname(parent));
1776 if (print)
1777 (void) (*print)(aux, NULL);
1778 }
1779
1780 /*
1781 * Before attaching, clobber any unfound devices that are
1782 * otherwise identical.
1783 * XXX code above is redundant?
1784 */
1785 drvname = dev->dv_cfdriver->cd_name;
1786 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1787 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1788 if (STREQ(cf->cf_name, drvname) &&
1789 cf->cf_unit == dev->dv_unit) {
1790 if (cf->cf_fstate == FSTATE_NOTFOUND)
1791 cf->cf_fstate = FSTATE_FOUND;
1792 }
1793 }
1794 }
1795 device_register(dev, aux);
1796
1797 /* Let userland know */
1798 devmon_report_device(dev, true);
1799
1800 config_pending_incr(dev);
1801 (*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1802 config_pending_decr(dev);
1803
1804 mutex_enter(&config_misc_lock);
1805 deferred = (dev->dv_pending != 0);
1806 mutex_exit(&config_misc_lock);
1807
1808 if (!deferred && !device_pmf_is_registered(dev))
1809 aprint_debug_dev(dev,
1810 "WARNING: power management not supported\n");
1811
1812 config_process_deferred(&deferred_config_queue, dev);
1813
1814 device_register_post_config(dev, aux);
1815 return dev;
1816 }
1817
1818 device_t
1819 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1820 cfarg_t tag, ...)
1821 {
1822 device_t dev;
1823 va_list ap;
1824
1825 KASSERT(KERNEL_LOCKED_P());
1826
1827 va_start(ap, tag);
1828 dev = config_vattach(parent, cf, aux, print, tag, ap);
1829 va_end(ap);
1830
1831 return dev;
1832 }
1833
1834 /*
1835 * As above, but for pseudo-devices. Pseudo-devices attached in this
1836 * way are silently inserted into the device tree, and their children
1837 * attached.
1838 *
1839 * Note that because pseudo-devices are attached silently, any information
1840 * the attach routine wishes to print should be prefixed with the device
1841 * name by the attach routine.
1842 */
1843 device_t
1844 config_attach_pseudo(cfdata_t cf)
1845 {
1846 device_t dev;
1847
1848 KERNEL_LOCK(1, NULL);
1849
1850 dev = config_devalloc(ROOT, cf, CFARG_EOL);
1851 if (!dev)
1852 goto out;
1853
1854 /* XXX mark busy in cfdata */
1855
1856 if (cf->cf_fstate != FSTATE_STAR) {
1857 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1858 cf->cf_fstate = FSTATE_FOUND;
1859 }
1860
1861 config_devlink(dev);
1862
1863 #if 0 /* XXXJRT not yet */
1864 device_register(dev, NULL); /* like a root node */
1865 #endif
1866
1867 /* Let userland know */
1868 devmon_report_device(dev, true);
1869
1870 config_pending_incr(dev);
1871 (*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL);
1872 config_pending_decr(dev);
1873
1874 config_process_deferred(&deferred_config_queue, dev);
1875
1876 out: KERNEL_UNLOCK_ONE(NULL);
1877 return dev;
1878 }
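
/*
 * Illustrative sketch (hypothetical "mydev" pseudo-device): callers of
 * config_attach_pseudo() typically build a cfdata record by hand,
 * since pseudo-devices have no entry in the static tables.  With
 * FSTATE_STAR the next free unit number is chosen automatically.
 *
 *	cfdata_t cf = kmem_zalloc(sizeof(*cf), KM_SLEEP);
 *
 *	cf->cf_name = "mydev";		(cfdriver name)
 *	cf->cf_atname = "mydev";	(cfattach name)
 *	cf->cf_unit = 0;
 *	cf->cf_fstate = FSTATE_STAR;
 *	dev = config_attach_pseudo(cf);
 */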
1879
1880 /*
1881 * Caller must hold alldevs_lock.
1882 */
1883 static void
1884 config_collect_garbage(struct devicelist *garbage)
1885 {
1886 device_t dv;
1887
1888 KASSERT(!cpu_intr_p());
1889 KASSERT(!cpu_softintr_p());
1890 KASSERT(mutex_owned(&alldevs_lock));
1891
1892 while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
1893 TAILQ_FOREACH(dv, &alldevs, dv_list) {
1894 if (dv->dv_del_gen != 0)
1895 break;
1896 }
1897 if (dv == NULL) {
1898 alldevs_garbage = false;
1899 break;
1900 }
1901 config_devunlink(dv, garbage);
1902 }
1903 KASSERT(mutex_owned(&alldevs_lock));
1904 }
1905
1906 static void
1907 config_dump_garbage(struct devicelist *garbage)
1908 {
1909 device_t dv;
1910
1911 while ((dv = TAILQ_FIRST(garbage)) != NULL) {
1912 TAILQ_REMOVE(garbage, dv, dv_list);
1913 config_devdelete(dv);
1914 }
1915 }
1916
1917 static int
1918 config_detach_enter(device_t dev)
1919 {
1920 int error;
1921
1922 mutex_enter(&config_misc_lock);
1923 for (;;) {
1924 if (dev->dv_pending == 0 && dev->dv_detaching == NULL) {
1925 dev->dv_detaching = curlwp;
1926 error = 0;
1927 break;
1928 }
1929 KASSERTMSG(dev->dv_detaching != curlwp,
1930 "recursively detaching %s", device_xname(dev));
1931 error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
1932 if (error)
1933 break;
1934 }
1935 KASSERT(error || dev->dv_detaching == curlwp);
1936 mutex_exit(&config_misc_lock);
1937
1938 return error;
1939 }
1940
1941 static void
1942 config_detach_exit(device_t dev)
1943 {
1944
1945 mutex_enter(&config_misc_lock);
1946 KASSERT(dev->dv_detaching == curlwp);
1947 dev->dv_detaching = NULL;
1948 cv_broadcast(&config_misc_cv);
1949 mutex_exit(&config_misc_lock);
1950 }
1951
1952 /*
1953 * Detach a device. Optionally forced (e.g. because of hardware
1954 * removal) and quiet. Returns zero if successful, non-zero
1955 * (an error code) otherwise.
1956 *
1957 * Note that this code wants to be run from a process context, so
1958 * that the detach can sleep to allow processes which have a device
1959 * open to run and unwind their stacks.
1960 */
1961 int
1962 config_detach(device_t dev, int flags)
1963 {
1964 struct alldevs_foray af;
1965 struct cftable *ct;
1966 cfdata_t cf;
1967 const struct cfattach *ca;
1968 struct cfdriver *cd;
1969 device_t d __diagused;
1970 int rv = 0;
1971
1972 KASSERT(KERNEL_LOCKED_P());
1973
1974 cf = dev->dv_cfdata;
1975 KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
1976 cf->cf_fstate == FSTATE_STAR),
1977 "config_detach: %s: bad device fstate: %d",
1978 device_xname(dev), cf ? cf->cf_fstate : -1);
1979
1980 cd = dev->dv_cfdriver;
1981 KASSERT(cd != NULL);
1982
1983 ca = dev->dv_cfattach;
1984 KASSERT(ca != NULL);
1985
1986 /*
1987 * Only one detach at a time, please -- and not until fully
1988 * attached.
1989 */
1990 rv = config_detach_enter(dev);
1991 if (rv)
1992 return rv;
1993
1994 mutex_enter(&alldevs_lock);
1995 if (dev->dv_del_gen != 0) {
1996 mutex_exit(&alldevs_lock);
1997 #ifdef DIAGNOSTIC
1998 printf("%s: %s is already detached\n", __func__,
1999 device_xname(dev));
2000 #endif /* DIAGNOSTIC */
2001 config_detach_exit(dev);
2002 return ENOENT;
2003 }
2004 alldevs_nwrite++;
2005 mutex_exit(&alldevs_lock);
2006
2007 if (!detachall &&
2008 (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2009 (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2010 rv = EOPNOTSUPP;
2011 } else if (ca->ca_detach != NULL) {
2012 rv = (*ca->ca_detach)(dev, flags);
2013 } else
2014 rv = EOPNOTSUPP;
2015
2016 /*
2017 * If it was not possible to detach the device, then we either
2018 * panic() (for the forced but failed case), or return an error.
2019 *
2020 * If it was possible to detach the device, ensure that the
2021 * device is deactivated.
2022 */
2023 if (rv == 0)
2024 dev->dv_flags &= ~DVF_ACTIVE;
2025 else if ((flags & DETACH_FORCE) == 0)
2026 goto out;
2027 else {
2028 panic("config_detach: forced detach of %s failed (%d)",
2029 device_xname(dev), rv);
2030 }
2031
2032 /*
2033 * The device has now been successfully detached.
2034 */
2035
2036 /* Let userland know */
2037 devmon_report_device(dev, false);
2038
2039 #ifdef DIAGNOSTIC
2040 /*
2041 * Sanity: If you're successfully detached, you should have no
2042 * children. (Note that because children must be attached
2043 * after parents, we only need to search the latter part of
2044 * the list.)
2045 */
2046 mutex_enter(&alldevs_lock);
2047 for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2048 d = TAILQ_NEXT(d, dv_list)) {
2049 if (d->dv_parent == dev && d->dv_del_gen == 0) {
2050 printf("config_detach: detached device %s"
2051 " has children %s\n", device_xname(dev),
2052 device_xname(d));
2053 panic("config_detach");
2054 }
2055 }
2056 mutex_exit(&alldevs_lock);
2057 #endif
2058
2059 /* notify the parent that the child is gone */
2060 if (dev->dv_parent) {
2061 device_t p = dev->dv_parent;
2062 if (p->dv_cfattach->ca_childdetached)
2063 (*p->dv_cfattach->ca_childdetached)(p, dev);
2064 }
2065
2066 /*
2067 * Mark cfdata to show that the unit can be reused, if possible.
2068 */
2069 TAILQ_FOREACH(ct, &allcftables, ct_list) {
2070 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2071 if (STREQ(cf->cf_name, cd->cd_name)) {
2072 if (cf->cf_fstate == FSTATE_FOUND &&
2073 cf->cf_unit == dev->dv_unit)
2074 cf->cf_fstate = FSTATE_NOTFOUND;
2075 }
2076 }
2077 }
2078
2079 if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2080 aprint_normal_dev(dev, "detached\n");
2081
2082 out:
2083 config_detach_exit(dev);
2084
2085 config_alldevs_enter(&af);
2086 KASSERT(alldevs_nwrite != 0);
2087 --alldevs_nwrite;
2088 if (rv == 0 && dev->dv_del_gen == 0) {
2089 if (alldevs_nwrite == 0 && alldevs_nread == 0)
2090 config_devunlink(dev, &af.af_garbage);
2091 else {
2092 dev->dv_del_gen = alldevs_gen;
2093 alldevs_garbage = true;
2094 }
2095 }
2096 config_alldevs_exit(&af);
2097
2098 return rv;
2099 }
2100
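/*
 * Detach all children of `parent', in the order the device iterator
 * returns them.  Stops at the first child that fails to detach and
 * returns that error; returns 0 if every child detached.
 */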
2101 int
2102 config_detach_children(device_t parent, int flags)
2103 {
2104 device_t dv;
2105 deviter_t di;
2106 int error = 0;
2107
2108 KASSERT(KERNEL_LOCKED_P());
2109
2110 for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2111 dv = deviter_next(&di)) {
2112 if (device_parent(dv) != parent)
2113 continue;
2114 if ((error = config_detach(dv, flags)) != 0)
2115 break;
2116 }
2117 deviter_release(&di);
2118 return error;
2119 }
2120
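/*
 * shutdown_first()/shutdown_next() walk all active devices, leaves
 * first, for use by config_detach_all() at shutdown time.  The
 * iterator state lives in the caller-supplied shutdown_state and is
 * reset once the walk returns NULL.
 */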
2121 device_t
2122 shutdown_first(struct shutdown_state *s)
2123 {
2124 if (!s->initialized) {
2125 deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2126 s->initialized = true;
2127 }
2128 return shutdown_next(s);
2129 }
2130
2131 device_t
2132 shutdown_next(struct shutdown_state *s)
2133 {
2134 device_t dv;
2135
2136 while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2137 ;
2138
2139 if (dv == NULL)
2140 s->initialized = false;
2141
2142 return dv;
2143 }
2144
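/*
 * Detach as many devices as possible at shutdown time.  Nothing is
 * detached for RB_NOSYNC/RB_DUMP reboots; DETACH_POWEROFF is added
 * for RB_POWERDOWN.  Returns true if at least one device was
 * detached.
 */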
2145 bool
2146 config_detach_all(int how)
2147 {
2148 static struct shutdown_state s;
2149 device_t curdev;
2150 bool progress = false;
2151 int flags;
2152
2153 KERNEL_LOCK(1, NULL);
2154
2155 if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2156 goto out;
2157
2158 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2159 flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2160 else
2161 flags = DETACH_SHUTDOWN;
2162
2163 for (curdev = shutdown_first(&s); curdev != NULL;
2164 curdev = shutdown_next(&s)) {
2165 aprint_debug(" detaching %s, ", device_xname(curdev));
2166 if (config_detach(curdev, flags) == 0) {
2167 progress = true;
2168 aprint_debug("success.");
2169 } else
2170 aprint_debug("failed.");
2171 }
2172
2173 out: KERNEL_UNLOCK_ONE(NULL);
2174 return progress;
2175 }
2176
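/*
 * Return true iff `ancestor' is a proper ancestor of `descendant'
 * (a device is not considered its own ancestor).
 */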
2177 static bool
2178 device_is_ancestor_of(device_t ancestor, device_t descendant)
2179 {
2180 device_t dv;
2181
2182 for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2183 if (device_parent(dv) == ancestor)
2184 return true;
2185 }
2186 return false;
2187 }
2188
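/*
 * Deactivate `dev' and all of its descendants: clear DVF_ACTIVE on
 * each and invoke its DVACT_DEACTIVATE hook, restoring the flags of
 * any device whose hook fails.  Returns the result of the last hook
 * that was invoked, or 0 if none was.
 */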
2189 int
2190 config_deactivate(device_t dev)
2191 {
2192 deviter_t di;
2193 const struct cfattach *ca;
2194 device_t descendant;
2195 int s, rv = 0, oflags;
2196
2197 for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2198 descendant != NULL;
2199 descendant = deviter_next(&di)) {
2200 if (dev != descendant &&
2201 !device_is_ancestor_of(dev, descendant))
2202 continue;
2203
2204 if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2205 continue;
2206
2207 ca = descendant->dv_cfattach;
2208 oflags = descendant->dv_flags;
2209
2210 descendant->dv_flags &= ~DVF_ACTIVE;
2211 if (ca->ca_activate == NULL)
2212 continue;
2213 s = splhigh();
2214 rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2215 splx(s);
2216 if (rv != 0)
2217 descendant->dv_flags = oflags;
2218 }
2219 deviter_release(&di);
2220 return rv;
2221 }
2222
2223 /*
2224 * Defer the configuration of the specified device until all
2225 * of its parent's devices have been attached.
2226 */
2227 void
2228 config_defer(device_t dev, void (*func)(device_t))
2229 {
2230 struct deferred_config *dc;
2231
2232 if (dev->dv_parent == NULL)
2233 panic("config_defer: can't defer config of a root device");
2234
2235 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2236
2237 config_pending_incr(dev);
2238
2239 mutex_enter(&config_misc_lock);
2240 #ifdef DIAGNOSTIC
2241 struct deferred_config *odc;
2242 TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2243 if (odc->dc_dev == dev)
2244 panic("config_defer: deferred twice");
2245 }
2246 #endif
2247 dc->dc_dev = dev;
2248 dc->dc_func = func;
2249 TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2250 mutex_exit(&config_misc_lock);
2251 }
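
/*
 * Illustrative sketch of config_defer() usage; the "xyz" names are
 * hypothetical and not part of this file.  A driver whose attach
 * routine cannot finish until all of its parent's children have
 * attached defers the remainder of its configuration:
 *
 *	static void
 *	xyz_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_defer(self, xyz_deferred_attach);
 *	}
 *
 *	static void
 *	xyz_deferred_attach(device_t self)
 *	{
 *		struct xyz_softc *sc = device_private(self);
 *
 *		... finish configuration using sc ...
 *	}
 */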
2252
2253 /*
2254 * Defer some autoconfiguration for a device until after interrupts
2255 * are enabled.
2256 */
2257 void
2258 config_interrupts(device_t dev, void (*func)(device_t))
2259 {
2260 struct deferred_config *dc;
2261
2262 /*
2263 	 * If interrupts are already enabled, invoke the callback now.
2264 */
2265 if (cold == 0) {
2266 (*func)(dev);
2267 return;
2268 }
2269
2270 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2271
2272 config_pending_incr(dev);
2273
2274 mutex_enter(&config_misc_lock);
2275 #ifdef DIAGNOSTIC
2276 struct deferred_config *odc;
2277 TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2278 if (odc->dc_dev == dev)
2279 panic("config_interrupts: deferred twice");
2280 }
2281 #endif
2282 dc->dc_dev = dev;
2283 dc->dc_func = func;
2284 TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2285 mutex_exit(&config_misc_lock);
2286 }
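
/*
 * Illustrative sketch of config_interrupts() usage (hypothetical
 * "xyz" names): work that needs working interrupts, such as a
 * firmware download, is postponed until interrupts are enabled:
 *
 *	static void
 *	xyz_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_interrupts(self, xyz_attach_intr);
 *	}
 *
 *	static void
 *	xyz_attach_intr(device_t self)
 *	{
 *		... interrupt-driven initialization ...
 *	}
 */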
2287
2288 /*
2289  * Defer some autoconfiguration for a device until after the root
2290  * file system is mounted (to load firmware, etc.).
2291 */
2292 void
2293 config_mountroot(device_t dev, void (*func)(device_t))
2294 {
2295 struct deferred_config *dc;
2296
2297 /*
2298 	 * If the root file system is already mounted, invoke the callback now.
2299 */
2300 if (root_is_mounted) {
2301 (*func)(dev);
2302 return;
2303 }
2304
2305 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2306
2307 mutex_enter(&config_misc_lock);
2308 #ifdef DIAGNOSTIC
2309 struct deferred_config *odc;
2310 TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2311 if (odc->dc_dev == dev)
2312 panic("%s: deferred twice", __func__);
2313 }
2314 #endif
2315
2316 dc->dc_dev = dev;
2317 dc->dc_func = func;
2318 TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2319 mutex_exit(&config_misc_lock);
2320 }
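
/*
 * Illustrative sketch of config_mountroot() usage (hypothetical
 * names): a driver that loads its firmware from the file system
 * schedules that work for after the root file system is mounted:
 *
 *	static void
 *	xyz_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		config_mountroot(self, xyz_load_firmware);
 *	}
 */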
2321
2322 /*
2323 * Process a deferred configuration queue.
2324 */
2325 static void
2326 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2327 {
2328 struct deferred_config *dc;
2329
2330 KASSERT(KERNEL_LOCKED_P());
2331
2332 mutex_enter(&config_misc_lock);
2333 dc = TAILQ_FIRST(queue);
2334 while (dc) {
2335 if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2336 TAILQ_REMOVE(queue, dc, dc_queue);
2337 mutex_exit(&config_misc_lock);
2338
2339 (*dc->dc_func)(dc->dc_dev);
2340 config_pending_decr(dc->dc_dev);
2341 kmem_free(dc, sizeof(*dc));
2342
2343 mutex_enter(&config_misc_lock);
2344 /* Restart, queue might have changed */
2345 dc = TAILQ_FIRST(queue);
2346 } else {
2347 dc = TAILQ_NEXT(dc, dc_queue);
2348 }
2349 }
2350 mutex_exit(&config_misc_lock);
2351 }
2352
2353 /*
2354 * Manipulate the config_pending semaphore.
2355 */
2356 void
2357 config_pending_incr(device_t dev)
2358 {
2359
2360 mutex_enter(&config_misc_lock);
2361 KASSERTMSG(dev->dv_pending < INT_MAX,
2362 "%s: excess config_pending_incr", device_xname(dev));
2363 if (dev->dv_pending++ == 0)
2364 TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2365 #ifdef DEBUG_AUTOCONF
2366 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2367 #endif
2368 mutex_exit(&config_misc_lock);
2369 }
2370
2371 void
2372 config_pending_decr(device_t dev)
2373 {
2374
2375 mutex_enter(&config_misc_lock);
2376 KASSERTMSG(dev->dv_pending > 0,
2377 "%s: excess config_pending_decr", device_xname(dev));
2378 if (--dev->dv_pending == 0) {
2379 TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2380 cv_broadcast(&config_misc_cv);
2381 }
2382 #ifdef DEBUG_AUTOCONF
2383 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2384 #endif
2385 mutex_exit(&config_misc_lock);
2386 }
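
/*
 * config_defer() and config_interrupts() adjust the config_pending
 * count themselves.  A driver that instead finishes attachment in its
 * own thread brackets that work explicitly so that config_finalize()
 * waits for it.  Illustrative sketch (xyz_attach_thread is
 * hypothetical):
 *
 *	config_pending_incr(self);
 *	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *	    xyz_attach_thread, sc, NULL, "%sattach",
 *	    device_xname(self)) != 0)
 *		config_pending_decr(self);
 *
 * with xyz_attach_thread() calling config_pending_decr(self) when it
 * has finished.
 */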
2387
2388 /*
2389 * Register a "finalization" routine. Finalization routines are
2390 * called iteratively once all real devices have been found during
2391 * autoconfiguration, for as long as any one finalizer has done
2392 * any work.
2393 */
2394 int
2395 config_finalize_register(device_t dev, int (*fn)(device_t))
2396 {
2397 struct finalize_hook *f;
2398 int error = 0;
2399
2400 KERNEL_LOCK(1, NULL);
2401
2402 /*
2403 * If finalization has already been done, invoke the
2404 * callback function now.
2405 */
2406 if (config_finalize_done) {
2407 while ((*fn)(dev) != 0)
2408 /* loop */ ;
2409 goto out;
2410 }
2411
2412 /* Ensure this isn't already on the list. */
2413 TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2414 if (f->f_func == fn && f->f_dev == dev) {
2415 error = EEXIST;
2416 goto out;
2417 }
2418 }
2419
2420 f = kmem_alloc(sizeof(*f), KM_SLEEP);
2421 f->f_func = fn;
2422 f->f_dev = dev;
2423 TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2424
2425 /* Success! */
2426 error = 0;
2427
2428 out: KERNEL_UNLOCK_ONE(NULL);
2429 return error;
2430 }
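
/*
 * Illustrative sketch of a finalizer (hypothetical "xyzbus" names):
 * the hook returns nonzero as long as it actually did some work and
 * 0 once there is nothing left to do, so that the loop in
 * config_finalize() terminates:
 *
 *	static int
 *	xyzbus_finalize(device_t self)
 *	{
 *		if (xyzbus_rescan(self) > 0)
 *			return 1;
 *		return 0;
 *	}
 *
 *	error = config_finalize_register(self, xyzbus_finalize);
 */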
2431
2432 void
2433 config_finalize(void)
2434 {
2435 struct finalize_hook *f;
2436 struct pdevinit *pdev;
2437 extern struct pdevinit pdevinit[];
2438 int errcnt, rv;
2439
2440 /*
2441 * Now that device driver threads have been created, wait for
2442 * them to finish any deferred autoconfiguration.
2443 */
2444 mutex_enter(&config_misc_lock);
2445 while (!TAILQ_EMPTY(&config_pending)) {
2446 device_t dev;
2447 TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2448 aprint_debug_dev(dev, "holding up boot\n");
2449 cv_wait(&config_misc_cv, &config_misc_lock);
2450 }
2451 mutex_exit(&config_misc_lock);
2452
2453 KERNEL_LOCK(1, NULL);
2454
2455 /* Attach pseudo-devices. */
2456 for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2457 (*pdev->pdev_attach)(pdev->pdev_count);
2458
2459 /* Run the hooks until none of them does any work. */
2460 do {
2461 rv = 0;
2462 TAILQ_FOREACH(f, &config_finalize_list, f_list)
2463 rv |= (*f->f_func)(f->f_dev);
2464 } while (rv != 0);
2465
2466 config_finalize_done = 1;
2467
2468 /* Now free all the hooks. */
2469 while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2470 TAILQ_REMOVE(&config_finalize_list, f, f_list);
2471 kmem_free(f, sizeof(*f));
2472 }
2473
2474 KERNEL_UNLOCK_ONE(NULL);
2475
2476 errcnt = aprint_get_error_count();
2477 if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2478 (boothowto & AB_VERBOSE) == 0) {
2479 mutex_enter(&config_misc_lock);
2480 if (config_do_twiddle) {
2481 config_do_twiddle = 0;
2482 printf_nolog(" done.\n");
2483 }
2484 mutex_exit(&config_misc_lock);
2485 }
2486 if (errcnt != 0) {
2487 printf("WARNING: %d error%s while detecting hardware; "
2488 "check system log.\n", errcnt,
2489 errcnt == 1 ? "" : "s");
2490 }
2491 }
2492
2493 void
2494 config_twiddle_init(void)
2495 {
2496
2497 if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2498 config_do_twiddle = 1;
2499 }
2500 callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2501 }
2502
2503 void
2504 config_twiddle_fn(void *cookie)
2505 {
2506
2507 mutex_enter(&config_misc_lock);
2508 if (config_do_twiddle) {
2509 twiddle();
2510 callout_schedule(&config_twiddle_ch, mstohz(100));
2511 }
2512 mutex_exit(&config_misc_lock);
2513 }
2514
2515 static void
2516 config_alldevs_enter(struct alldevs_foray *af)
2517 {
2518 TAILQ_INIT(&af->af_garbage);
2519 mutex_enter(&alldevs_lock);
2520 config_collect_garbage(&af->af_garbage);
2521 }
2522
2523 static void
2524 config_alldevs_exit(struct alldevs_foray *af)
2525 {
2526 mutex_exit(&alldevs_lock);
2527 config_dump_garbage(&af->af_garbage);
2528 }
2529
2530 /*
2531 * device_lookup:
2532 *
2533 * Look up a device instance for a given driver.
2534 */
2535 device_t
2536 device_lookup(cfdriver_t cd, int unit)
2537 {
2538 device_t dv;
2539
2540 mutex_enter(&alldevs_lock);
2541 if (unit < 0 || unit >= cd->cd_ndevs)
2542 dv = NULL;
2543 else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2544 dv = NULL;
2545 mutex_exit(&alldevs_lock);
2546
2547 return dv;
2548 }
2549
2550 /*
2551 * device_lookup_private:
2552 *
2553 * Look up a softc instance for a given driver.
2554 */
2555 void *
2556 device_lookup_private(cfdriver_t cd, int unit)
2557 {
2558
2559 return device_private(device_lookup(cd, unit));
2560 }
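
/*
 * Illustrative sketch: given a driver's cfdriver (the "xyz_cd" below
 * is hypothetical), map a unit number to its device_t or softc.  Both
 * return NULL if that unit was never attached or has been detached:
 *
 *	device_t dv = device_lookup(&xyz_cd, unit);
 *	struct xyz_softc *sc = device_lookup_private(&xyz_cd, unit);
 */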
2561
2562 /*
2563 * device_find_by_xname:
2564 *
2565 * Returns the device of the given name or NULL if it doesn't exist.
2566 */
2567 device_t
2568 device_find_by_xname(const char *name)
2569 {
2570 device_t dv;
2571 deviter_t di;
2572
2573 for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2574 if (strcmp(device_xname(dv), name) == 0)
2575 break;
2576 }
2577 deviter_release(&di);
2578
2579 return dv;
2580 }
2581
2582 /*
2583 * device_find_by_driver_unit:
2584 *
2585 * Returns the device of the given driver name and unit or
2586 * NULL if it doesn't exist.
2587 */
2588 device_t
2589 device_find_by_driver_unit(const char *name, int unit)
2590 {
2591 struct cfdriver *cd;
2592
2593 if ((cd = config_cfdriver_lookup(name)) == NULL)
2594 return NULL;
2595 return device_lookup(cd, unit);
2596 }
2597
2598 static bool
2599 match_strcmp(const char * const s1, const char * const s2)
2600 {
2601 return strcmp(s1, s2) == 0;
2602 }
2603
2604 static bool
2605 match_pmatch(const char * const s1, const char * const s2)
2606 {
2607 return pmatch(s1, s2, NULL) == 2;
2608 }
2609
2610 static bool
2611 strarray_match_internal(const char ** const strings,
2612 unsigned int const nstrings, const char * const str,
2613 unsigned int * const indexp,
2614 bool (*match_fn)(const char *, const char *))
2615 {
2616 unsigned int i;
2617
2618 if (strings == NULL || nstrings == 0) {
2619 return false;
2620 }
2621
2622 for (i = 0; i < nstrings; i++) {
2623 if ((*match_fn)(strings[i], str)) {
2624 *indexp = i;
2625 return true;
2626 }
2627 }
2628
2629 return false;
2630 }
2631
2632 static int
2633 strarray_match(const char ** const strings, unsigned int const nstrings,
2634 const char * const str)
2635 {
2636 unsigned int idx;
2637
2638 if (strarray_match_internal(strings, nstrings, str, &idx,
2639 match_strcmp)) {
2640 return (int)(nstrings - idx);
2641 }
2642 return 0;
2643 }
2644
2645 static int
2646 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2647 const char * const pattern)
2648 {
2649 unsigned int idx;
2650
2651 if (strarray_match_internal(strings, nstrings, pattern, &idx,
2652 match_pmatch)) {
2653 return (int)(nstrings - idx);
2654 }
2655 return 0;
2656 }
2657
2658 static int
2659 device_compatible_match_strarray_internal(
2660 const char **device_compats, int ndevice_compats,
2661 const struct device_compatible_entry *driver_compats,
2662 const struct device_compatible_entry **matching_entryp,
2663 int (*match_fn)(const char **, unsigned int, const char *))
2664 {
2665 const struct device_compatible_entry *dce = NULL;
2666 int rv;
2667
2668 if (ndevice_compats == 0 || device_compats == NULL ||
2669 driver_compats == NULL)
2670 return 0;
2671
2672 for (dce = driver_compats; dce->compat != NULL; dce++) {
2673 rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2674 if (rv != 0) {
2675 if (matching_entryp != NULL) {
2676 *matching_entryp = dce;
2677 }
2678 return rv;
2679 }
2680 }
2681 return 0;
2682 }
2683
2684 /*
2685 * device_compatible_match:
2686 *
2687 * Match a driver's "compatible" data against a device's
2688 	 * "compatible" strings.  Returns a result weighted by
2689 * which device "compatible" string was matched.
2690 */
2691 int
2692 device_compatible_match(const char **device_compats, int ndevice_compats,
2693 const struct device_compatible_entry *driver_compats)
2694 {
2695 return device_compatible_match_strarray_internal(device_compats,
2696 ndevice_compats, driver_compats, NULL, strarray_match);
2697 }
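
/*
 * Illustrative sketch (the table contents are hypothetical): a
 * driver's match routine compares the "compatible" strings supplied
 * by the bus against its own table, which is terminated by an entry
 * whose compat pointer is NULL:
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,widget-v2" },
 *		{ .compat = "acme,widget" },
 *		{ .compat = NULL }
 *	};
 *
 *	match = device_compatible_match(device_compats, ndevice_compats,
 *	    compat_data);
 *
 * A larger return value means an earlier entry in the device's
 * "compatible" list matched, so the device should list its most
 * specific strings first.
 */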
2698
2699 /*
2700 * device_compatible_pmatch:
2701 *
2702 * Like device_compatible_match(), but uses pmatch(9) to compare
2703 * the device "compatible" strings against patterns in the
2704 * driver's "compatible" data.
2705 */
2706 int
2707 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
2708 const struct device_compatible_entry *driver_compats)
2709 {
2710 return device_compatible_match_strarray_internal(device_compats,
2711 ndevice_compats, driver_compats, NULL, strarray_pmatch);
2712 }
2713
2714 static int
2715 device_compatible_match_strlist_internal(
2716 const char * const device_compats, size_t const device_compatsize,
2717 const struct device_compatible_entry *driver_compats,
2718 const struct device_compatible_entry **matching_entryp,
2719 int (*match_fn)(const char *, size_t, const char *))
2720 {
2721 const struct device_compatible_entry *dce = NULL;
2722 int rv;
2723
2724 if (device_compats == NULL || device_compatsize == 0 ||
2725 driver_compats == NULL)
2726 return 0;
2727
2728 for (dce = driver_compats; dce->compat != NULL; dce++) {
2729 rv = (*match_fn)(device_compats, device_compatsize,
2730 dce->compat);
2731 if (rv != 0) {
2732 if (matching_entryp != NULL) {
2733 *matching_entryp = dce;
2734 }
2735 return rv;
2736 }
2737 }
2738 return 0;
2739 }
2740
2741 /*
2742 * device_compatible_match_strlist:
2743 *
2744 	 *	Like device_compatible_match(), but takes the device
2745 * "compatible" strings as an OpenFirmware-style string
2746 * list.
2747 */
2748 int
2749 device_compatible_match_strlist(
2750 const char * const device_compats, size_t const device_compatsize,
2751 const struct device_compatible_entry *driver_compats)
2752 {
2753 return device_compatible_match_strlist_internal(device_compats,
2754 device_compatsize, driver_compats, NULL, strlist_match);
2755 }
2756
2757 /*
2758 * device_compatible_pmatch_strlist:
2759 *
2760 	 *	Like device_compatible_pmatch(), but takes the device
2761 * "compatible" strings as an OpenFirmware-style string
2762 * list.
2763 */
2764 int
2765 device_compatible_pmatch_strlist(
2766 const char * const device_compats, size_t const device_compatsize,
2767 const struct device_compatible_entry *driver_compats)
2768 {
2769 return device_compatible_match_strlist_internal(device_compats,
2770 device_compatsize, driver_compats, NULL, strlist_pmatch);
2771 }
2772
2773 static int
2774 device_compatible_match_id_internal(
2775 uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
2776 const struct device_compatible_entry *driver_compats,
2777 const struct device_compatible_entry **matching_entryp)
2778 {
2779 const struct device_compatible_entry *dce = NULL;
2780
2781 if (mask == 0)
2782 return 0;
2783
2784 for (dce = driver_compats; dce->id != sentinel_id; dce++) {
2785 if ((id & mask) == dce->id) {
2786 if (matching_entryp != NULL) {
2787 *matching_entryp = dce;
2788 }
2789 return 1;
2790 }
2791 }
2792 return 0;
2793 }
2794
2795 /*
2796 * device_compatible_match_id:
2797 *
2798 * Like device_compatible_match(), but takes a single
2799 * unsigned integer device ID.
2800 */
2801 int
2802 device_compatible_match_id(
2803 uintptr_t const id, uintptr_t const sentinel_id,
2804 const struct device_compatible_entry *driver_compats)
2805 {
2806 return device_compatible_match_id_internal(id, (uintptr_t)-1,
2807 sentinel_id, driver_compats, NULL);
2808 }
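
/*
 * Illustrative sketch (hypothetical IDs): matching on an integer ID,
 * with the table terminated by an entry whose id equals the sentinel
 * value passed in:
 *
 *	static const struct device_compatible_entry id_data[] = {
 *		{ .id = 0x1234 },
 *		{ .id = 0x5678 },
 *		{ .id = 0 }
 *	};
 *
 *	if (device_compatible_match_id(chip_id, 0, id_data))
 *		...
 */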
2809
2810 /*
2811 * device_compatible_lookup:
2812 *
2813 * Look up and return the device_compatible_entry, using the
2814 * same matching criteria used by device_compatible_match().
2815 */
2816 const struct device_compatible_entry *
2817 device_compatible_lookup(const char **device_compats, int ndevice_compats,
2818 const struct device_compatible_entry *driver_compats)
2819 {
2820 const struct device_compatible_entry *dce;
2821
2822 if (device_compatible_match_strarray_internal(device_compats,
2823 ndevice_compats, driver_compats, &dce, strarray_match)) {
2824 return dce;
2825 }
2826 return NULL;
2827 }
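
/*
 * Illustrative sketch: when the driver also needs per-entry data, it
 * looks up the matching entry and reads whatever driver-specific
 * field its table stored there.  The "value"/"quirks" usage below is
 * an assumption about how the caller populated its table and about
 * the auxiliary members of device_compatible_entry in <sys/device.h>,
 * not something this file requires:
 *
 *	const struct device_compatible_entry *dce;
 *
 *	dce = device_compatible_lookup(device_compats, ndevice_compats,
 *	    compat_data);
 *	if (dce != NULL)
 *		sc->sc_quirks = dce->value;
 */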
2828
2829 /*
2830 * device_compatible_plookup:
2831 *
2832 * Look up and return the device_compatible_entry, using the
2833 * same matching criteria used by device_compatible_pmatch().
2834 */
2835 const struct device_compatible_entry *
2836 device_compatible_plookup(const char **device_compats, int ndevice_compats,
2837 const struct device_compatible_entry *driver_compats)
2838 {
2839 const struct device_compatible_entry *dce;
2840
2841 if (device_compatible_match_strarray_internal(device_compats,
2842 ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
2843 return dce;
2844 }
2845 return NULL;
2846 }
2847
2848 /*
2849 * device_compatible_lookup_strlist:
2850 *
2851 	 *	Like device_compatible_lookup(), but takes the device
2852 * "compatible" strings as an OpenFirmware-style string
2853 * list.
2854 */
2855 const struct device_compatible_entry *
2856 device_compatible_lookup_strlist(
2857 const char * const device_compats, size_t const device_compatsize,
2858 const struct device_compatible_entry *driver_compats)
2859 {
2860 const struct device_compatible_entry *dce;
2861
2862 if (device_compatible_match_strlist_internal(device_compats,
2863 device_compatsize, driver_compats, &dce, strlist_match)) {
2864 return dce;
2865 }
2866 return NULL;
2867 }
2868
2869 /*
2870 * device_compatible_plookup_strlist:
2871 *
2872 	 *	Like device_compatible_plookup(), but takes the device
2873 * "compatible" strings as an OpenFirmware-style string
2874 * list.
2875 */
2876 const struct device_compatible_entry *
2877 device_compatible_plookup_strlist(
2878 const char * const device_compats, size_t const device_compatsize,
2879 const struct device_compatible_entry *driver_compats)
2880 {
2881 const struct device_compatible_entry *dce;
2882
2883 if (device_compatible_match_strlist_internal(device_compats,
2884 device_compatsize, driver_compats, &dce, strlist_pmatch)) {
2885 return dce;
2886 }
2887 return NULL;
2888 }
2889
2890 /*
2891 * device_compatible_lookup_id:
2892 *
2893 * Like device_compatible_lookup(), but takes a single
2894 * unsigned integer device ID.
2895 */
2896 const struct device_compatible_entry *
2897 device_compatible_lookup_id(
2898 uintptr_t const id, uintptr_t const sentinel_id,
2899 const struct device_compatible_entry *driver_compats)
2900 {
2901 const struct device_compatible_entry *dce;
2902
2903 if (device_compatible_match_id_internal(id, (uintptr_t)-1,
2904 sentinel_id, driver_compats, &dce)) {
2905 return dce;
2906 }
2907 return NULL;
2908 }
2909
2910 /*
2911 * Power management related functions.
2912 */
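
/*
 * Suspend proceeds from the innermost layer outward (class, then
 * driver, then bus) and resume proceeds in the opposite order; the
 * flag checks in the functions below enforce that ordering.  These
 * hooks are normally installed and called via pmf(9) rather than
 * invoked directly by drivers.
 */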
2913
2914 bool
2915 device_pmf_is_registered(device_t dev)
2916 {
2917 return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
2918 }
2919
2920 bool
2921 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
2922 {
2923 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
2924 return true;
2925 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
2926 return false;
2927 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
2928 dev->dv_driver_suspend != NULL &&
2929 !(*dev->dv_driver_suspend)(dev, qual))
2930 return false;
2931
2932 dev->dv_flags |= DVF_DRIVER_SUSPENDED;
2933 return true;
2934 }
2935
2936 bool
2937 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
2938 {
2939 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
2940 return true;
2941 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
2942 return false;
2943 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
2944 dev->dv_driver_resume != NULL &&
2945 !(*dev->dv_driver_resume)(dev, qual))
2946 return false;
2947
2948 dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
2949 return true;
2950 }
2951
2952 bool
2953 device_pmf_driver_shutdown(device_t dev, int how)
2954 {
2955
2956 	if (dev->dv_driver_shutdown != NULL &&
2957 !(*dev->dv_driver_shutdown)(dev, how))
2958 return false;
2959 return true;
2960 }
2961
2962 bool
2963 device_pmf_driver_register(device_t dev,
2964 bool (*suspend)(device_t, const pmf_qual_t *),
2965 bool (*resume)(device_t, const pmf_qual_t *),
2966 bool (*shutdown)(device_t, int))
2967 {
2968 dev->dv_driver_suspend = suspend;
2969 dev->dv_driver_resume = resume;
2970 dev->dv_driver_shutdown = shutdown;
2971 dev->dv_flags |= DVF_POWER_HANDLERS;
2972 return true;
2973 }
2974
2975 static const char *
2976 curlwp_name(void)
2977 {
2978 if (curlwp->l_name != NULL)
2979 return curlwp->l_name;
2980 else
2981 return curlwp->l_proc->p_comm;
2982 }
2983
2984 void
2985 device_pmf_driver_deregister(device_t dev)
2986 {
2987 device_lock_t dvl = device_getlock(dev);
2988
2989 dev->dv_driver_suspend = NULL;
2990 dev->dv_driver_resume = NULL;
2991
2992 mutex_enter(&dvl->dvl_mtx);
2993 dev->dv_flags &= ~DVF_POWER_HANDLERS;
2994 while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
2995 /* Wake a thread that waits for the lock. That
2996 * thread will fail to acquire the lock, and then
2997 * it will wake the next thread that waits for the
2998 * lock, or else it will wake us.
2999 */
3000 cv_signal(&dvl->dvl_cv);
3001 pmflock_debug(dev, __func__, __LINE__);
3002 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3003 pmflock_debug(dev, __func__, __LINE__);
3004 }
3005 mutex_exit(&dvl->dvl_mtx);
3006 }
3007
3008 bool
3009 device_pmf_driver_child_register(device_t dev)
3010 {
3011 device_t parent = device_parent(dev);
3012
3013 if (parent == NULL || parent->dv_driver_child_register == NULL)
3014 return true;
3015 return (*parent->dv_driver_child_register)(dev);
3016 }
3017
3018 void
3019 device_pmf_driver_set_child_register(device_t dev,
3020 bool (*child_register)(device_t))
3021 {
3022 dev->dv_driver_child_register = child_register;
3023 }
3024
3025 static void
3026 pmflock_debug(device_t dev, const char *func, int line)
3027 {
3028 device_lock_t dvl = device_getlock(dev);
3029
3030 aprint_debug_dev(dev,
3031 "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3032 curlwp_name(), dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3033 }
3034
3035 static bool
3036 device_pmf_lock1(device_t dev)
3037 {
3038 device_lock_t dvl = device_getlock(dev);
3039
3040 while (device_pmf_is_registered(dev) &&
3041 dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3042 dvl->dvl_nwait++;
3043 pmflock_debug(dev, __func__, __LINE__);
3044 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3045 pmflock_debug(dev, __func__, __LINE__);
3046 dvl->dvl_nwait--;
3047 }
3048 if (!device_pmf_is_registered(dev)) {
3049 pmflock_debug(dev, __func__, __LINE__);
3050 /* We could not acquire the lock, but some other thread may
3051 		 * also be waiting for it.  Wake that thread.
3052 */
3053 cv_signal(&dvl->dvl_cv);
3054 return false;
3055 }
3056 dvl->dvl_nlock++;
3057 dvl->dvl_holder = curlwp;
3058 pmflock_debug(dev, __func__, __LINE__);
3059 return true;
3060 }
3061
3062 bool
3063 device_pmf_lock(device_t dev)
3064 {
3065 bool rc;
3066 device_lock_t dvl = device_getlock(dev);
3067
3068 mutex_enter(&dvl->dvl_mtx);
3069 rc = device_pmf_lock1(dev);
3070 mutex_exit(&dvl->dvl_mtx);
3071
3072 return rc;
3073 }
3074
3075 void
3076 device_pmf_unlock(device_t dev)
3077 {
3078 device_lock_t dvl = device_getlock(dev);
3079
3080 KASSERT(dvl->dvl_nlock > 0);
3081 mutex_enter(&dvl->dvl_mtx);
3082 if (--dvl->dvl_nlock == 0)
3083 dvl->dvl_holder = NULL;
3084 cv_signal(&dvl->dvl_cv);
3085 pmflock_debug(dev, __func__, __LINE__);
3086 mutex_exit(&dvl->dvl_mtx);
3087 }
3088
3089 device_lock_t
3090 device_getlock(device_t dev)
3091 {
3092 return &dev->dv_lock;
3093 }
3094
3095 void *
3096 device_pmf_bus_private(device_t dev)
3097 {
3098 return dev->dv_bus_private;
3099 }
3100
3101 bool
3102 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3103 {
3104 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3105 return true;
3106 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3107 (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3108 return false;
3109 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3110 dev->dv_bus_suspend != NULL &&
3111 !(*dev->dv_bus_suspend)(dev, qual))
3112 return false;
3113
3114 dev->dv_flags |= DVF_BUS_SUSPENDED;
3115 return true;
3116 }
3117
3118 bool
3119 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3120 {
3121 if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3122 return true;
3123 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3124 dev->dv_bus_resume != NULL &&
3125 !(*dev->dv_bus_resume)(dev, qual))
3126 return false;
3127
3128 dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3129 return true;
3130 }
3131
3132 bool
3133 device_pmf_bus_shutdown(device_t dev, int how)
3134 {
3135
3136 	if (dev->dv_bus_shutdown != NULL &&
3137 !(*dev->dv_bus_shutdown)(dev, how))
3138 return false;
3139 return true;
3140 }
3141
3142 void
3143 device_pmf_bus_register(device_t dev, void *priv,
3144 bool (*suspend)(device_t, const pmf_qual_t *),
3145 bool (*resume)(device_t, const pmf_qual_t *),
3146 bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3147 {
3148 dev->dv_bus_private = priv;
3149 dev->dv_bus_resume = resume;
3150 dev->dv_bus_suspend = suspend;
3151 dev->dv_bus_shutdown = shutdown;
3152 dev->dv_bus_deregister = deregister;
3153 }
3154
3155 void
3156 device_pmf_bus_deregister(device_t dev)
3157 {
3158 if (dev->dv_bus_deregister == NULL)
3159 return;
3160 (*dev->dv_bus_deregister)(dev);
3161 dev->dv_bus_private = NULL;
3162 dev->dv_bus_suspend = NULL;
3163 dev->dv_bus_resume = NULL;
3164 dev->dv_bus_deregister = NULL;
3165 }
3166
3167 void *
3168 device_pmf_class_private(device_t dev)
3169 {
3170 return dev->dv_class_private;
3171 }
3172
3173 bool
3174 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3175 {
3176 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3177 return true;
3178 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3179 dev->dv_class_suspend != NULL &&
3180 !(*dev->dv_class_suspend)(dev, qual))
3181 return false;
3182
3183 dev->dv_flags |= DVF_CLASS_SUSPENDED;
3184 return true;
3185 }
3186
3187 bool
3188 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3189 {
3190 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3191 return true;
3192 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3193 (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3194 return false;
3195 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3196 dev->dv_class_resume != NULL &&
3197 !(*dev->dv_class_resume)(dev, qual))
3198 return false;
3199
3200 dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3201 return true;
3202 }
3203
3204 void
3205 device_pmf_class_register(device_t dev, void *priv,
3206 bool (*suspend)(device_t, const pmf_qual_t *),
3207 bool (*resume)(device_t, const pmf_qual_t *),
3208 void (*deregister)(device_t))
3209 {
3210 dev->dv_class_private = priv;
3211 dev->dv_class_suspend = suspend;
3212 dev->dv_class_resume = resume;
3213 dev->dv_class_deregister = deregister;
3214 }
3215
3216 void
3217 device_pmf_class_deregister(device_t dev)
3218 {
3219 if (dev->dv_class_deregister == NULL)
3220 return;
3221 (*dev->dv_class_deregister)(dev);
3222 dev->dv_class_private = NULL;
3223 dev->dv_class_suspend = NULL;
3224 dev->dv_class_resume = NULL;
3225 dev->dv_class_deregister = NULL;
3226 }
3227
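/*
 * Call each activity handler registered on `dev' with the given
 * activity type.  Returns false if no handlers are registered,
 * true otherwise.
 */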
3228 bool
3229 device_active(device_t dev, devactive_t type)
3230 {
3231 size_t i;
3232
3233 if (dev->dv_activity_count == 0)
3234 return false;
3235
3236 for (i = 0; i < dev->dv_activity_count; ++i) {
3237 if (dev->dv_activity_handlers[i] == NULL)
3238 break;
3239 (*dev->dv_activity_handlers[i])(dev, type);
3240 }
3241
3242 return true;
3243 }
3244
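/*
 * Register `handler' in the first free slot of the device's activity
 * handler array, growing the array by four entries (KM_SLEEP) when it
 * is full.  Registering the same handler twice is not allowed.
 */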
3245 bool
3246 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3247 {
3248 void (**new_handlers)(device_t, devactive_t);
3249 void (**old_handlers)(device_t, devactive_t);
3250 size_t i, old_size, new_size;
3251 int s;
3252
3253 old_handlers = dev->dv_activity_handlers;
3254 old_size = dev->dv_activity_count;
3255
3256 KASSERT(old_size == 0 || old_handlers != NULL);
3257
3258 for (i = 0; i < old_size; ++i) {
3259 KASSERT(old_handlers[i] != handler);
3260 if (old_handlers[i] == NULL) {
3261 old_handlers[i] = handler;
3262 return true;
3263 }
3264 }
3265
3266 new_size = old_size + 4;
3267 new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3268
3269 for (i = 0; i < old_size; ++i)
3270 new_handlers[i] = old_handlers[i];
3271 new_handlers[old_size] = handler;
3272 for (i = old_size+1; i < new_size; ++i)
3273 new_handlers[i] = NULL;
3274
3275 s = splhigh();
3276 dev->dv_activity_count = new_size;
3277 dev->dv_activity_handlers = new_handlers;
3278 splx(s);
3279
3280 if (old_size > 0)
3281 kmem_free(old_handlers, sizeof(void *) * old_size);
3282
3283 return true;
3284 }
3285
3286 void
3287 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3288 {
3289 void (**old_handlers)(device_t, devactive_t);
3290 size_t i, old_size;
3291 int s;
3292
3293 old_handlers = dev->dv_activity_handlers;
3294 old_size = dev->dv_activity_count;
3295
3296 for (i = 0; i < old_size; ++i) {
3297 if (old_handlers[i] == handler)
3298 break;
3299 if (old_handlers[i] == NULL)
3300 return; /* XXX panic? */
3301 }
3302
3303 if (i == old_size)
3304 return; /* XXX panic? */
3305
3306 for (; i < old_size - 1; ++i) {
3307 if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3308 continue;
3309
3310 if (i == 0) {
3311 s = splhigh();
3312 dev->dv_activity_count = 0;
3313 dev->dv_activity_handlers = NULL;
3314 splx(s);
3315 kmem_free(old_handlers, sizeof(void *) * old_size);
3316 }
3317 return;
3318 }
3319 old_handlers[i] = NULL;
3320 }
3321
3322 /* Return true iff the device_t `dv' exists at generation `gen'. */
3323 static bool
3324 device_exists_at(device_t dv, devgen_t gen)
3325 {
3326 return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3327 dv->dv_add_gen <= gen;
3328 }
3329
3330 static bool
3331 deviter_visits(const deviter_t *di, device_t dv)
3332 {
3333 return device_exists_at(dv, di->di_gen);
3334 }
3335
3336 /*
3337 * Device Iteration
3338 *
3339 * deviter_t: a device iterator. Holds state for a "walk" visiting
3340 * each device_t's in the device tree.
3341 *
3342 * deviter_init(di, flags): initialize the device iterator `di'
3343 * to "walk" the device tree. deviter_next(di) will return
3344 * the first device_t in the device tree, or NULL if there are
3345 * no devices.
3346 *
3347 * `flags' is one or more of DEVITER_F_RW, indicating that the
3348 * caller intends to modify the device tree by calling
3349 * config_detach(9) on devices in the order that the iterator
3350 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3351 * nearest the "root" of the device tree to be returned, first;
3352 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3353 * the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3354 * indicating both that deviter_init() should not respect any
3355 * locks on the device tree, and that deviter_next(di) may run
3356 * in more than one LWP before the walk has finished.
3357 *
3358 * Only one DEVITER_F_RW iterator may be in the device tree at
3359 * once.
3360 *
3361 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3362 *
3363 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3364 * DEVITER_F_LEAVES_FIRST are used in combination.
3365 *
3366 * deviter_first(di, flags): initialize the device iterator `di'
3367 * and return the first device_t in the device tree, or NULL
3368 * if there are no devices. The statement
3369 *
3370 	 *	dv = deviter_first(di, flags);
3371 *
3372 * is shorthand for
3373 *
3374 	 *	deviter_init(di, flags);
3375 * dv = deviter_next(di);
3376 *
3377 * deviter_next(di): return the next device_t in the device tree,
3378 * or NULL if there are no more devices. deviter_next(di)
3379 * is undefined if `di' was not initialized with deviter_init() or
3380 * deviter_first().
3381 *
3382 * deviter_release(di): stops iteration (subsequent calls to
3383 * deviter_next() will return NULL), releases any locks and
3384 * resources held by the device iterator.
3385 *
3386 * Device iteration does not return device_t's in any particular
3387 * order. An iterator will never return the same device_t twice.
3388 * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3389 * is called repeatedly on the same `di', it will eventually return
3390 * NULL. It is ok to attach/detach devices during device iteration.
3391 */
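
/*
 * Illustrative sketch: a read-only walk over every device, printing
 * each one's name:
 *
 *	deviter_t di;
 *	device_t dv;
 *
 *	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
 *	     dv = deviter_next(&di)) {
 *		printf("%s\n", device_xname(dv));
 *	}
 *	deviter_release(&di);
 */
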
3392 void
3393 deviter_init(deviter_t *di, deviter_flags_t flags)
3394 {
3395 device_t dv;
3396
3397 memset(di, 0, sizeof(*di));
3398
3399 if ((flags & DEVITER_F_SHUTDOWN) != 0)
3400 flags |= DEVITER_F_RW;
3401
3402 mutex_enter(&alldevs_lock);
3403 if ((flags & DEVITER_F_RW) != 0)
3404 alldevs_nwrite++;
3405 else
3406 alldevs_nread++;
3407 di->di_gen = alldevs_gen++;
3408 di->di_flags = flags;
3409
3410 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3411 case DEVITER_F_LEAVES_FIRST:
3412 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3413 if (!deviter_visits(di, dv))
3414 continue;
3415 di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3416 }
3417 break;
3418 case DEVITER_F_ROOT_FIRST:
3419 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3420 if (!deviter_visits(di, dv))
3421 continue;
3422 di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3423 }
3424 break;
3425 default:
3426 break;
3427 }
3428
3429 deviter_reinit(di);
3430 mutex_exit(&alldevs_lock);
3431 }
3432
3433 static void
3434 deviter_reinit(deviter_t *di)
3435 {
3436
3437 KASSERT(mutex_owned(&alldevs_lock));
3438 if ((di->di_flags & DEVITER_F_RW) != 0)
3439 di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3440 else
3441 di->di_prev = TAILQ_FIRST(&alldevs);
3442 }
3443
3444 device_t
3445 deviter_first(deviter_t *di, deviter_flags_t flags)
3446 {
3447
3448 deviter_init(di, flags);
3449 return deviter_next(di);
3450 }
3451
3452 static device_t
3453 deviter_next2(deviter_t *di)
3454 {
3455 device_t dv;
3456
3457 KASSERT(mutex_owned(&alldevs_lock));
3458
3459 dv = di->di_prev;
3460
3461 if (dv == NULL)
3462 return NULL;
3463
3464 if ((di->di_flags & DEVITER_F_RW) != 0)
3465 di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3466 else
3467 di->di_prev = TAILQ_NEXT(dv, dv_list);
3468
3469 return dv;
3470 }
3471
3472 static device_t
3473 deviter_next1(deviter_t *di)
3474 {
3475 device_t dv;
3476
3477 KASSERT(mutex_owned(&alldevs_lock));
3478
3479 do {
3480 dv = deviter_next2(di);
3481 } while (dv != NULL && !deviter_visits(di, dv));
3482
3483 return dv;
3484 }
3485
3486 device_t
3487 deviter_next(deviter_t *di)
3488 {
3489 device_t dv = NULL;
3490
3491 mutex_enter(&alldevs_lock);
3492 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3493 case 0:
3494 dv = deviter_next1(di);
3495 break;
3496 case DEVITER_F_LEAVES_FIRST:
3497 while (di->di_curdepth >= 0) {
3498 if ((dv = deviter_next1(di)) == NULL) {
3499 di->di_curdepth--;
3500 deviter_reinit(di);
3501 } else if (dv->dv_depth == di->di_curdepth)
3502 break;
3503 }
3504 break;
3505 case DEVITER_F_ROOT_FIRST:
3506 while (di->di_curdepth <= di->di_maxdepth) {
3507 if ((dv = deviter_next1(di)) == NULL) {
3508 di->di_curdepth++;
3509 deviter_reinit(di);
3510 } else if (dv->dv_depth == di->di_curdepth)
3511 break;
3512 }
3513 break;
3514 default:
3515 break;
3516 }
3517 mutex_exit(&alldevs_lock);
3518
3519 return dv;
3520 }
3521
3522 void
3523 deviter_release(deviter_t *di)
3524 {
3525 bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3526
3527 mutex_enter(&alldevs_lock);
3528 if (rw)
3529 --alldevs_nwrite;
3530 else
3531 --alldevs_nread;
3532 /* XXX wake a garbage-collection thread */
3533 mutex_exit(&alldevs_lock);
3534 }
3535
3536 const char *
3537 cfdata_ifattr(const struct cfdata *cf)
3538 {
3539 return cf->cf_pspec->cfp_iattr;
3540 }
3541
3542 bool
3543 ifattr_match(const char *snull, const char *t)
3544 {
3545 return (snull == NULL) || strcmp(snull, t) == 0;
3546 }
3547
3548 void
3549 null_childdetached(device_t self, device_t child)
3550 {
3551 /* do nothing */
3552 }
3553
3554 static void
3555 sysctl_detach_setup(struct sysctllog **clog)
3556 {
3557
3558 sysctl_createv(clog, 0, NULL, NULL,
3559 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
3560 CTLTYPE_BOOL, "detachall",
3561 SYSCTL_DESCR("Detach all devices at shutdown"),
3562 NULL, 0, &detachall, 0,
3563 CTL_KERN, CTL_CREATE, CTL_EOL);
3564 }
3565