1 /* $NetBSD: subr_autoconf.c,v 1.302 2022/08/12 16:16:12 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1996, 2000 Christopher G. Demetriou
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the
18 * NetBSD Project. See http://www.NetBSD.org/ for
19 * information about NetBSD.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35 */
36
37 /*
38 * Copyright (c) 1992, 1993
39 * The Regents of the University of California. All rights reserved.
40 *
41 * This software was developed by the Computer Systems Engineering group
42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43 * contributed to Berkeley.
44 *
45 * All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Lawrence Berkeley Laboratories.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. Neither the name of the University nor the names of its contributors
59 * may be used to endorse or promote products derived from this software
60 * without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL)
75 *
76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.302 2022/08/12 16:16:12 riastradh Exp $");
81
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113
114 #include <sys/disk.h>
115
116 #include <sys/rndsource.h>
117
118 #include <machine/limits.h>
119
120 /*
121 * Autoconfiguration subroutines.
122 */
123
124 /*
125 * Device autoconfiguration timings are mixed into the entropy pool.
126 */
127 static krndsource_t rnd_autoconf_source;
128
129 /*
130 * ioconf.c exports exactly two names: cfdata and cfroots. All system
131 * devices and drivers are found via these tables.
132 */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135
136 /*
137 * List of all cfdriver structures. We use this to detect duplicates
138 * when other cfdrivers are loaded.
139 */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142
143 /*
144 * Initial list of cfattach structures.
145 */
146 extern const struct cfattachinit cfattachinit[];
147
148 /*
149 * List of cfdata tables. There is always at least one entry: the
150 * table built statically when the kernel was configured.
151 */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154
155 #define ROOT ((device_t)NULL)
156
157 struct matchinfo {
158 cfsubmatch_t fn;
159 device_t parent;
160 const int *locs;
161 void *aux;
162 struct cfdata *match;
163 int pri;
164 };
165
166 struct alldevs_foray {
167 int af_s;
168 struct devicelist af_garbage;
169 };
170
171 /*
172 * Internal version of the cfargs structure; all versions are
173 * canonicalized to this.
174 */
175 struct cfargs_internal {
176 union {
177 cfsubmatch_t submatch;/* submatch function (direct config) */
178 cfsearch_t search; /* search function (indirect config) */
179 };
180 const char * iattr; /* interface attribute */
181 const int * locators; /* locators array */
182 devhandle_t devhandle; /* devhandle_t (by value) */
183 };
184
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t config_attach_internal(device_t, cfdata_t, void *,
195 cfprint_t, const struct cfargs_internal *);
196
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199
200 static void pmflock_debug(device_t, const char *, int);
201
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204
205 struct deferred_config {
206 TAILQ_ENTRY(deferred_config) dc_queue;
207 device_t dc_dev;
208 void (*dc_func)(device_t);
209 };
210
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212
213 static struct deferred_config_head deferred_config_queue =
214 TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 TAILQ_ENTRY(finalize_hook) f_list;
230 int (*f_func)(device_t);
231 device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244
245 static struct devicelist config_pending =
246 TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249
250 static bool detachall = false;
251
252 #define STREQ(s1, s2) \
253 (*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254
255 static bool config_initialized = false; /* config_init() has been called. */
256
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259
260 static void sysctl_detach_setup(struct sysctllog **);
261
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264
265 typedef int (*cfdriver_fn)(struct cfdriver *);
266 static int
267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
268 cfdriver_fn drv_do, cfdriver_fn drv_undo,
269 const char *style, bool dopanic)
270 {
271 void (*pr)(const char *, ...) __printflike(1, 2) =
272 dopanic ? panic : printf;
273 int i, error = 0, e2 __diagused;
274
275 for (i = 0; cfdriverv[i] != NULL; i++) {
276 if ((error = drv_do(cfdriverv[i])) != 0) {
277 pr("configure: `%s' driver %s failed: %d",
278 cfdriverv[i]->cd_name, style, error);
279 goto bad;
280 }
281 }
282
283 KASSERT(error == 0);
284 return 0;
285
286 bad:
287 printf("\n");
288 for (i--; i >= 0; i--) {
289 e2 = drv_undo(cfdriverv[i]);
290 KASSERT(e2 == 0);
291 }
292
293 return error;
294 }
295
296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
297 static int
298 frob_cfattachvec(const struct cfattachinit *cfattachv,
299 cfattach_fn att_do, cfattach_fn att_undo,
300 const char *style, bool dopanic)
301 {
302 const struct cfattachinit *cfai = NULL;
303 void (*pr)(const char *, ...) __printflike(1, 2) =
304 dopanic ? panic : printf;
305 int j = 0, error = 0, e2 __diagused;
306
307 for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
308 for (j = 0; cfai->cfai_list[j] != NULL; j++) {
309 if ((error = att_do(cfai->cfai_name,
310 cfai->cfai_list[j])) != 0) {
311 pr("configure: attachment `%s' "
312 "of `%s' driver %s failed: %d",
313 cfai->cfai_list[j]->ca_name,
314 cfai->cfai_name, style, error);
315 goto bad;
316 }
317 }
318 }
319
320 KASSERT(error == 0);
321 return 0;
322
323 bad:
324 /*
325 * Roll back in reverse order. It is not clear that the ordering
326 * matters, but do it anyway; the loop below is essentially the
327 * forward pass run in reverse.
328 */
329 printf("\n");
330 if (cfai) {
331 bool last;
332
333 for (last = false; last == false; ) {
334 if (cfai == &cfattachv[0])
335 last = true;
336 for (j--; j >= 0; j--) {
337 e2 = att_undo(cfai->cfai_name,
338 cfai->cfai_list[j]);
339 KASSERT(e2 == 0);
340 }
341 if (!last) {
342 cfai--;
343 for (j = 0; cfai->cfai_list[j] != NULL; j++)
344 ;
345 }
346 }
347 }
348
349 return error;
350 }
351
352 /*
353 * Initialize the autoconfiguration data structures. Normally this
354 * is done by configure(), but some platforms need to do this very
355 * early (e.g., to initialize the console).
356 */
357 void
358 config_init(void)
359 {
360
361 KASSERT(config_initialized == false);
362
363 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
364
365 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
366 cv_init(&config_misc_cv, "cfgmisc");
367
368 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
369
370 frob_cfdrivervec(cfdriver_list_initial,
371 config_cfdriver_attach, NULL, "bootstrap", true);
372 frob_cfattachvec(cfattachinit,
373 config_cfattach_attach, NULL, "bootstrap", true);
374
375 initcftable.ct_cfdata = cfdata;
376 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
377
378 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
379 RND_FLAG_COLLECT_TIME);
380
381 config_initialized = true;
382 }
383
384 /*
385 * Init or fini drivers and attachments. Either all or none
386 * are processed (via rollback). It would be nice if this were
387 * atomic to outside consumers, but with the current state of
388 * locking ...
389 */
390 int
391 config_init_component(struct cfdriver * const *cfdriverv,
392 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
393 {
394 int error;
395
396 KERNEL_LOCK(1, NULL);
397
398 if ((error = frob_cfdrivervec(cfdriverv,
399 config_cfdriver_attach, config_cfdriver_detach, "init", false))!= 0)
400 goto out;
401 if ((error = frob_cfattachvec(cfattachv,
402 config_cfattach_attach, config_cfattach_detach,
403 "init", false)) != 0) {
404 frob_cfdrivervec(cfdriverv,
405 config_cfdriver_detach, NULL, "init rollback", true);
406 goto out;
407 }
408 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
409 frob_cfattachvec(cfattachv,
410 config_cfattach_detach, NULL, "init rollback", true);
411 frob_cfdrivervec(cfdriverv,
412 config_cfdriver_detach, NULL, "init rollback", true);
413 goto out;
414 }
415
416 /* Success! */
417 error = 0;
418
419 out: KERNEL_UNLOCK_ONE(NULL);
420 return error;
421 }
422
423 int
424 config_fini_component(struct cfdriver * const *cfdriverv,
425 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
426 {
427 int error;
428
429 KERNEL_LOCK(1, NULL);
430
431 if ((error = config_cfdata_detach(cfdatav)) != 0)
432 goto out;
433 if ((error = frob_cfattachvec(cfattachv,
434 config_cfattach_detach, config_cfattach_attach,
435 "fini", false)) != 0) {
436 if (config_cfdata_attach(cfdatav, 0) != 0)
437 panic("config_cfdata fini rollback failed");
438 goto out;
439 }
440 if ((error = frob_cfdrivervec(cfdriverv,
441 config_cfdriver_detach, config_cfdriver_attach,
442 "fini", false)) != 0) {
443 frob_cfattachvec(cfattachv,
444 config_cfattach_attach, NULL, "fini rollback", true);
445 if (config_cfdata_attach(cfdatav, 0) != 0)
446 panic("config_cfdata fini rollback failed");
447 goto out;
448 }
449
450 /* Success! */
451 error = 0;
452
453 out: KERNEL_UNLOCK_ONE(NULL);
454 return error;
455 }
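/*
 * Illustrative sketch (not part of this file): a loadable driver module
 * normally feeds its config(1)-generated tables to the two functions
 * above from its module command hook.  The "foo" names are hypothetical;
 * real modules use the ioconf symbols emitted for them by config(1).
 */
#if 0
#include <sys/module.h>

MODULE(MODULE_CLASS_DRIVER, foo, NULL);

static int
foo_modcmd(modcmd_t cmd, void *opaque)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Registers drivers, attachments, cfdata; rolls back on error. */
		return config_init_component(cfdriver_ioconf_foo,
		    cfattach_ioconf_foo, cfdata_ioconf_foo);
	case MODULE_CMD_FINI:
		/* Reverse of the above; fails if instances are still attached. */
		return config_fini_component(cfdriver_ioconf_foo,
		    cfattach_ioconf_foo, cfdata_ioconf_foo);
	default:
		return ENOTTY;
	}
}
#endif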
456
457 void
458 config_init_mi(void)
459 {
460
461 if (!config_initialized)
462 config_init();
463
464 sysctl_detach_setup(NULL);
465 }
466
467 void
468 config_deferred(device_t dev)
469 {
470
471 KASSERT(KERNEL_LOCKED_P());
472
473 config_process_deferred(&deferred_config_queue, dev);
474 config_process_deferred(&interrupt_config_queue, dev);
475 config_process_deferred(&mountroot_config_queue, dev);
476 }
477
478 static void
479 config_interrupts_thread(void *cookie)
480 {
481 struct deferred_config *dc;
482 device_t dev;
483
484 mutex_enter(&config_misc_lock);
485 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
486 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
487 mutex_exit(&config_misc_lock);
488
489 dev = dc->dc_dev;
490 (*dc->dc_func)(dev);
491 if (!device_pmf_is_registered(dev))
492 aprint_debug_dev(dev,
493 "WARNING: power management not supported\n");
494 config_pending_decr(dev);
495 kmem_free(dc, sizeof(*dc));
496
497 mutex_enter(&config_misc_lock);
498 }
499 mutex_exit(&config_misc_lock);
500
501 kthread_exit(0);
502 }
503
504 void
505 config_create_interruptthreads(void)
506 {
507 int i;
508
509 for (i = 0; i < interrupt_config_threads; i++) {
510 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 config_interrupts_thread, NULL, NULL, "configintr");
512 }
513 }
514
515 static void
516 config_mountroot_thread(void *cookie)
517 {
518 struct deferred_config *dc;
519
520 mutex_enter(&config_misc_lock);
521 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
522 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
523 mutex_exit(&config_misc_lock);
524
525 (*dc->dc_func)(dc->dc_dev);
526 kmem_free(dc, sizeof(*dc));
527
528 mutex_enter(&config_misc_lock);
529 }
530 mutex_exit(&config_misc_lock);
531
532 kthread_exit(0);
533 }
534
535 void
536 config_create_mountrootthreads(void)
537 {
538 int i;
539
540 if (!root_is_mounted)
541 root_is_mounted = true;
542
543 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 mountroot_config_threads;
545 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 KM_NOSLEEP);
547 KASSERT(mountroot_config_lwpids);
548 for (i = 0; i < mountroot_config_threads; i++) {
549 mountroot_config_lwpids[i] = 0;
550 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 NULL, config_mountroot_thread, NULL,
552 &mountroot_config_lwpids[i],
553 "configroot");
554 }
555 }
556
557 void
558 config_finalize_mountroot(void)
559 {
560 int i, error;
561
562 for (i = 0; i < mountroot_config_threads; i++) {
563 if (mountroot_config_lwpids[i] == 0)
564 continue;
565
566 error = kthread_join(mountroot_config_lwpids[i]);
567 if (error)
568 printf("%s: thread %x joined with error %d\n",
569 __func__, i, error);
570 }
571 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
572 }
573
574 /*
575 * Announce device attach/detach to userland listeners.
576 */
577
578 int
579 no_devmon_insert(const char *name, prop_dictionary_t p)
580 {
581
582 return ENODEV;
583 }
584
585 static void
586 devmon_report_device(device_t dev, bool isattach)
587 {
588 prop_dictionary_t ev, dict = device_properties(dev);
589 const char *parent;
590 const char *what;
591 const char *where;
592 device_t pdev = device_parent(dev);
593
594 /* If currently no drvctl device, just return */
595 if (devmon_insert_vec == no_devmon_insert)
596 return;
597
598 ev = prop_dictionary_create();
599 if (ev == NULL)
600 return;
601
602 what = (isattach ? "device-attach" : "device-detach");
603 parent = (pdev == NULL ? "root" : device_xname(pdev));
604 if (prop_dictionary_get_string(dict, "location", &where)) {
605 prop_dictionary_set_string(ev, "location", where);
606 aprint_debug("ev: %s %s at %s in [%s]\n",
607 what, device_xname(dev), parent, where);
608 }
609 if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
610 !prop_dictionary_set_string(ev, "parent", parent)) {
611 prop_object_release(ev);
612 return;
613 }
614
615 if ((*devmon_insert_vec)(what, ev) != 0)
616 prop_object_release(ev);
617 }
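/*
 * For reference, the event dictionary built above and handed to the
 * drvctl(4) listener looks roughly like this (device names are
 * hypothetical; "location" is present only if the device's properties
 * carry one):
 *
 *	event "device-attach" or "device-detach"
 *	<dict>
 *		<key>device</key>	<string>sd0</string>
 *		<key>parent</key>	<string>scsibus0</string>
 *		<key>location</key>	<string>...</string>
 *	</dict>
 */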
618
619 /*
620 * Add a cfdriver to the system.
621 */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 struct cfdriver *lcd;
626
627 /* Make sure this driver isn't already in the system. */
628 LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 if (STREQ(lcd->cd_name, cd->cd_name))
630 return EEXIST;
631 }
632
633 LIST_INIT(&cd->cd_attach);
634 LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635
636 return 0;
637 }
638
639 /*
640 * Remove a cfdriver from the system.
641 */
642 int
643 config_cfdriver_detach(struct cfdriver *cd)
644 {
645 struct alldevs_foray af;
646 int i, rc = 0;
647
648 config_alldevs_enter(&af);
649 /* Make sure there are no active instances. */
650 for (i = 0; i < cd->cd_ndevs; i++) {
651 if (cd->cd_devs[i] != NULL) {
652 rc = EBUSY;
653 break;
654 }
655 }
656 config_alldevs_exit(&af);
657
658 if (rc != 0)
659 return rc;
660
661 /* ...and no attachments loaded. */
662 if (LIST_EMPTY(&cd->cd_attach) == 0)
663 return EBUSY;
664
665 LIST_REMOVE(cd, cd_list);
666
667 KASSERT(cd->cd_devs == NULL);
668
669 return 0;
670 }
671
672 /*
673 * Look up a cfdriver by name.
674 */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 struct cfdriver *cd;
679
680 LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 if (STREQ(cd->cd_name, name))
682 return cd;
683 }
684
685 return NULL;
686 }
687
688 /*
689 * Add a cfattach to the specified driver.
690 */
691 int
692 config_cfattach_attach(const char *driver, struct cfattach *ca)
693 {
694 struct cfattach *lca;
695 struct cfdriver *cd;
696
697 cd = config_cfdriver_lookup(driver);
698 if (cd == NULL)
699 return ESRCH;
700
701 /* Make sure this attachment isn't already on this driver. */
702 LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
703 if (STREQ(lca->ca_name, ca->ca_name))
704 return EEXIST;
705 }
706
707 LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
708
709 return 0;
710 }
711
712 /*
713 * Remove a cfattach from the specified driver.
714 */
715 int
716 config_cfattach_detach(const char *driver, struct cfattach *ca)
717 {
718 struct alldevs_foray af;
719 struct cfdriver *cd;
720 device_t dev;
721 int i, rc = 0;
722
723 cd = config_cfdriver_lookup(driver);
724 if (cd == NULL)
725 return ESRCH;
726
727 config_alldevs_enter(&af);
728 /* Make sure there are no active instances. */
729 for (i = 0; i < cd->cd_ndevs; i++) {
730 if ((dev = cd->cd_devs[i]) == NULL)
731 continue;
732 if (dev->dv_cfattach == ca) {
733 rc = EBUSY;
734 break;
735 }
736 }
737 config_alldevs_exit(&af);
738
739 if (rc != 0)
740 return rc;
741
742 LIST_REMOVE(ca, ca_list);
743
744 return 0;
745 }
746
747 /*
748 * Look up a cfattach by attachment name within the given driver.
749 */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 struct cfattach *ca;
754
755 LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 if (STREQ(ca->ca_name, atname))
757 return ca;
758 }
759
760 return NULL;
761 }
762
763 /*
764 * Look up a cfattach by driver/attachment name.
765 */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 struct cfdriver *cd;
770
771 cd = config_cfdriver_lookup(name);
772 if (cd == NULL)
773 return NULL;
774
775 return config_cfattach_lookup_cd(cd, atname);
776 }
777
778 /*
779 * Apply the matching function and choose the best. This is used
780 * a few times and we want to keep the code small.
781 */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 int pri;
786
787 if (m->fn != NULL) {
788 pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 } else {
790 pri = config_match(m->parent, cf, m->aux);
791 }
792 if (pri > m->pri) {
793 m->match = cf;
794 m->pri = pri;
795 }
796 }
797
798 int
799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
800 {
801 const struct cfiattrdata *ci;
802 const struct cflocdesc *cl;
803 int nlocs, i;
804
805 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
806 KASSERT(ci);
807 nlocs = ci->ci_loclen;
808 KASSERT(!nlocs || locs);
809 for (i = 0; i < nlocs; i++) {
810 cl = &ci->ci_locdesc[i];
811 if (cl->cld_defaultstr != NULL &&
812 cf->cf_loc[i] == cl->cld_default)
813 continue;
814 if (cf->cf_loc[i] == locs[i])
815 continue;
816 return 0;
817 }
818
819 return config_match(parent, cf, aux);
820 }
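/*
 * Worked example for config_stdsubmatch() (hypothetical names): for a
 * kernel config entry such as "foo0 at bar0 addr 0x100 irq ?", cf_loc[]
 * holds 0x100 for "addr" and the default for "irq".  The match is
 * accepted only if every locator that was given explicitly (addr)
 * equals the corresponding value in locs[] supplied by the bus, while
 * defaulted locators (irq) match anything; the decision is then passed
 * on to the driver's match function via config_match().
 */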
821
822 /*
823 * Helper function: check whether the driver supports the interface attribute
824 * and return its descriptor structure.
825 */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 const struct cfiattrdata * const *cpp;
830
831 if (cd->cd_attrs == NULL)
832 return 0;
833
834 for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 if (STREQ((*cpp)->ci_name, ia)) {
836 /* Match. */
837 return *cpp;
838 }
839 }
840 return 0;
841 }
842
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 const struct cfiattrdata * const *cpp;
847 int i;
848
849 if (cd->cd_attrs == NULL)
850 return 0;
851
852 for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 i++;
854 }
855 return i;
856 }
857
858 /*
859 * Lookup an interface attribute description by name.
860 * If the driver is given, consider only its supported attributes.
861 */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 const struct cfdriver *d;
866 const struct cfiattrdata *ia;
867
868 if (cd)
869 return cfdriver_get_iattr(cd, name);
870
871 LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 ia = cfdriver_get_iattr(d, name);
873 if (ia)
874 return ia;
875 }
876 return 0;
877 }
878
879 /*
880 * Determine if `parent' is a potential parent for a device spec based
881 * on `cfp'.
882 */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 struct cfdriver *pcd;
887
888 /* We don't match root nodes here. */
889 if (cfp == NULL)
890 return 0;
891
892 pcd = parent->dv_cfdriver;
893 KASSERT(pcd != NULL);
894
895 /*
896 * First, ensure this parent has the correct interface
897 * attribute.
898 */
899 if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 return 0;
901
902 /*
903 * If no specific parent device instance was specified (i.e.
904 * we're attaching to the attribute only), we're done!
905 */
906 if (cfp->cfp_parent == NULL)
907 return 1;
908
909 /*
910 * Check the parent device's name.
911 */
912 if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 return 0; /* not the same parent */
914
915 /*
916 * Make sure the unit number matches.
917 */
918 if (cfp->cfp_unit == DVUNIT_ANY || /* wildcard */
919 cfp->cfp_unit == parent->dv_unit)
920 return 1;
921
922 /* Unit numbers don't match. */
923 return 0;
924 }
925
926 /*
927 * Helper for config_cfdata_attach(): check whether any existing device could
928 * be the parent of an attachment in the given config data table, and rescan it.
929 */
930 static void
931 rescan_with_cfdata(const struct cfdata *cf)
932 {
933 device_t d;
934 const struct cfdata *cf1;
935 deviter_t di;
936
937 KASSERT(KERNEL_LOCKED_P());
938
939 /*
940 * "alldevs" is likely longer than a modules's cfdata, so make it
941 * the outer loop.
942 */
943 for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
944
945 if (!(d->dv_cfattach->ca_rescan))
946 continue;
947
948 for (cf1 = cf; cf1->cf_name; cf1++) {
949
950 if (!cfparent_match(d, cf1->cf_pspec))
951 continue;
952
953 (*d->dv_cfattach->ca_rescan)(d,
954 cfdata_ifattr(cf1), cf1->cf_loc);
955
956 config_deferred(d);
957 }
958 }
959 deviter_release(&di);
960 }
961
962 /*
963 * Attach a supplemental config data table and rescan potential
964 * parent devices if required.
965 */
966 int
967 config_cfdata_attach(cfdata_t cf, int scannow)
968 {
969 struct cftable *ct;
970
971 KERNEL_LOCK(1, NULL);
972
973 ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
974 ct->ct_cfdata = cf;
975 TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
976
977 if (scannow)
978 rescan_with_cfdata(cf);
979
980 KERNEL_UNLOCK_ONE(NULL);
981
982 return 0;
983 }
984
985 /*
986 * Helper for config_cfdata_detach: check whether a device is
987 * found through any attachment in the config data table.
988 */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 const struct cfdata *cf1;
993
994 for (cf1 = cf; cf1->cf_name; cf1++)
995 if (d->dv_cfdata == cf1)
996 return 1;
997
998 return 0;
999 }
1000
1001 /*
1002 * Detach a supplemental config data table. First detach all devices that
1003 * were found through that table (and therefore still hold references to it).
1004 */
1005 int
1006 config_cfdata_detach(cfdata_t cf)
1007 {
1008 device_t d;
1009 int error = 0;
1010 struct cftable *ct;
1011 deviter_t di;
1012
1013 KERNEL_LOCK(1, NULL);
1014
1015 for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1016 d = deviter_next(&di)) {
1017 if (!dev_in_cfdata(d, cf))
1018 continue;
1019 if ((error = config_detach(d, 0)) != 0)
1020 break;
1021 }
1022 deviter_release(&di);
1023 if (error) {
1024 aprint_error_dev(d, "unable to detach instance\n");
1025 goto out;
1026 }
1027
1028 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1029 if (ct->ct_cfdata == cf) {
1030 TAILQ_REMOVE(&allcftables, ct, ct_list);
1031 kmem_free(ct, sizeof(*ct));
1032 error = 0;
1033 goto out;
1034 }
1035 }
1036
1037 /* not found -- shouldn't happen */
1038 error = EINVAL;
1039
1040 out: KERNEL_UNLOCK_ONE(NULL);
1041 return error;
1042 }
1043
1044 /*
1045 * Invoke the "match" routine for a cfdata entry on behalf of
1046 * an external caller, usually a direct config "submatch" routine.
1047 */
1048 int
1049 config_match(device_t parent, cfdata_t cf, void *aux)
1050 {
1051 struct cfattach *ca;
1052
1053 KASSERT(KERNEL_LOCKED_P());
1054
1055 ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1056 if (ca == NULL) {
1057 /* No attachment for this entry, oh well. */
1058 return 0;
1059 }
1060
1061 return (*ca->ca_match)(parent, cf, aux);
1062 }
1063
1064 /*
1065 * Invoke the "probe" routine for a cfdata entry on behalf of
1066 * an external caller, usually an indirect config "search" routine.
1067 */
1068 int
1069 config_probe(device_t parent, cfdata_t cf, void *aux)
1070 {
1071 /*
1072 * This is currently a synonym for config_match(), but this
1073 * is an implementation detail; "match" and "probe" routines
1074 * have different behaviors.
1075 *
1076 * XXX config_probe() should return a bool, because there is
1077 * XXX no match score for probe -- it's either there or it's
1078 * XXX not, but some ports abuse the return value as a way
1079 * XXX to attach "critical" devices before "non-critical"
1080 * XXX devices.
1081 */
1082 return config_match(parent, cf, aux);
1083 }
1084
1085 static struct cfargs_internal *
1086 cfargs_canonicalize(const struct cfargs * const cfargs,
1087 struct cfargs_internal * const store)
1088 {
1089 struct cfargs_internal *args = store;
1090
1091 memset(args, 0, sizeof(*args));
1092
1093 /* If no arguments were specified, the all-NULL pointers are fine. */
1094 if (cfargs == NULL) {
1095 return args;
1096 }
1097
1098 /*
1099 * Only one version of the arguments structure is recognized at this time.
1100 */
1101 if (cfargs->cfargs_version != CFARGS_VERSION) {
1102 panic("cfargs_canonicalize: unknown version %lu\n",
1103 (unsigned long)cfargs->cfargs_version);
1104 }
1105
1106 /*
1107 * submatch and search are mutually-exclusive.
1108 */
1109 if (cfargs->submatch != NULL && cfargs->search != NULL) {
1110 panic("cfargs_canonicalize: submatch and search are "
1111 "mutually-exclusive");
1112 }
1113 if (cfargs->submatch != NULL) {
1114 args->submatch = cfargs->submatch;
1115 } else if (cfargs->search != NULL) {
1116 args->search = cfargs->search;
1117 }
1118
1119 args->iattr = cfargs->iattr;
1120 args->locators = cfargs->locators;
1121 args->devhandle = cfargs->devhandle;
1122
1123 return args;
1124 }
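/*
 * Illustrative examples of the public forms canonicalized above, as
 * built with the CFARGS() macro from <sys/device.h> (the names are
 * hypothetical):
 *
 *	CFARGS_NONE				all defaults
 *	CFARGS(.iattr = "foobus")		direct config by attribute
 *	CFARGS(.search = foo_search)		indirect config
 *
 * Supplying both .submatch and .search panics, as checked above.
 */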
1125
1126 /*
1127 * Iterate over all potential children of some device, calling the given
1128 * function (default being the child's match function) for each one.
1129 * Nonzero returns are matches; the highest value returned is considered
1130 * the best match. Return the `found child' if we got a match, or NULL
1131 * otherwise. The `aux' pointer is simply passed on through.
1132 *
1133 * Note that this function is designed so that it can be used to apply
1134 * an arbitrary function to all potential children (its return value
1135 * can be ignored).
1136 */
1137 static cfdata_t
1138 config_search_internal(device_t parent, void *aux,
1139 const struct cfargs_internal * const args)
1140 {
1141 struct cftable *ct;
1142 cfdata_t cf;
1143 struct matchinfo m;
1144
1145 KASSERT(config_initialized);
1146 KASSERT(!args->iattr ||
1147 cfdriver_get_iattr(parent->dv_cfdriver, args->iattr));
1148 KASSERT(args->iattr ||
1149 cfdriver_iattr_count(parent->dv_cfdriver) < 2);
1150
1151 m.fn = args->submatch; /* N.B. union */
1152 m.parent = parent;
1153 m.locs = args->locators;
1154 m.aux = aux;
1155 m.match = NULL;
1156 m.pri = 0;
1157
1158 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1159 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1160
1161 /* We don't match root nodes here. */
1162 if (!cf->cf_pspec)
1163 continue;
1164
1165 /*
1166 * Skip cf if no longer eligible, otherwise scan
1167 * through parents for one matching `parent', and
1168 * try match function.
1169 */
1170 if (cf->cf_fstate == FSTATE_FOUND)
1171 continue;
1172 if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1173 cf->cf_fstate == FSTATE_DSTAR)
1174 continue;
1175
1176 /*
1177 * If an interface attribute was specified,
1178 * consider only children which attach to
1179 * that attribute.
1180 */
1181 if (args->iattr != NULL &&
1182 !STREQ(args->iattr, cfdata_ifattr(cf)))
1183 continue;
1184
1185 if (cfparent_match(parent, cf->cf_pspec))
1186 mapply(&m, cf);
1187 }
1188 }
1189 rnd_add_uint32(&rnd_autoconf_source, 0);
1190 return m.match;
1191 }
1192
1193 cfdata_t
1194 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1195 {
1196 cfdata_t cf;
1197 struct cfargs_internal store;
1198
1199 cf = config_search_internal(parent, aux,
1200 cfargs_canonicalize(cfargs, &store));
1201
1202 return cf;
1203 }
1204
1205 /*
1206 * Find the given root device.
1207 * This is much like config_search, but there is no parent.
1208 * Don't bother with multiple cfdata tables; the root node
1209 * must always be in the initial table.
1210 */
1211 cfdata_t
1212 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1213 {
1214 cfdata_t cf;
1215 const short *p;
1216 struct matchinfo m;
1217
1218 m.fn = fn;
1219 m.parent = ROOT;
1220 m.aux = aux;
1221 m.match = NULL;
1222 m.pri = 0;
1223 m.locs = 0;
1224 /*
1225 * Look at root entries for matching name. We do not bother
1226 * with found-state here since only one root should ever be
1227 * searched (and it must be done first).
1228 */
1229 for (p = cfroots; *p >= 0; p++) {
1230 cf = &cfdata[*p];
1231 if (strcmp(cf->cf_name, rootname) == 0)
1232 mapply(&m, cf);
1233 }
1234 return m.match;
1235 }
1236
1237 static const char * const msgs[] = {
1238 [QUIET] = "",
1239 [UNCONF] = " not configured\n",
1240 [UNSUPP] = " unsupported\n",
1241 };
1242
1243 /*
1244 * The given `aux' argument describes a device that has been found
1245 * on the given parent, but not necessarily configured. Locate the
1246 * configuration data for that device (using the submatch function
1247 * provided, or using candidates' cd_match configuration driver
1248 * functions) and attach it, and return its device_t. If the device was
1249 * not configured, call the given `print' function and return NULL.
1250 */
1251 device_t
1252 config_found(device_t parent, void *aux, cfprint_t print,
1253 const struct cfargs * const cfargs)
1254 {
1255 cfdata_t cf;
1256 struct cfargs_internal store;
1257 const struct cfargs_internal * const args =
1258 cfargs_canonicalize(cfargs, &store);
1259
1260 cf = config_search_internal(parent, aux, args);
1261 if (cf != NULL) {
1262 return config_attach_internal(parent, cf, aux, print, args);
1263 }
1264
1265 if (print) {
1266 if (config_do_twiddle && cold)
1267 twiddle();
1268
1269 const int pret = (*print)(aux, device_xname(parent));
1270 KASSERT(pret >= 0);
1271 KASSERT(pret < __arraycount(msgs));
1272 KASSERT(msgs[pret] != NULL);
1273 aprint_normal("%s", msgs[pret]);
1274 }
1275
1276 return NULL;
1277 }
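/*
 * Illustrative sketch (hypothetical bus driver, not part of this file):
 * direct configuration of a discovered child from a parent's attach or
 * rescan path.  The FOOBUSCF_* locator indices and foobus_print are
 * assumed to come from the bus's own config(1) output and driver.
 */
#if 0
static void
foobus_attach_child(device_t self, struct foobus_attach_args *faa)
{
	int locs[FOOBUSCF_NLOCS];

	locs[FOOBUSCF_ADDR] = faa->faa_addr;

	/* Prints "... not configured" via foobus_print if nothing matches. */
	config_found(self, faa, foobus_print,
	    CFARGS(.submatch = config_stdsubmatch,
		   .iattr = "foobus",
		   .locators = locs));
}
#endif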
1278
1279 /*
1280 * As above, but for root devices.
1281 */
1282 device_t
1283 config_rootfound(const char *rootname, void *aux)
1284 {
1285 cfdata_t cf;
1286 device_t dev = NULL;
1287
1288 KERNEL_LOCK(1, NULL);
1289 if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1290 dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1291 else
1292 aprint_error("root device %s not configured\n", rootname);
1293 KERNEL_UNLOCK_ONE(NULL);
1294 return dev;
1295 }
1296
1297 /* just like sprintf(buf, "%d") except that it works from the end */
1298 static char *
1299 number(char *ep, int n)
1300 {
1301
1302 *--ep = 0;
1303 while (n >= 10) {
1304 *--ep = (n % 10) + '0';
1305 n /= 10;
1306 }
1307 *--ep = n + '0';
1308 return ep;
1309 }
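/*
 * For example, with char num[10], number(&num[sizeof(num)], 42) stores
 * "42\0" at the end of the buffer and returns a pointer to the '4'.
 */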
1310
1311 /*
1312 * Expand the size of the cd_devs array if necessary.
1313 *
1314 * The caller must hold alldevs_lock. config_makeroom() may release and
1315 * re-acquire alldevs_lock, so callers should re-check conditions such
1316 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1317 * returns.
1318 */
1319 static void
1320 config_makeroom(int n, struct cfdriver *cd)
1321 {
1322 int ondevs, nndevs;
1323 device_t *osp, *nsp;
1324
1325 KASSERT(mutex_owned(&alldevs_lock));
1326 alldevs_nwrite++;
1327
1328 for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1329 ;
1330
1331 while (n >= cd->cd_ndevs) {
1332 /*
1333 * Need to expand the array.
1334 */
1335 ondevs = cd->cd_ndevs;
1336 osp = cd->cd_devs;
1337
1338 /*
1339 * Release alldevs_lock around allocation, which may
1340 * sleep.
1341 */
1342 mutex_exit(&alldevs_lock);
1343 nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1344 mutex_enter(&alldevs_lock);
1345
1346 /*
1347 * If another thread moved the array while we did
1348 * not hold alldevs_lock, try again.
1349 */
1350 if (cd->cd_devs != osp) {
1351 mutex_exit(&alldevs_lock);
1352 kmem_free(nsp, sizeof(device_t) * nndevs);
1353 mutex_enter(&alldevs_lock);
1354 continue;
1355 }
1356
1357 memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1358 if (ondevs != 0)
1359 memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1360
1361 cd->cd_ndevs = nndevs;
1362 cd->cd_devs = nsp;
1363 if (ondevs != 0) {
1364 mutex_exit(&alldevs_lock);
1365 kmem_free(osp, sizeof(device_t) * ondevs);
1366 mutex_enter(&alldevs_lock);
1367 }
1368 }
1369 KASSERT(mutex_owned(&alldevs_lock));
1370 alldevs_nwrite--;
1371 }
1372
1373 /*
1374 * Put dev into the devices list.
1375 */
1376 static void
1377 config_devlink(device_t dev)
1378 {
1379
1380 mutex_enter(&alldevs_lock);
1381
1382 KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1383
1384 dev->dv_add_gen = alldevs_gen;
1385 /* It is safe to add a device to the tail of the list while
1386 * readers and writers are in the list.
1387 */
1388 TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1389 mutex_exit(&alldevs_lock);
1390 }
1391
1392 static void
1393 config_devfree(device_t dev)
1394 {
1395
1396 KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1397 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1398
1399 if (dev->dv_cfattach->ca_devsize > 0)
1400 kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1401 kmem_free(dev, sizeof(*dev));
1402 }
1403
1404 /*
1405 * Caller must hold alldevs_lock.
1406 */
1407 static void
1408 config_devunlink(device_t dev, struct devicelist *garbage)
1409 {
1410 struct device_garbage *dg = &dev->dv_garbage;
1411 cfdriver_t cd = device_cfdriver(dev);
1412 int i;
1413
1414 KASSERT(mutex_owned(&alldevs_lock));
1415 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1416
1417 /* Unlink from device list. Link to garbage list. */
1418 TAILQ_REMOVE(&alldevs, dev, dv_list);
1419 TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1420
1421 /* Remove from cfdriver's array. */
1422 cd->cd_devs[dev->dv_unit] = NULL;
1423
1424 /*
1425 * If the device now has no units in use, unlink its softc array.
1426 */
1427 for (i = 0; i < cd->cd_ndevs; i++) {
1428 if (cd->cd_devs[i] != NULL)
1429 break;
1430 }
1431 /* Nothing found. Unlink, now. Deallocate, later. */
1432 if (i == cd->cd_ndevs) {
1433 dg->dg_ndevs = cd->cd_ndevs;
1434 dg->dg_devs = cd->cd_devs;
1435 cd->cd_devs = NULL;
1436 cd->cd_ndevs = 0;
1437 }
1438 }
1439
1440 static void
1441 config_devdelete(device_t dev)
1442 {
1443 struct device_garbage *dg = &dev->dv_garbage;
1444 device_lock_t dvl = device_getlock(dev);
1445
1446 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1447
1448 if (dg->dg_devs != NULL)
1449 kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1450
1451 localcount_fini(dev->dv_localcount);
1452 kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
1453
1454 cv_destroy(&dvl->dvl_cv);
1455 mutex_destroy(&dvl->dvl_mtx);
1456
1457 KASSERT(dev->dv_properties != NULL);
1458 prop_object_release(dev->dv_properties);
1459
1460 if (dev->dv_activity_handlers)
1461 panic("%s with registered handlers", __func__);
1462
1463 if (dev->dv_locators) {
1464 size_t amount = *--dev->dv_locators;
1465 kmem_free(dev->dv_locators, amount);
1466 }
1467
1468 config_devfree(dev);
1469 }
1470
1471 static int
1472 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1473 {
1474 int unit = cf->cf_unit;
1475
1476 if (unit < 0)
1477 return -1;
1478 if (cf->cf_fstate == FSTATE_STAR) {
1479 for (; unit < cd->cd_ndevs; unit++)
1480 if (cd->cd_devs[unit] == NULL)
1481 break;
1482 /*
1483 * unit is now the unit of the first NULL device pointer,
1484 * or max(cd->cd_ndevs,cf->cf_unit).
1485 */
1486 } else {
1487 if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1488 unit = -1;
1489 }
1490 return unit;
1491 }
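/*
 * Concretely: for a wildcarded entry such as "foo*" (FSTATE_STAR) the
 * function above returns the first free unit at or after cf->cf_unit,
 * which may lie beyond the current cd_ndevs; config_unit_alloc() below
 * then grows the array with config_makeroom() and retries.  For a fixed
 * entry such as "foo3" it returns 3 only if that unit is not already
 * attached, and -1 otherwise.
 */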
1492
1493 static int
1494 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1495 {
1496 struct alldevs_foray af;
1497 int unit;
1498
1499 config_alldevs_enter(&af);
1500 for (;;) {
1501 unit = config_unit_nextfree(cd, cf);
1502 if (unit == -1)
1503 break;
1504 if (unit < cd->cd_ndevs) {
1505 cd->cd_devs[unit] = dev;
1506 dev->dv_unit = unit;
1507 break;
1508 }
1509 config_makeroom(unit, cd);
1510 }
1511 config_alldevs_exit(&af);
1512
1513 return unit;
1514 }
1515
1516 static device_t
1517 config_devalloc(const device_t parent, const cfdata_t cf,
1518 const struct cfargs_internal * const args)
1519 {
1520 cfdriver_t cd;
1521 cfattach_t ca;
1522 size_t lname, lunit;
1523 const char *xunit;
1524 int myunit;
1525 char num[10];
1526 device_t dev;
1527 void *dev_private;
1528 const struct cfiattrdata *ia;
1529 device_lock_t dvl;
1530
1531 cd = config_cfdriver_lookup(cf->cf_name);
1532 if (cd == NULL)
1533 return NULL;
1534
1535 ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1536 if (ca == NULL)
1537 return NULL;
1538
1539 /* get memory for all device vars */
1540 KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1541 if (ca->ca_devsize > 0) {
1542 dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1543 } else {
1544 dev_private = NULL;
1545 }
1546 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1547
1548 dev->dv_handle = args->devhandle;
1549
1550 dev->dv_class = cd->cd_class;
1551 dev->dv_cfdata = cf;
1552 dev->dv_cfdriver = cd;
1553 dev->dv_cfattach = ca;
1554 dev->dv_activity_count = 0;
1555 dev->dv_activity_handlers = NULL;
1556 dev->dv_private = dev_private;
1557 dev->dv_flags = ca->ca_flags; /* inherit flags from class */
1558 dev->dv_attaching = curlwp;
1559
1560 myunit = config_unit_alloc(dev, cd, cf);
1561 if (myunit == -1) {
1562 config_devfree(dev);
1563 return NULL;
1564 }
1565
1566 /* compute length of name and decimal expansion of unit number */
1567 lname = strlen(cd->cd_name);
1568 xunit = number(&num[sizeof(num)], myunit);
1569 lunit = &num[sizeof(num)] - xunit;
1570 if (lname + lunit > sizeof(dev->dv_xname))
1571 panic("config_devalloc: device name too long");
1572
1573 dvl = device_getlock(dev);
1574
1575 mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1576 cv_init(&dvl->dvl_cv, "pmfsusp");
1577
1578 memcpy(dev->dv_xname, cd->cd_name, lname);
1579 memcpy(dev->dv_xname + lname, xunit, lunit);
1580 dev->dv_parent = parent;
1581 if (parent != NULL)
1582 dev->dv_depth = parent->dv_depth + 1;
1583 else
1584 dev->dv_depth = 0;
1585 dev->dv_flags |= DVF_ACTIVE; /* always initially active */
1586 if (args->locators) {
1587 KASSERT(parent); /* no locators at root */
1588 ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1589 dev->dv_locators =
1590 kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1591 *dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1592 memcpy(dev->dv_locators, args->locators,
1593 sizeof(int) * ia->ci_loclen);
1594 }
1595 dev->dv_properties = prop_dictionary_create();
1596 KASSERT(dev->dv_properties != NULL);
1597
1598 prop_dictionary_set_string_nocopy(dev->dv_properties,
1599 "device-driver", dev->dv_cfdriver->cd_name);
1600 prop_dictionary_set_uint16(dev->dv_properties,
1601 "device-unit", dev->dv_unit);
1602 if (parent != NULL) {
1603 prop_dictionary_set_string(dev->dv_properties,
1604 "device-parent", device_xname(parent));
1605 }
1606
1607 dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
1608 KM_SLEEP);
1609 localcount_init(dev->dv_localcount);
1610
1611 if (dev->dv_cfdriver->cd_attrs != NULL)
1612 config_add_attrib_dict(dev);
1613
1614 return dev;
1615 }
1616
1617 /*
1618 * Create an array of device attach attributes and add it
1619 * to the device's dv_properties dictionary.
1620 *
1621 * <key>interface-attributes</key>
1622 * <array>
1623 * <dict>
1624 * <key>attribute-name</key>
1625 * <string>foo</string>
1626 * <key>locators</key>
1627 * <array>
1628 * <dict>
1629 * <key>loc-name</key>
1630 * <string>foo-loc1</string>
1631 * </dict>
1632 * <dict>
1633 * <key>loc-name</key>
1634 * <string>foo-loc2</string>
1635 * <key>default</key>
1636 * <string>foo-loc2-default</string>
1637 * </dict>
1638 * ...
1639 * </array>
1640 * </dict>
1641 * ...
1642 * </array>
1643 */
1644
1645 static void
1646 config_add_attrib_dict(device_t dev)
1647 {
1648 int i, j;
1649 const struct cfiattrdata *ci;
1650 prop_dictionary_t attr_dict, loc_dict;
1651 prop_array_t attr_array, loc_array;
1652
1653 if ((attr_array = prop_array_create()) == NULL)
1654 return;
1655
1656 for (i = 0; ; i++) {
1657 if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1658 break;
1659 if ((attr_dict = prop_dictionary_create()) == NULL)
1660 break;
1661 prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1662 ci->ci_name);
1663
1664 /* Create an array of the locator names and defaults */
1665
1666 if (ci->ci_loclen != 0 &&
1667 (loc_array = prop_array_create()) != NULL) {
1668 for (j = 0; j < ci->ci_loclen; j++) {
1669 loc_dict = prop_dictionary_create();
1670 if (loc_dict == NULL)
1671 continue;
1672 prop_dictionary_set_string_nocopy(loc_dict,
1673 "loc-name", ci->ci_locdesc[j].cld_name);
1674 if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1675 prop_dictionary_set_string_nocopy(
1676 loc_dict, "default",
1677 ci->ci_locdesc[j].cld_defaultstr);
1678 prop_array_set(loc_array, j, loc_dict);
1679 prop_object_release(loc_dict);
1680 }
1681 prop_dictionary_set_and_rel(attr_dict, "locators",
1682 loc_array);
1683 }
1684 prop_array_add(attr_array, attr_dict);
1685 prop_object_release(attr_dict);
1686 }
1687 if (i == 0)
1688 prop_object_release(attr_array);
1689 else
1690 prop_dictionary_set_and_rel(dev->dv_properties,
1691 "interface-attributes", attr_array);
1692
1693 return;
1694 }
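/*
 * The resulting "interface-attributes" array (laid out as sketched
 * above) lives in the device's properties dictionary and can be
 * inspected from userland, e.g. via drvctl(8)'s property queries.
 */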
1695
1696 /*
1697 * Attach a found device.
1698 */
1699 static device_t
1700 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1701 const struct cfargs_internal * const args)
1702 {
1703 device_t dev;
1704 struct cftable *ct;
1705 const char *drvname;
1706 bool deferred;
1707
1708 KASSERT(KERNEL_LOCKED_P());
1709
1710 dev = config_devalloc(parent, cf, args);
1711 if (!dev)
1712 panic("config_attach: allocation of device softc failed");
1713
1714 /* XXX redundant - see below? */
1715 if (cf->cf_fstate != FSTATE_STAR) {
1716 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1717 cf->cf_fstate = FSTATE_FOUND;
1718 }
1719
1720 config_devlink(dev);
1721
1722 if (config_do_twiddle && cold)
1723 twiddle();
1724 else
1725 aprint_naive("Found ");
1726 /*
1727 * We want the next two printfs for normal, verbose, and quiet,
1728 * but not silent (in which case, we're twiddling, instead).
1729 */
1730 if (parent == ROOT) {
1731 aprint_naive("%s (root)", device_xname(dev));
1732 aprint_normal("%s (root)", device_xname(dev));
1733 } else {
1734 aprint_naive("%s at %s", device_xname(dev),
1735 device_xname(parent));
1736 aprint_normal("%s at %s", device_xname(dev),
1737 device_xname(parent));
1738 if (print)
1739 (void) (*print)(aux, NULL);
1740 }
1741
1742 /*
1743 * Before attaching, clobber any unfound devices that are
1744 * otherwise identical.
1745 * XXX code above is redundant?
1746 */
1747 drvname = dev->dv_cfdriver->cd_name;
1748 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1749 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1750 if (STREQ(cf->cf_name, drvname) &&
1751 cf->cf_unit == dev->dv_unit) {
1752 if (cf->cf_fstate == FSTATE_NOTFOUND)
1753 cf->cf_fstate = FSTATE_FOUND;
1754 }
1755 }
1756 }
1757 device_register(dev, aux);
1758
1759 /* Let userland know */
1760 devmon_report_device(dev, true);
1761
1762 /*
1763 * Prevent detach until the driver's attach function, and all
1764 * deferred actions, have finished.
1765 */
1766 config_pending_incr(dev);
1767
1768 /* Call the driver's attach function. */
1769 (*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1770
1771 /*
1772 * Allow other threads to acquire references to the device now
1773 * that the driver's attach function is done.
1774 */
1775 mutex_enter(&config_misc_lock);
1776 KASSERT(dev->dv_attaching == curlwp);
1777 dev->dv_attaching = NULL;
1778 cv_broadcast(&config_misc_cv);
1779 mutex_exit(&config_misc_lock);
1780
1781 /*
1782 * Synchronous parts of attach are done. Allow detach, unless
1783 * the driver's attach function scheduled deferred actions.
1784 */
1785 config_pending_decr(dev);
1786
1787 mutex_enter(&config_misc_lock);
1788 deferred = (dev->dv_pending != 0);
1789 mutex_exit(&config_misc_lock);
1790
1791 if (!deferred && !device_pmf_is_registered(dev))
1792 aprint_debug_dev(dev,
1793 "WARNING: power management not supported\n");
1794
1795 config_process_deferred(&deferred_config_queue, dev);
1796
1797 device_register_post_config(dev, aux);
1798 rnd_add_uint32(&rnd_autoconf_source, 0);
1799 return dev;
1800 }
1801
1802 device_t
1803 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1804 const struct cfargs *cfargs)
1805 {
1806 struct cfargs_internal store;
1807
1808 KASSERT(KERNEL_LOCKED_P());
1809
1810 return config_attach_internal(parent, cf, aux, print,
1811 cfargs_canonicalize(cfargs, &store));
1812 }
1813
1814 /*
1815 * As above, but for pseudo-devices. Pseudo-devices attached in this
1816 * way are silently inserted into the device tree, and their children
1817 * attached.
1818 *
1819 * Note that because pseudo-devices are attached silently, any information
1820 * the attach routine wishes to print should be prefixed with the device
1821 * name by the attach routine.
1822 */
1823 device_t
1824 config_attach_pseudo(cfdata_t cf)
1825 {
1826 device_t dev;
1827
1828 KERNEL_LOCK(1, NULL);
1829
1830 struct cfargs_internal args = { };
1831 dev = config_devalloc(ROOT, cf, &args);
1832 if (!dev)
1833 goto out;
1834
1835 /* XXX mark busy in cfdata */
1836
1837 if (cf->cf_fstate != FSTATE_STAR) {
1838 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1839 cf->cf_fstate = FSTATE_FOUND;
1840 }
1841
1842 config_devlink(dev);
1843
1844 #if 0 /* XXXJRT not yet */
1845 device_register(dev, NULL); /* like a root node */
1846 #endif
1847
1848 /* Let userland know */
1849 devmon_report_device(dev, true);
1850
1851 /*
1852 * Prevent detach until the driver's attach function, and all
1853 * deferred actions, have finished.
1854 */
1855 config_pending_incr(dev);
1856
1857 /* Call the driver's attach function. */
1858 (*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL);
1859
1860 /*
1861 * Allow other threads to acquire references to the device now
1862 * that the driver's attach function is done.
1863 */
1864 mutex_enter(&config_misc_lock);
1865 KASSERT(dev->dv_attaching == curlwp);
1866 dev->dv_attaching = NULL;
1867 cv_broadcast(&config_misc_cv);
1868 mutex_exit(&config_misc_lock);
1869
1870 /*
1871 * Synchronous parts of attach are done. Allow detach, unless
1872 * the driver's attach function scheduled deferred actions.
1873 */
1874 config_pending_decr(dev);
1875
1876 config_process_deferred(&deferred_config_queue, dev);
1877
1878 out: KERNEL_UNLOCK_ONE(NULL);
1879 return dev;
1880 }
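/*
 * Illustrative sketch (hypothetical pseudo-device, not part of this
 * file): a pseudo-device typically builds a cfdata record by hand and
 * attaches an instance with config_attach_pseudo() from its *attach()
 * hook.
 */
#if 0
static struct cfdata foo_cfdata = {
	.cf_name = "foo",
	.cf_atname = "foo",
	.cf_unit = 0,
	.cf_fstate = FSTATE_STAR,
};

void
fooattach(int count)
{

	if (config_attach_pseudo(&foo_cfdata) == NULL)
		aprint_error("foo: unable to attach pseudo-device\n");
}
#endif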
1881
1882 /*
1883 * Caller must hold alldevs_lock.
1884 */
1885 static void
1886 config_collect_garbage(struct devicelist *garbage)
1887 {
1888 device_t dv;
1889
1890 KASSERT(!cpu_intr_p());
1891 KASSERT(!cpu_softintr_p());
1892 KASSERT(mutex_owned(&alldevs_lock));
1893
1894 while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
1895 TAILQ_FOREACH(dv, &alldevs, dv_list) {
1896 if (dv->dv_del_gen != 0)
1897 break;
1898 }
1899 if (dv == NULL) {
1900 alldevs_garbage = false;
1901 break;
1902 }
1903 config_devunlink(dv, garbage);
1904 }
1905 KASSERT(mutex_owned(&alldevs_lock));
1906 }
1907
1908 static void
1909 config_dump_garbage(struct devicelist *garbage)
1910 {
1911 device_t dv;
1912
1913 while ((dv = TAILQ_FIRST(garbage)) != NULL) {
1914 TAILQ_REMOVE(garbage, dv, dv_list);
1915 config_devdelete(dv);
1916 }
1917 }
1918
1919 static int
1920 config_detach_enter(device_t dev)
1921 {
1922 struct lwp *l __diagused;
1923 int error = 0;
1924
1925 mutex_enter(&config_misc_lock);
1926
1927 /*
1928 * Wait until attach has fully completed, and until any
1929 * concurrent detach (e.g., drvctl racing with USB event
1930 * thread) has completed.
1931 *
1932 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
1933 * deviter) to ensure the winner of the race doesn't free the
1934 * device leading the loser of the race into use-after-free.
1935 *
1936 * XXX Not all callers do this!
1937 */
1938 while (dev->dv_pending || dev->dv_detaching) {
1939 KASSERTMSG(dev->dv_detaching != curlwp,
1940 "recursively detaching %s", device_xname(dev));
1941 error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
1942 if (error)
1943 goto out;
1944 }
1945
1946 /*
1947 * Attach has completed, and no other concurrent detach is
1948 * running. Claim the device for detaching. This will cause
1949 * all new attempts to acquire references to block.
1950 */
1951 KASSERTMSG((l = dev->dv_attaching) == NULL,
1952 "lwp %ld [%s] @ %p attaching",
1953 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l);
1954 KASSERTMSG((l = dev->dv_detaching) == NULL,
1955 "lwp %ld [%s] @ %p detaching",
1956 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l);
1957 dev->dv_detaching = curlwp;
1958
1959 out: mutex_exit(&config_misc_lock);
1960 return error;
1961 }
1962
1963 static void
1964 config_detach_exit(device_t dev)
1965 {
1966 struct lwp *l __diagused;
1967
1968 mutex_enter(&config_misc_lock);
1969 KASSERTMSG((l = dev->dv_detaching) == curlwp,
1970 "lwp %ld [%s] @ %p detaching",
1971 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l);
1972 dev->dv_detaching = NULL;
1973 cv_broadcast(&config_misc_cv);
1974 mutex_exit(&config_misc_lock);
1975 }
1976
1977 /*
1978 * Detach a device. Optionally forced (e.g. because of hardware
1979 * removal) and quiet. Returns zero if successful, non-zero
1980 * (an error code) otherwise.
1981 *
1982 * Note that this code wants to be run from a process context, so
1983 * that the detach can sleep to allow processes which have a device
1984 * open to run and unwind their stacks.
1985 */
1986 int
1987 config_detach(device_t dev, int flags)
1988 {
1989 struct alldevs_foray af;
1990 struct cftable *ct;
1991 cfdata_t cf;
1992 const struct cfattach *ca;
1993 struct cfdriver *cd;
1994 device_t d __diagused;
1995 int rv = 0;
1996
1997 KERNEL_LOCK(1, NULL);
1998
1999 cf = dev->dv_cfdata;
2000 KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
2001 cf->cf_fstate == FSTATE_STAR),
2002 "config_detach: %s: bad device fstate: %d",
2003 device_xname(dev), cf ? cf->cf_fstate : -1);
2004
2005 cd = dev->dv_cfdriver;
2006 KASSERT(cd != NULL);
2007
2008 ca = dev->dv_cfattach;
2009 KASSERT(ca != NULL);
2010
2011 /*
2012 * Only one detach at a time, please -- and not until fully
2013 * attached.
2014 */
2015 rv = config_detach_enter(dev);
2016 if (rv) {
2017 KERNEL_UNLOCK_ONE(NULL);
2018 return rv;
2019 }
2020
2021 mutex_enter(&alldevs_lock);
2022 if (dev->dv_del_gen != 0) {
2023 mutex_exit(&alldevs_lock);
2024 #ifdef DIAGNOSTIC
2025 printf("%s: %s is already detached\n", __func__,
2026 device_xname(dev));
2027 #endif /* DIAGNOSTIC */
2028 config_detach_exit(dev);
2029 KERNEL_UNLOCK_ONE(NULL);
2030 return ENOENT;
2031 }
2032 alldevs_nwrite++;
2033 mutex_exit(&alldevs_lock);
2034
2035 /*
2036 * Call the driver's .ca_detach function, unless it has none or
2037 * we are skipping it because it's unforced shutdown time and
2038 * the driver didn't ask to detach on shutdown.
2039 */
2040 if (!detachall &&
2041 (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2042 (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2043 rv = EOPNOTSUPP;
2044 } else if (ca->ca_detach != NULL) {
2045 rv = (*ca->ca_detach)(dev, flags);
2046 } else
2047 rv = EOPNOTSUPP;
2048
2049 /*
2050 * If it was not possible to detach the device, then we either
2051 * panic() (for the forced but failed case), or return an error.
2052 */
2053 if (rv) {
2054 /*
2055 * Detach failed -- likely EOPNOTSUPP or EBUSY. Driver
2056 * must not have called config_detach_commit.
2057 */
2058 KASSERTMSG(!dev->dv_detached,
2059 "%s committed to detaching and then backed out",
2060 device_xname(dev));
2061 if (flags & DETACH_FORCE) {
2062 panic("config_detach: forced detach of %s failed (%d)",
2063 device_xname(dev), rv);
2064 }
2065 goto out;
2066 }
2067
2068 /*
2069 * The device has now been successfully detached.
2070 */
2071
2072 /*
2073 * If .ca_detach didn't commit to detach, then do that for it.
2074 * This wakes any pending device_lookup_acquire calls so they
2075 * will fail.
2076 */
2077 config_detach_commit(dev);
2078
2079 /*
2080 * If it was possible to detach the device, ensure that the
2081 * device is deactivated.
2082 */
2083 dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
2084
2085 /*
2086 * Wait for all device_lookup_acquire references -- mostly, for
2087 * all attempts to open the device -- to drain. It is the
2088 * responsibility of .ca_detach to ensure anything with open
2089 * references will be interrupted and release them promptly,
2090 * not block indefinitely. All new attempts to acquire
2091 * references will fail, as config_detach_commit has arranged
2092 * by now.
2093 */
2094 mutex_enter(&config_misc_lock);
2095 localcount_drain(dev->dv_localcount,
2096 &config_misc_cv, &config_misc_lock);
2097 mutex_exit(&config_misc_lock);
2098
2099 /* Let userland know */
2100 devmon_report_device(dev, false);
2101
2102 #ifdef DIAGNOSTIC
2103 /*
2104 * Sanity: If you're successfully detached, you should have no
2105 * children. (Note that because children must be attached
2106 * after parents, we only need to search the latter part of
2107 * the list.)
2108 */
2109 mutex_enter(&alldevs_lock);
2110 for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2111 d = TAILQ_NEXT(d, dv_list)) {
2112 if (d->dv_parent == dev && d->dv_del_gen == 0) {
2113 printf("config_detach: detached device %s"
2114 " has children %s\n", device_xname(dev),
2115 device_xname(d));
2116 panic("config_detach");
2117 }
2118 }
2119 mutex_exit(&alldevs_lock);
2120 #endif
2121
2122 /* notify the parent that the child is gone */
2123 if (dev->dv_parent) {
2124 device_t p = dev->dv_parent;
2125 if (p->dv_cfattach->ca_childdetached)
2126 (*p->dv_cfattach->ca_childdetached)(p, dev);
2127 }
2128
2129 /*
2130 * Mark cfdata to show that the unit can be reused, if possible.
2131 */
2132 TAILQ_FOREACH(ct, &allcftables, ct_list) {
2133 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2134 if (STREQ(cf->cf_name, cd->cd_name)) {
2135 if (cf->cf_fstate == FSTATE_FOUND &&
2136 cf->cf_unit == dev->dv_unit)
2137 cf->cf_fstate = FSTATE_NOTFOUND;
2138 }
2139 }
2140 }
2141
2142 if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2143 aprint_normal_dev(dev, "detached\n");
2144
2145 out:
2146 config_detach_exit(dev);
2147
2148 config_alldevs_enter(&af);
2149 KASSERT(alldevs_nwrite != 0);
2150 --alldevs_nwrite;
2151 if (rv == 0 && dev->dv_del_gen == 0) {
2152 if (alldevs_nwrite == 0 && alldevs_nread == 0)
2153 config_devunlink(dev, &af.af_garbage);
2154 else {
2155 dev->dv_del_gen = alldevs_gen;
2156 alldevs_garbage = true;
2157 }
2158 }
2159 config_alldevs_exit(&af);
2160
2161 KERNEL_UNLOCK_ONE(NULL);
2162
2163 return rv;
2164 }
2165
2166 /*
2167 * config_detach_commit(dev)
2168 *
2169 * Issued by a driver's .ca_detach routine to notify anyone
2170 * waiting in device_lookup_acquire that the driver is committed
2171 * to detaching the device, which allows device_lookup_acquire to
2172 * wake up and fail immediately.
2173 *
2174 * Safe to call multiple times -- idempotent. Must be called
2175 * during config_detach_enter/exit. Safe to use with
2176 * device_lookup because the device is not actually removed from
2177 * the table until after config_detach_exit.
2178 */
2179 void
2180 config_detach_commit(device_t dev)
2181 {
2182 struct lwp *l __diagused;
2183
2184 mutex_enter(&config_misc_lock);
2185 KASSERTMSG((l = dev->dv_detaching) == curlwp,
2186 "lwp %ld [%s] @ %p detaching",
2187 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l);
2188 dev->dv_detached = true;
2189 cv_broadcast(&config_misc_cv);
2190 mutex_exit(&config_misc_lock);
2191 }
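
/*
 * Illustrative sketch (not compiled): roughly how a driver's
 * .ca_detach routine might use config_detach_commit().  The names
 * example_detach, example_softc, sc_busy and example_drain_users
 * are hypothetical stand-ins for driver-specific logic.
 */
#if 0
static int
example_detach(device_t self, int flags)
{
	struct example_softc *sc = device_private(self);

	/*
	 * Back out of an unforced detach while the device is busy;
	 * config_detach(9) returns this error to its caller.
	 */
	if (sc->sc_busy && (flags & DETACH_FORCE) == 0)
		return EBUSY;

	/*
	 * Committed now: wake anyone blocked in device_lookup_acquire
	 * so they fail instead of waiting, then interrupt and drain
	 * existing users before tearing down driver state.
	 */
	config_detach_commit(self);
	example_drain_users(sc);

	return 0;
}
#endif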
2192
2193 int
2194 config_detach_children(device_t parent, int flags)
2195 {
2196 device_t dv;
2197 deviter_t di;
2198 int error = 0;
2199
2200 KASSERT(KERNEL_LOCKED_P());
2201
2202 for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2203 dv = deviter_next(&di)) {
2204 if (device_parent(dv) != parent)
2205 continue;
2206 if ((error = config_detach(dv, flags)) != 0)
2207 break;
2208 }
2209 deviter_release(&di);
2210 return error;
2211 }
2212
2213 device_t
2214 shutdown_first(struct shutdown_state *s)
2215 {
2216 if (!s->initialized) {
2217 deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2218 s->initialized = true;
2219 }
2220 return shutdown_next(s);
2221 }
2222
2223 device_t
2224 shutdown_next(struct shutdown_state *s)
2225 {
2226 device_t dv;
2227
2228 while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2229 ;
2230
2231 if (dv == NULL)
2232 s->initialized = false;
2233
2234 return dv;
2235 }
2236
2237 bool
2238 config_detach_all(int how)
2239 {
2240 static struct shutdown_state s;
2241 device_t curdev;
2242 bool progress = false;
2243 int flags;
2244
2245 KERNEL_LOCK(1, NULL);
2246
2247 if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2248 goto out;
2249
2250 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2251 flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2252 else
2253 flags = DETACH_SHUTDOWN;
2254
2255 for (curdev = shutdown_first(&s); curdev != NULL;
2256 curdev = shutdown_next(&s)) {
2257 aprint_debug(" detaching %s, ", device_xname(curdev));
2258 if (config_detach(curdev, flags) == 0) {
2259 progress = true;
2260 aprint_debug("success.");
2261 } else
2262 aprint_debug("failed.");
2263 }
2264
2265 out: KERNEL_UNLOCK_ONE(NULL);
2266 return progress;
2267 }
2268
2269 static bool
2270 device_is_ancestor_of(device_t ancestor, device_t descendant)
2271 {
2272 device_t dv;
2273
2274 for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2275 if (device_parent(dv) == ancestor)
2276 return true;
2277 }
2278 return false;
2279 }
2280
2281 int
2282 config_deactivate(device_t dev)
2283 {
2284 deviter_t di;
2285 const struct cfattach *ca;
2286 device_t descendant;
2287 int s, rv = 0, oflags;
2288
2289 for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2290 descendant != NULL;
2291 descendant = deviter_next(&di)) {
2292 if (dev != descendant &&
2293 !device_is_ancestor_of(dev, descendant))
2294 continue;
2295
2296 if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2297 continue;
2298
2299 ca = descendant->dv_cfattach;
2300 oflags = descendant->dv_flags;
2301
2302 descendant->dv_flags &= ~DVF_ACTIVE;
2303 if (ca->ca_activate == NULL)
2304 continue;
2305 s = splhigh();
2306 rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2307 splx(s);
2308 if (rv != 0)
2309 descendant->dv_flags = oflags;
2310 }
2311 deviter_release(&di);
2312 return rv;
2313 }
2314
2315 /*
2316  * Defer the configuration of the specified device until its
2317  * parent's attach function has completed.
2318 */
2319 void
2320 config_defer(device_t dev, void (*func)(device_t))
2321 {
2322 struct deferred_config *dc;
2323
2324 if (dev->dv_parent == NULL)
2325 panic("config_defer: can't defer config of a root device");
2326
2327 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2328
2329 config_pending_incr(dev);
2330
2331 mutex_enter(&config_misc_lock);
2332 #ifdef DIAGNOSTIC
2333 struct deferred_config *odc;
2334 TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2335 if (odc->dc_dev == dev)
2336 panic("config_defer: deferred twice");
2337 }
2338 #endif
2339 dc->dc_dev = dev;
2340 dc->dc_func = func;
2341 TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2342 mutex_exit(&config_misc_lock);
2343 }
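
/*
 * Illustrative sketch (not compiled): deferring part of a driver's
 * attach with config_defer().  example_attach and
 * example_attach_deferred are hypothetical.
 */
#if 0
static void
example_attach_deferred(device_t self)
{
	/* Runs once the parent's attach function has completed. */
}

static void
example_attach(device_t parent, device_t self, void *aux)
{
	/* ... map registers, initialize the softc ... */

	/* Finish configuration only after the parent is fully attached. */
	config_defer(self, example_attach_deferred);
}
#endif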
2344
2345 /*
2346 * Defer some autoconfiguration for a device until after interrupts
2347 * are enabled.
2348 */
2349 void
2350 config_interrupts(device_t dev, void (*func)(device_t))
2351 {
2352 struct deferred_config *dc;
2353
2354 /*
2355 	 * If interrupts are already enabled, call the function now.
2356 */
2357 if (cold == 0) {
2358 (*func)(dev);
2359 return;
2360 }
2361
2362 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2363
2364 config_pending_incr(dev);
2365
2366 mutex_enter(&config_misc_lock);
2367 #ifdef DIAGNOSTIC
2368 struct deferred_config *odc;
2369 TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2370 if (odc->dc_dev == dev)
2371 panic("config_interrupts: deferred twice");
2372 }
2373 #endif
2374 dc->dc_dev = dev;
2375 dc->dc_func = func;
2376 TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2377 mutex_exit(&config_misc_lock);
2378 }
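
/*
 * Illustrative sketch (not compiled): using config_interrupts() for
 * attach work that needs interrupts.  If the system is already warm
 * the callback simply runs immediately, as noted above.  The
 * example_* names are hypothetical.
 */
#if 0
static void
example_intr_setup(device_t self)
{
	/* Safe to establish timeouts and expect interrupts here. */
}

static void
example_attach(device_t parent, device_t self, void *aux)
{
	/* ... interrupt-free initialization ... */
	config_interrupts(self, example_intr_setup);
}
#endif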
2379
2380 /*
2381 * Defer some autoconfiguration for a device until after root file system
2382 * is mounted (to load firmware etc).
2383 */
2384 void
2385 config_mountroot(device_t dev, void (*func)(device_t))
2386 {
2387 struct deferred_config *dc;
2388
2389 /*
2390 	 * If the root file system is already mounted, call the function now.
2391 */
2392 if (root_is_mounted) {
2393 (*func)(dev);
2394 return;
2395 }
2396
2397 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2398
2399 mutex_enter(&config_misc_lock);
2400 #ifdef DIAGNOSTIC
2401 struct deferred_config *odc;
2402 TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2403 if (odc->dc_dev == dev)
2404 panic("%s: deferred twice", __func__);
2405 }
2406 #endif
2407
2408 dc->dc_dev = dev;
2409 dc->dc_func = func;
2410 TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2411 mutex_exit(&config_misc_lock);
2412 }
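
/*
 * Illustrative sketch (not compiled): loading device firmware once
 * the root file system is available.  example_load_firmware is a
 * hypothetical driver routine; a driver would typically register the
 * callback from its attach routine with
 * config_mountroot(self, example_mountroot_cb).
 */
#if 0
static void
example_mountroot_cb(device_t self)
{
	struct example_softc *sc = device_private(self);

	if (example_load_firmware(sc) != 0)
		aprint_error_dev(self, "unable to load firmware\n");
}
#endif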
2413
2414 /*
2415 * Process a deferred configuration queue.
2416 */
2417 static void
2418 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2419 {
2420 struct deferred_config *dc;
2421
2422 KASSERT(KERNEL_LOCKED_P());
2423
2424 mutex_enter(&config_misc_lock);
2425 dc = TAILQ_FIRST(queue);
2426 while (dc) {
2427 if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2428 TAILQ_REMOVE(queue, dc, dc_queue);
2429 mutex_exit(&config_misc_lock);
2430
2431 (*dc->dc_func)(dc->dc_dev);
2432 config_pending_decr(dc->dc_dev);
2433 kmem_free(dc, sizeof(*dc));
2434
2435 mutex_enter(&config_misc_lock);
2436 /* Restart, queue might have changed */
2437 dc = TAILQ_FIRST(queue);
2438 } else {
2439 dc = TAILQ_NEXT(dc, dc_queue);
2440 }
2441 }
2442 mutex_exit(&config_misc_lock);
2443 }
2444
2445 /*
2446 * Manipulate the config_pending semaphore.
2447 */
2448 void
2449 config_pending_incr(device_t dev)
2450 {
2451
2452 mutex_enter(&config_misc_lock);
2453 KASSERTMSG(dev->dv_pending < INT_MAX,
2454 "%s: excess config_pending_incr", device_xname(dev));
2455 if (dev->dv_pending++ == 0)
2456 TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2457 #ifdef DEBUG_AUTOCONF
2458 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2459 #endif
2460 mutex_exit(&config_misc_lock);
2461 }
2462
2463 void
2464 config_pending_decr(device_t dev)
2465 {
2466
2467 mutex_enter(&config_misc_lock);
2468 KASSERTMSG(dev->dv_pending > 0,
2469 "%s: excess config_pending_decr", device_xname(dev));
2470 if (--dev->dv_pending == 0) {
2471 TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2472 cv_broadcast(&config_misc_cv);
2473 }
2474 #ifdef DEBUG_AUTOCONF
2475 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2476 #endif
2477 mutex_exit(&config_misc_lock);
2478 }
2479
2480 /*
2481 * Register a "finalization" routine. Finalization routines are
2482  * called repeatedly once all real devices have been found during
2483  * autoconfiguration, until a pass in which no finalizer does
2484  * any work.
2485 */
2486 int
2487 config_finalize_register(device_t dev, int (*fn)(device_t))
2488 {
2489 struct finalize_hook *f;
2490 int error = 0;
2491
2492 KERNEL_LOCK(1, NULL);
2493
2494 /*
2495 * If finalization has already been done, invoke the
2496 * callback function now.
2497 */
2498 if (config_finalize_done) {
2499 while ((*fn)(dev) != 0)
2500 /* loop */ ;
2501 goto out;
2502 }
2503
2504 /* Ensure this isn't already on the list. */
2505 TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2506 if (f->f_func == fn && f->f_dev == dev) {
2507 error = EEXIST;
2508 goto out;
2509 }
2510 }
2511
2512 f = kmem_alloc(sizeof(*f), KM_SLEEP);
2513 f->f_func = fn;
2514 f->f_dev = dev;
2515 TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2516
2517 /* Success! */
2518 error = 0;
2519
2520 out: KERNEL_UNLOCK_ONE(NULL);
2521 return error;
2522 }
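
/*
 * Illustrative sketch (not compiled): a finalizer obeying the
 * contract above; it returns non-zero only when it actually did some
 * work, so the config_finalize() loop terminates.  The example_*
 * names are hypothetical; the hook would be registered from attach
 * with config_finalize_register(self, example_finalize).
 */
#if 0
static int
example_finalize(device_t self)
{
	struct example_softc *sc = device_private(self);

	if (sc->sc_orphans == 0)
		return 0;	/* nothing left to do */

	example_adopt_orphans(sc);	/* may attach more devices */
	return 1;		/* did work, so all finalizers run again */
}
#endif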
2523
2524 void
2525 config_finalize(void)
2526 {
2527 struct finalize_hook *f;
2528 struct pdevinit *pdev;
2529 extern struct pdevinit pdevinit[];
2530 int errcnt, rv;
2531
2532 /*
2533 * Now that device driver threads have been created, wait for
2534 * them to finish any deferred autoconfiguration.
2535 */
2536 mutex_enter(&config_misc_lock);
2537 while (!TAILQ_EMPTY(&config_pending)) {
2538 device_t dev;
2539 int error;
2540
2541 error = cv_timedwait(&config_misc_cv, &config_misc_lock,
2542 mstohz(1000));
2543 if (error == EWOULDBLOCK) {
2544 aprint_debug("waiting for devices:");
2545 TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2546 aprint_debug(" %s", device_xname(dev));
2547 aprint_debug("\n");
2548 }
2549 }
2550 mutex_exit(&config_misc_lock);
2551
2552 KERNEL_LOCK(1, NULL);
2553
2554 /* Attach pseudo-devices. */
2555 for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2556 (*pdev->pdev_attach)(pdev->pdev_count);
2557
2558 /* Run the hooks until none of them does any work. */
2559 do {
2560 rv = 0;
2561 TAILQ_FOREACH(f, &config_finalize_list, f_list)
2562 rv |= (*f->f_func)(f->f_dev);
2563 } while (rv != 0);
2564
2565 config_finalize_done = 1;
2566
2567 /* Now free all the hooks. */
2568 while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2569 TAILQ_REMOVE(&config_finalize_list, f, f_list);
2570 kmem_free(f, sizeof(*f));
2571 }
2572
2573 KERNEL_UNLOCK_ONE(NULL);
2574
2575 errcnt = aprint_get_error_count();
2576 if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2577 (boothowto & AB_VERBOSE) == 0) {
2578 mutex_enter(&config_misc_lock);
2579 if (config_do_twiddle) {
2580 config_do_twiddle = 0;
2581 printf_nolog(" done.\n");
2582 }
2583 mutex_exit(&config_misc_lock);
2584 }
2585 if (errcnt != 0) {
2586 printf("WARNING: %d error%s while detecting hardware; "
2587 "check system log.\n", errcnt,
2588 errcnt == 1 ? "" : "s");
2589 }
2590 }
2591
2592 void
2593 config_twiddle_init(void)
2594 {
2595
2596 if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2597 config_do_twiddle = 1;
2598 }
2599 callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2600 }
2601
2602 void
2603 config_twiddle_fn(void *cookie)
2604 {
2605
2606 mutex_enter(&config_misc_lock);
2607 if (config_do_twiddle) {
2608 twiddle();
2609 callout_schedule(&config_twiddle_ch, mstohz(100));
2610 }
2611 mutex_exit(&config_misc_lock);
2612 }
2613
2614 static void
2615 config_alldevs_enter(struct alldevs_foray *af)
2616 {
2617 TAILQ_INIT(&af->af_garbage);
2618 mutex_enter(&alldevs_lock);
2619 config_collect_garbage(&af->af_garbage);
2620 }
2621
2622 static void
2623 config_alldevs_exit(struct alldevs_foray *af)
2624 {
2625 mutex_exit(&alldevs_lock);
2626 config_dump_garbage(&af->af_garbage);
2627 }
2628
2629 /*
2630 * device_lookup:
2631 *
2632 * Look up a device instance for a given driver.
2633 *
2634 * Caller is responsible for ensuring the device's state is
2635 * stable, either by holding a reference already obtained with
2636 * device_lookup_acquire or by otherwise ensuring the device is
2637 * attached and can't be detached (e.g., holding an open device
2638 * node and ensuring *_detach calls vdevgone).
2639 *
2640 * XXX Find a way to assert this.
2641 *
2642 * Safe for use up to and including interrupt context at IPL_VM.
2643 * Never sleeps.
2644 */
2645 device_t
2646 device_lookup(cfdriver_t cd, int unit)
2647 {
2648 device_t dv;
2649
2650 mutex_enter(&alldevs_lock);
2651 if (unit < 0 || unit >= cd->cd_ndevs)
2652 dv = NULL;
2653 else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2654 dv = NULL;
2655 mutex_exit(&alldevs_lock);
2656
2657 return dv;
2658 }
2659
2660 /*
2661 * device_lookup_private:
2662 *
2663 * Look up a softc instance for a given driver.
2664 */
2665 void *
2666 device_lookup_private(cfdriver_t cd, int unit)
2667 {
2668
2669 return device_private(device_lookup(cd, unit));
2670 }
2671
2672 /*
2673 * device_lookup_acquire:
2674 *
2675 * Look up a device instance for a given driver, and return a
2676 * reference to it that must be released by device_release.
2677 *
2678 * => If the device is still attaching, blocks until *_attach has
2679 * returned.
2680 *
2681 * => If the device is detaching, blocks until *_detach has
2682 * returned. May succeed or fail in that case, depending on
2683 * whether *_detach has backed out (EBUSY) or committed to
2684 * detaching.
2685 *
2686 * May sleep.
2687 */
2688 device_t
2689 device_lookup_acquire(cfdriver_t cd, int unit)
2690 {
2691 device_t dv;
2692
2693 ASSERT_SLEEPABLE();
2694
2695 /* XXX This should have a pserialized fast path -- TBD. */
2696 mutex_enter(&config_misc_lock);
2697 mutex_enter(&alldevs_lock);
2698 retry: if (unit < 0 || unit >= cd->cd_ndevs ||
2699 (dv = cd->cd_devs[unit]) == NULL ||
2700 dv->dv_del_gen != 0 ||
2701 dv->dv_detached) {
2702 dv = NULL;
2703 } else {
2704 /*
2705 * Wait for the device to stabilize, if attaching or
2706 * detaching. Either way we must wait for *_attach or
2707 * *_detach to complete, and either way we must retry:
2708 * even if detaching, *_detach might fail (EBUSY) so
2709 * the device may still be there.
2710 */
2711 if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
2712 dv->dv_detaching != NULL) {
2713 mutex_exit(&alldevs_lock);
2714 cv_wait(&config_misc_cv, &config_misc_lock);
2715 mutex_enter(&alldevs_lock);
2716 goto retry;
2717 }
2718 localcount_acquire(dv->dv_localcount);
2719 }
2720 mutex_exit(&alldevs_lock);
2721 mutex_exit(&config_misc_lock);
2722
2723 return dv;
2724 }
2725
2726 /*
2727 * device_release:
2728 *
2729 * Release a reference to a device acquired with
2730 * device_lookup_acquire.
2731 */
2732 void
2733 device_release(device_t dv)
2734 {
2735
2736 localcount_release(dv->dv_localcount,
2737 &config_misc_cv, &config_misc_lock);
2738 }
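
/*
 * Illustrative sketch (not compiled): holding a device reference
 * across a character device open.  example_cd, example_softc,
 * example_do_open and the exampleopen entry point are hypothetical
 * stand-ins for a real driver's cfdriver, softc and cdevsw code.
 */
#if 0
static int
exampleopen(dev_t devno, int flag, int mode, struct lwp *l)
{
	device_t self;
	struct example_softc *sc;
	int error;

	/* May block while the unit is attaching or detaching. */
	self = device_lookup_acquire(&example_cd, minor(devno));
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);

	error = example_do_open(sc, flag, mode, l);

	/* Drop the reference so a pending detach can drain. */
	device_release(self);
	return error;
}
#endif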
2739
2740 /*
2741 * device_find_by_xname:
2742 *
2743 * Returns the device of the given name or NULL if it doesn't exist.
2744 */
2745 device_t
2746 device_find_by_xname(const char *name)
2747 {
2748 device_t dv;
2749 deviter_t di;
2750
2751 for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2752 if (strcmp(device_xname(dv), name) == 0)
2753 break;
2754 }
2755 deviter_release(&di);
2756
2757 return dv;
2758 }
2759
2760 /*
2761 * device_find_by_driver_unit:
2762 *
2763 * Returns the device of the given driver name and unit or
2764 * NULL if it doesn't exist.
2765 */
2766 device_t
2767 device_find_by_driver_unit(const char *name, int unit)
2768 {
2769 struct cfdriver *cd;
2770
2771 if ((cd = config_cfdriver_lookup(name)) == NULL)
2772 return NULL;
2773 return device_lookup(cd, unit);
2774 }
2775
2776 static bool
2777 match_strcmp(const char * const s1, const char * const s2)
2778 {
2779 return strcmp(s1, s2) == 0;
2780 }
2781
2782 static bool
2783 match_pmatch(const char * const s1, const char * const s2)
2784 {
2785 return pmatch(s1, s2, NULL) == 2;
2786 }
2787
2788 static bool
2789 strarray_match_internal(const char ** const strings,
2790 unsigned int const nstrings, const char * const str,
2791 unsigned int * const indexp,
2792 bool (*match_fn)(const char *, const char *))
2793 {
2794 unsigned int i;
2795
2796 if (strings == NULL || nstrings == 0) {
2797 return false;
2798 }
2799
2800 for (i = 0; i < nstrings; i++) {
2801 if ((*match_fn)(strings[i], str)) {
2802 *indexp = i;
2803 return true;
2804 }
2805 }
2806
2807 return false;
2808 }
2809
2810 static int
2811 strarray_match(const char ** const strings, unsigned int const nstrings,
2812 const char * const str)
2813 {
2814 unsigned int idx;
2815
2816 if (strarray_match_internal(strings, nstrings, str, &idx,
2817 match_strcmp)) {
2818 return (int)(nstrings - idx);
2819 }
2820 return 0;
2821 }
2822
2823 static int
2824 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
2825 const char * const pattern)
2826 {
2827 unsigned int idx;
2828
2829 if (strarray_match_internal(strings, nstrings, pattern, &idx,
2830 match_pmatch)) {
2831 return (int)(nstrings - idx);
2832 }
2833 return 0;
2834 }
2835
2836 static int
2837 device_compatible_match_strarray_internal(
2838 const char **device_compats, int ndevice_compats,
2839 const struct device_compatible_entry *driver_compats,
2840 const struct device_compatible_entry **matching_entryp,
2841 int (*match_fn)(const char **, unsigned int, const char *))
2842 {
2843 const struct device_compatible_entry *dce = NULL;
2844 int rv;
2845
2846 if (ndevice_compats == 0 || device_compats == NULL ||
2847 driver_compats == NULL)
2848 return 0;
2849
2850 for (dce = driver_compats; dce->compat != NULL; dce++) {
2851 rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
2852 if (rv != 0) {
2853 if (matching_entryp != NULL) {
2854 *matching_entryp = dce;
2855 }
2856 return rv;
2857 }
2858 }
2859 return 0;
2860 }
2861
2862 /*
2863 * device_compatible_match:
2864 *
2865 * Match a driver's "compatible" data against a device's
2866  *	"compatible" strings.  Returns a result weighted by
2867 * which device "compatible" string was matched.
2868 */
2869 int
2870 device_compatible_match(const char **device_compats, int ndevice_compats,
2871 const struct device_compatible_entry *driver_compats)
2872 {
2873 return device_compatible_match_strarray_internal(device_compats,
2874 ndevice_compats, driver_compats, NULL, strarray_match);
2875 }
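
/*
 * Illustrative sketch (not compiled): a driver match routine scoring
 * itself with device_compatible_match().  The compat strings,
 * example_* names and attach-args structure are hypothetical; the
 * { .compat = NULL } entry is the table terminator checked by the
 * matcher above.
 */
#if 0
static const struct device_compatible_entry example_compat_data[] = {
	{ .compat = "vendor,example-v2" },
	{ .compat = "vendor,example" },
	{ .compat = NULL }
};

static int
example_match(device_t parent, cfdata_t cf, void *aux)
{
	struct examplebus_attach_args *eaa = aux;

	/*
	 * Non-zero means match; the value is weighted by which of
	 * the device's "compatible" strings matched, with earlier
	 * (more specific) device strings scoring higher.
	 */
	return device_compatible_match(eaa->eaa_compats,
	    eaa->eaa_ncompats, example_compat_data);
}
#endif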
2876
2877 /*
2878 * device_compatible_pmatch:
2879 *
2880 * Like device_compatible_match(), but uses pmatch(9) to compare
2881 * the device "compatible" strings against patterns in the
2882 * driver's "compatible" data.
2883 */
2884 int
2885 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
2886 const struct device_compatible_entry *driver_compats)
2887 {
2888 return device_compatible_match_strarray_internal(device_compats,
2889 ndevice_compats, driver_compats, NULL, strarray_pmatch);
2890 }
2891
2892 static int
2893 device_compatible_match_strlist_internal(
2894 const char * const device_compats, size_t const device_compatsize,
2895 const struct device_compatible_entry *driver_compats,
2896 const struct device_compatible_entry **matching_entryp,
2897 int (*match_fn)(const char *, size_t, const char *))
2898 {
2899 const struct device_compatible_entry *dce = NULL;
2900 int rv;
2901
2902 if (device_compats == NULL || device_compatsize == 0 ||
2903 driver_compats == NULL)
2904 return 0;
2905
2906 for (dce = driver_compats; dce->compat != NULL; dce++) {
2907 rv = (*match_fn)(device_compats, device_compatsize,
2908 dce->compat);
2909 if (rv != 0) {
2910 if (matching_entryp != NULL) {
2911 *matching_entryp = dce;
2912 }
2913 return rv;
2914 }
2915 }
2916 return 0;
2917 }
2918
2919 /*
2920 * device_compatible_match_strlist:
2921 *
2922  *	Like device_compatible_match(), but takes the device
2923 * "compatible" strings as an OpenFirmware-style string
2924 * list.
2925 */
2926 int
2927 device_compatible_match_strlist(
2928 const char * const device_compats, size_t const device_compatsize,
2929 const struct device_compatible_entry *driver_compats)
2930 {
2931 return device_compatible_match_strlist_internal(device_compats,
2932 device_compatsize, driver_compats, NULL, strlist_match);
2933 }
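
/*
 * Illustrative sketch (not compiled): the "strlist" form is a
 * sequence of NUL-terminated strings packed back to back, as in an
 * OpenFirmware/FDT "compatible" property.  The names and strings
 * below are hypothetical; sizeof(example_compats) covers both
 * strings including their terminating NULs.
 */
#if 0
static const char example_compats[] =
    "vendor,example-v2\0" "vendor,example";

static int
example_strlist_match(const struct device_compatible_entry *driver_compats)
{
	return device_compatible_match_strlist(example_compats,
	    sizeof(example_compats), driver_compats);
}
#endif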
2934
2935 /*
2936 * device_compatible_pmatch_strlist:
2937 *
2938  *	Like device_compatible_pmatch(), but takes the device
2939 * "compatible" strings as an OpenFirmware-style string
2940 * list.
2941 */
2942 int
2943 device_compatible_pmatch_strlist(
2944 const char * const device_compats, size_t const device_compatsize,
2945 const struct device_compatible_entry *driver_compats)
2946 {
2947 return device_compatible_match_strlist_internal(device_compats,
2948 device_compatsize, driver_compats, NULL, strlist_pmatch);
2949 }
2950
2951 static int
2952 device_compatible_match_id_internal(
2953 uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
2954 const struct device_compatible_entry *driver_compats,
2955 const struct device_compatible_entry **matching_entryp)
2956 {
2957 const struct device_compatible_entry *dce = NULL;
2958
2959 if (mask == 0)
2960 return 0;
2961
2962 for (dce = driver_compats; dce->id != sentinel_id; dce++) {
2963 if ((id & mask) == dce->id) {
2964 if (matching_entryp != NULL) {
2965 *matching_entryp = dce;
2966 }
2967 return 1;
2968 }
2969 }
2970 return 0;
2971 }
2972
2973 /*
2974 * device_compatible_match_id:
2975 *
2976 * Like device_compatible_match(), but takes a single
2977 * unsigned integer device ID.
2978 */
2979 int
2980 device_compatible_match_id(
2981 uintptr_t const id, uintptr_t const sentinel_id,
2982 const struct device_compatible_entry *driver_compats)
2983 {
2984 return device_compatible_match_id_internal(id, (uintptr_t)-1,
2985 sentinel_id, driver_compats, NULL);
2986 }
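
/*
 * Illustrative sketch (not compiled): matching on integer IDs, for
 * example a bus-specific product code.  The IDs and example_* names
 * are hypothetical; here 0 serves as the sentinel terminating the
 * table, as checked by the matcher above.
 */
#if 0
static const struct device_compatible_entry example_id_data[] = {
	{ .id = 0x1234 },
	{ .id = 0x5678 },
	{ .id = 0 }		/* sentinel */
};

static int
example_id_match(uintptr_t product_id)
{
	/* Returns 1 on a match, 0 otherwise. */
	return device_compatible_match_id(product_id, 0, example_id_data);
}
#endif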
2987
2988 /*
2989 * device_compatible_lookup:
2990 *
2991 * Look up and return the device_compatible_entry, using the
2992 * same matching criteria used by device_compatible_match().
2993 */
2994 const struct device_compatible_entry *
2995 device_compatible_lookup(const char **device_compats, int ndevice_compats,
2996 const struct device_compatible_entry *driver_compats)
2997 {
2998 const struct device_compatible_entry *dce;
2999
3000 if (device_compatible_match_strarray_internal(device_compats,
3001 ndevice_compats, driver_compats, &dce, strarray_match)) {
3002 return dce;
3003 }
3004 return NULL;
3005 }
3006
3007 /*
3008 * device_compatible_plookup:
3009 *
3010 * Look up and return the device_compatible_entry, using the
3011 * same matching criteria used by device_compatible_pmatch().
3012 */
3013 const struct device_compatible_entry *
3014 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3015 const struct device_compatible_entry *driver_compats)
3016 {
3017 const struct device_compatible_entry *dce;
3018
3019 if (device_compatible_match_strarray_internal(device_compats,
3020 ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3021 return dce;
3022 }
3023 return NULL;
3024 }
3025
3026 /*
3027 * device_compatible_lookup_strlist:
3028 *
3029  *	Like device_compatible_lookup(), but takes the device
3030 * "compatible" strings as an OpenFirmware-style string
3031 * list.
3032 */
3033 const struct device_compatible_entry *
3034 device_compatible_lookup_strlist(
3035 const char * const device_compats, size_t const device_compatsize,
3036 const struct device_compatible_entry *driver_compats)
3037 {
3038 const struct device_compatible_entry *dce;
3039
3040 if (device_compatible_match_strlist_internal(device_compats,
3041 device_compatsize, driver_compats, &dce, strlist_match)) {
3042 return dce;
3043 }
3044 return NULL;
3045 }
3046
3047 /*
3048 * device_compatible_plookup_strlist:
3049 *
3050  *	Like device_compatible_plookup(), but takes the device
3051 * "compatible" strings as an OpenFirmware-style string
3052 * list.
3053 */
3054 const struct device_compatible_entry *
3055 device_compatible_plookup_strlist(
3056 const char * const device_compats, size_t const device_compatsize,
3057 const struct device_compatible_entry *driver_compats)
3058 {
3059 const struct device_compatible_entry *dce;
3060
3061 if (device_compatible_match_strlist_internal(device_compats,
3062 device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3063 return dce;
3064 }
3065 return NULL;
3066 }
3067
3068 /*
3069 * device_compatible_lookup_id:
3070 *
3071 * Like device_compatible_lookup(), but takes a single
3072 * unsigned integer device ID.
3073 */
3074 const struct device_compatible_entry *
3075 device_compatible_lookup_id(
3076 uintptr_t const id, uintptr_t const sentinel_id,
3077 const struct device_compatible_entry *driver_compats)
3078 {
3079 const struct device_compatible_entry *dce;
3080
3081 if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3082 sentinel_id, driver_compats, &dce)) {
3083 return dce;
3084 }
3085 return NULL;
3086 }
3087
3088 /*
3089 * Power management related functions.
3090 */
3091
3092 bool
3093 device_pmf_is_registered(device_t dev)
3094 {
3095 return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3096 }
3097
3098 bool
3099 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
3100 {
3101 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3102 return true;
3103 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3104 return false;
3105 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3106 dev->dv_driver_suspend != NULL &&
3107 !(*dev->dv_driver_suspend)(dev, qual))
3108 return false;
3109
3110 dev->dv_flags |= DVF_DRIVER_SUSPENDED;
3111 return true;
3112 }
3113
3114 bool
3115 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3116 {
3117 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3118 return true;
3119 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3120 return false;
3121 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3122 dev->dv_driver_resume != NULL &&
3123 !(*dev->dv_driver_resume)(dev, qual))
3124 return false;
3125
3126 dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3127 return true;
3128 }
3129
3130 bool
3131 device_pmf_driver_shutdown(device_t dev, int how)
3132 {
3133
3134 	if (dev->dv_driver_shutdown != NULL &&
3135 !(*dev->dv_driver_shutdown)(dev, how))
3136 return false;
3137 return true;
3138 }
3139
3140 bool
3141 device_pmf_driver_register(device_t dev,
3142 bool (*suspend)(device_t, const pmf_qual_t *),
3143 bool (*resume)(device_t, const pmf_qual_t *),
3144 bool (*shutdown)(device_t, int))
3145 {
3146 dev->dv_driver_suspend = suspend;
3147 dev->dv_driver_resume = resume;
3148 dev->dv_driver_shutdown = shutdown;
3149 dev->dv_flags |= DVF_POWER_HANDLERS;
3150 return true;
3151 }
3152
3153 void
3154 device_pmf_driver_deregister(device_t dev)
3155 {
3156 device_lock_t dvl = device_getlock(dev);
3157
3158 dev->dv_driver_suspend = NULL;
3159 dev->dv_driver_resume = NULL;
3160
3161 mutex_enter(&dvl->dvl_mtx);
3162 dev->dv_flags &= ~DVF_POWER_HANDLERS;
3163 while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
3164 /* Wake a thread that waits for the lock. That
3165 * thread will fail to acquire the lock, and then
3166 * it will wake the next thread that waits for the
3167 * lock, or else it will wake us.
3168 */
3169 cv_signal(&dvl->dvl_cv);
3170 pmflock_debug(dev, __func__, __LINE__);
3171 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3172 pmflock_debug(dev, __func__, __LINE__);
3173 }
3174 mutex_exit(&dvl->dvl_mtx);
3175 }
3176
3177 bool
3178 device_pmf_driver_child_register(device_t dev)
3179 {
3180 device_t parent = device_parent(dev);
3181
3182 if (parent == NULL || parent->dv_driver_child_register == NULL)
3183 return true;
3184 return (*parent->dv_driver_child_register)(dev);
3185 }
3186
3187 void
3188 device_pmf_driver_set_child_register(device_t dev,
3189 bool (*child_register)(device_t))
3190 {
3191 dev->dv_driver_child_register = child_register;
3192 }
3193
3194 static void
3195 pmflock_debug(device_t dev, const char *func, int line)
3196 {
3197 #ifdef PMFLOCK_DEBUG
3198 device_lock_t dvl = device_getlock(dev);
3199 const char *curlwp_name;
3200
3201 if (curlwp->l_name != NULL)
3202 curlwp_name = curlwp->l_name;
3203 else
3204 curlwp_name = curlwp->l_proc->p_comm;
3205
3206 aprint_debug_dev(dev,
3207 "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3208 curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3209 #endif /* PMFLOCK_DEBUG */
3210 }
3211
3212 static bool
3213 device_pmf_lock1(device_t dev)
3214 {
3215 device_lock_t dvl = device_getlock(dev);
3216
3217 while (device_pmf_is_registered(dev) &&
3218 dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3219 dvl->dvl_nwait++;
3220 pmflock_debug(dev, __func__, __LINE__);
3221 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3222 pmflock_debug(dev, __func__, __LINE__);
3223 dvl->dvl_nwait--;
3224 }
3225 if (!device_pmf_is_registered(dev)) {
3226 pmflock_debug(dev, __func__, __LINE__);
3227 /* We could not acquire the lock, but some other thread may
3228 * wait for it, also. Wake that thread.
3229 */
3230 cv_signal(&dvl->dvl_cv);
3231 return false;
3232 }
3233 dvl->dvl_nlock++;
3234 dvl->dvl_holder = curlwp;
3235 pmflock_debug(dev, __func__, __LINE__);
3236 return true;
3237 }
3238
3239 bool
3240 device_pmf_lock(device_t dev)
3241 {
3242 bool rc;
3243 device_lock_t dvl = device_getlock(dev);
3244
3245 mutex_enter(&dvl->dvl_mtx);
3246 rc = device_pmf_lock1(dev);
3247 mutex_exit(&dvl->dvl_mtx);
3248
3249 return rc;
3250 }
3251
3252 void
3253 device_pmf_unlock(device_t dev)
3254 {
3255 device_lock_t dvl = device_getlock(dev);
3256
3257 KASSERT(dvl->dvl_nlock > 0);
3258 mutex_enter(&dvl->dvl_mtx);
3259 if (--dvl->dvl_nlock == 0)
3260 dvl->dvl_holder = NULL;
3261 cv_signal(&dvl->dvl_cv);
3262 pmflock_debug(dev, __func__, __LINE__);
3263 mutex_exit(&dvl->dvl_mtx);
3264 }
3265
3266 device_lock_t
3267 device_getlock(device_t dev)
3268 {
3269 return &dev->dv_lock;
3270 }
3271
3272 void *
3273 device_pmf_bus_private(device_t dev)
3274 {
3275 return dev->dv_bus_private;
3276 }
3277
3278 bool
3279 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3280 {
3281 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3282 return true;
3283 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3284 (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3285 return false;
3286 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3287 dev->dv_bus_suspend != NULL &&
3288 !(*dev->dv_bus_suspend)(dev, qual))
3289 return false;
3290
3291 dev->dv_flags |= DVF_BUS_SUSPENDED;
3292 return true;
3293 }
3294
3295 bool
3296 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3297 {
3298 if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3299 return true;
3300 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3301 dev->dv_bus_resume != NULL &&
3302 !(*dev->dv_bus_resume)(dev, qual))
3303 return false;
3304
3305 dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3306 return true;
3307 }
3308
3309 bool
3310 device_pmf_bus_shutdown(device_t dev, int how)
3311 {
3312
3313 	if (dev->dv_bus_shutdown != NULL &&
3314 !(*dev->dv_bus_shutdown)(dev, how))
3315 return false;
3316 return true;
3317 }
3318
3319 void
3320 device_pmf_bus_register(device_t dev, void *priv,
3321 bool (*suspend)(device_t, const pmf_qual_t *),
3322 bool (*resume)(device_t, const pmf_qual_t *),
3323 bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3324 {
3325 dev->dv_bus_private = priv;
3326 dev->dv_bus_resume = resume;
3327 dev->dv_bus_suspend = suspend;
3328 dev->dv_bus_shutdown = shutdown;
3329 dev->dv_bus_deregister = deregister;
3330 }
3331
3332 void
3333 device_pmf_bus_deregister(device_t dev)
3334 {
3335 if (dev->dv_bus_deregister == NULL)
3336 return;
3337 (*dev->dv_bus_deregister)(dev);
3338 dev->dv_bus_private = NULL;
3339 dev->dv_bus_suspend = NULL;
3340 dev->dv_bus_resume = NULL;
3341 dev->dv_bus_deregister = NULL;
3342 }
3343
3344 void *
3345 device_pmf_class_private(device_t dev)
3346 {
3347 return dev->dv_class_private;
3348 }
3349
3350 bool
3351 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3352 {
3353 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3354 return true;
3355 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3356 dev->dv_class_suspend != NULL &&
3357 !(*dev->dv_class_suspend)(dev, qual))
3358 return false;
3359
3360 dev->dv_flags |= DVF_CLASS_SUSPENDED;
3361 return true;
3362 }
3363
3364 bool
3365 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3366 {
3367 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3368 return true;
3369 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3370 (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3371 return false;
3372 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3373 dev->dv_class_resume != NULL &&
3374 !(*dev->dv_class_resume)(dev, qual))
3375 return false;
3376
3377 dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3378 return true;
3379 }
3380
3381 void
3382 device_pmf_class_register(device_t dev, void *priv,
3383 bool (*suspend)(device_t, const pmf_qual_t *),
3384 bool (*resume)(device_t, const pmf_qual_t *),
3385 void (*deregister)(device_t))
3386 {
3387 dev->dv_class_private = priv;
3388 dev->dv_class_suspend = suspend;
3389 dev->dv_class_resume = resume;
3390 dev->dv_class_deregister = deregister;
3391 }
3392
3393 void
3394 device_pmf_class_deregister(device_t dev)
3395 {
3396 if (dev->dv_class_deregister == NULL)
3397 return;
3398 (*dev->dv_class_deregister)(dev);
3399 dev->dv_class_private = NULL;
3400 dev->dv_class_suspend = NULL;
3401 dev->dv_class_resume = NULL;
3402 dev->dv_class_deregister = NULL;
3403 }
3404
3405 bool
3406 device_active(device_t dev, devactive_t type)
3407 {
3408 size_t i;
3409
3410 if (dev->dv_activity_count == 0)
3411 return false;
3412
3413 for (i = 0; i < dev->dv_activity_count; ++i) {
3414 if (dev->dv_activity_handlers[i] == NULL)
3415 break;
3416 (*dev->dv_activity_handlers[i])(dev, type);
3417 }
3418
3419 return true;
3420 }
3421
3422 bool
3423 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3424 {
3425 void (**new_handlers)(device_t, devactive_t);
3426 void (**old_handlers)(device_t, devactive_t);
3427 size_t i, old_size, new_size;
3428 int s;
3429
3430 old_handlers = dev->dv_activity_handlers;
3431 old_size = dev->dv_activity_count;
3432
3433 KASSERT(old_size == 0 || old_handlers != NULL);
3434
3435 for (i = 0; i < old_size; ++i) {
3436 KASSERT(old_handlers[i] != handler);
3437 if (old_handlers[i] == NULL) {
3438 old_handlers[i] = handler;
3439 return true;
3440 }
3441 }
3442
3443 new_size = old_size + 4;
3444 new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3445
3446 for (i = 0; i < old_size; ++i)
3447 new_handlers[i] = old_handlers[i];
3448 new_handlers[old_size] = handler;
3449 for (i = old_size+1; i < new_size; ++i)
3450 new_handlers[i] = NULL;
3451
3452 s = splhigh();
3453 dev->dv_activity_count = new_size;
3454 dev->dv_activity_handlers = new_handlers;
3455 splx(s);
3456
3457 if (old_size > 0)
3458 kmem_free(old_handlers, sizeof(void *) * old_size);
3459
3460 return true;
3461 }
3462
3463 void
3464 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3465 {
3466 void (**old_handlers)(device_t, devactive_t);
3467 size_t i, old_size;
3468 int s;
3469
3470 old_handlers = dev->dv_activity_handlers;
3471 old_size = dev->dv_activity_count;
3472
3473 for (i = 0; i < old_size; ++i) {
3474 if (old_handlers[i] == handler)
3475 break;
3476 if (old_handlers[i] == NULL)
3477 return; /* XXX panic? */
3478 }
3479
3480 if (i == old_size)
3481 return; /* XXX panic? */
3482
3483 for (; i < old_size - 1; ++i) {
3484 if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3485 continue;
3486
3487 if (i == 0) {
3488 s = splhigh();
3489 dev->dv_activity_count = 0;
3490 dev->dv_activity_handlers = NULL;
3491 splx(s);
3492 kmem_free(old_handlers, sizeof(void *) * old_size);
3493 }
3494 return;
3495 }
3496 old_handlers[i] = NULL;
3497 }
3498
3499 /* Return true iff the device_t `dv' exists at generation `gen'. */
3500 static bool
3501 device_exists_at(device_t dv, devgen_t gen)
3502 {
3503 return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3504 dv->dv_add_gen <= gen;
3505 }
3506
3507 static bool
3508 deviter_visits(const deviter_t *di, device_t dv)
3509 {
3510 return device_exists_at(dv, di->di_gen);
3511 }
3512
3513 /*
3514 * Device Iteration
3515 *
3516 * deviter_t: a device iterator. Holds state for a "walk" visiting
3517 * each device_t's in the device tree.
3518 *
3519 * deviter_init(di, flags): initialize the device iterator `di'
3520 * to "walk" the device tree. deviter_next(di) will return
3521 * the first device_t in the device tree, or NULL if there are
3522 * no devices.
3523 *
3524 * `flags' is one or more of DEVITER_F_RW, indicating that the
3525 * caller intends to modify the device tree by calling
3526 * config_detach(9) on devices in the order that the iterator
3527 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3528 * nearest the "root" of the device tree to be returned, first;
3529 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3530 * the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3531 * indicating both that deviter_init() should not respect any
3532 * locks on the device tree, and that deviter_next(di) may run
3533 * in more than one LWP before the walk has finished.
3534 *
3535 * Only one DEVITER_F_RW iterator may be in the device tree at
3536 * once.
3537 *
3538 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3539 *
3540 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3541 * DEVITER_F_LEAVES_FIRST are used in combination.
3542 *
3543 * deviter_first(di, flags): initialize the device iterator `di'
3544 * and return the first device_t in the device tree, or NULL
3545 * if there are no devices. The statement
3546 *
3547 * dv = deviter_first(di);
3548 *
3549 * is shorthand for
3550 *
3551 * deviter_init(di);
3552 * dv = deviter_next(di);
3553 *
3554 * deviter_next(di): return the next device_t in the device tree,
3555 * or NULL if there are no more devices. deviter_next(di)
3556 * is undefined if `di' was not initialized with deviter_init() or
3557 * deviter_first().
3558 *
3559 * deviter_release(di): stops iteration (subsequent calls to
3560 * deviter_next() will return NULL), releases any locks and
3561 * resources held by the device iterator.
3562 *
3563 * Device iteration does not return device_t's in any particular
3564 * order. An iterator will never return the same device_t twice.
3565 * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3566 * is called repeatedly on the same `di', it will eventually return
3567 * NULL. It is ok to attach/detach devices during device iteration.
3568 */
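
/*
 * Illustrative sketch (not compiled): a read-only, root-first walk
 * over all attached devices using the iterator API described above.
 * The function name is hypothetical.
 */
#if 0
static void
example_print_device_tree(void)
{
	deviter_t di;
	device_t dv;

	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
	     dv = deviter_next(&di)) {
		/* With DEVITER_F_ROOT_FIRST, parents come before children. */
		printf("%s\n", device_xname(dv));
	}
	deviter_release(&di);
}
#endif
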
3569 void
3570 deviter_init(deviter_t *di, deviter_flags_t flags)
3571 {
3572 device_t dv;
3573
3574 memset(di, 0, sizeof(*di));
3575
3576 if ((flags & DEVITER_F_SHUTDOWN) != 0)
3577 flags |= DEVITER_F_RW;
3578
3579 mutex_enter(&alldevs_lock);
3580 if ((flags & DEVITER_F_RW) != 0)
3581 alldevs_nwrite++;
3582 else
3583 alldevs_nread++;
3584 di->di_gen = alldevs_gen++;
3585 di->di_flags = flags;
3586
3587 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3588 case DEVITER_F_LEAVES_FIRST:
3589 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3590 if (!deviter_visits(di, dv))
3591 continue;
3592 di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3593 }
3594 break;
3595 case DEVITER_F_ROOT_FIRST:
3596 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3597 if (!deviter_visits(di, dv))
3598 continue;
3599 di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3600 }
3601 break;
3602 default:
3603 break;
3604 }
3605
3606 deviter_reinit(di);
3607 mutex_exit(&alldevs_lock);
3608 }
3609
3610 static void
3611 deviter_reinit(deviter_t *di)
3612 {
3613
3614 KASSERT(mutex_owned(&alldevs_lock));
3615 if ((di->di_flags & DEVITER_F_RW) != 0)
3616 di->di_prev = TAILQ_LAST(&alldevs, devicelist);
3617 else
3618 di->di_prev = TAILQ_FIRST(&alldevs);
3619 }
3620
3621 device_t
3622 deviter_first(deviter_t *di, deviter_flags_t flags)
3623 {
3624
3625 deviter_init(di, flags);
3626 return deviter_next(di);
3627 }
3628
3629 static device_t
3630 deviter_next2(deviter_t *di)
3631 {
3632 device_t dv;
3633
3634 KASSERT(mutex_owned(&alldevs_lock));
3635
3636 dv = di->di_prev;
3637
3638 if (dv == NULL)
3639 return NULL;
3640
3641 if ((di->di_flags & DEVITER_F_RW) != 0)
3642 di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
3643 else
3644 di->di_prev = TAILQ_NEXT(dv, dv_list);
3645
3646 return dv;
3647 }
3648
3649 static device_t
3650 deviter_next1(deviter_t *di)
3651 {
3652 device_t dv;
3653
3654 KASSERT(mutex_owned(&alldevs_lock));
3655
3656 do {
3657 dv = deviter_next2(di);
3658 } while (dv != NULL && !deviter_visits(di, dv));
3659
3660 return dv;
3661 }
3662
3663 device_t
3664 deviter_next(deviter_t *di)
3665 {
3666 device_t dv = NULL;
3667
3668 mutex_enter(&alldevs_lock);
3669 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3670 case 0:
3671 dv = deviter_next1(di);
3672 break;
3673 case DEVITER_F_LEAVES_FIRST:
3674 while (di->di_curdepth >= 0) {
3675 if ((dv = deviter_next1(di)) == NULL) {
3676 di->di_curdepth--;
3677 deviter_reinit(di);
3678 } else if (dv->dv_depth == di->di_curdepth)
3679 break;
3680 }
3681 break;
3682 case DEVITER_F_ROOT_FIRST:
3683 while (di->di_curdepth <= di->di_maxdepth) {
3684 if ((dv = deviter_next1(di)) == NULL) {
3685 di->di_curdepth++;
3686 deviter_reinit(di);
3687 } else if (dv->dv_depth == di->di_curdepth)
3688 break;
3689 }
3690 break;
3691 default:
3692 break;
3693 }
3694 mutex_exit(&alldevs_lock);
3695
3696 return dv;
3697 }
3698
3699 void
3700 deviter_release(deviter_t *di)
3701 {
3702 bool rw = (di->di_flags & DEVITER_F_RW) != 0;
3703
3704 mutex_enter(&alldevs_lock);
3705 if (rw)
3706 --alldevs_nwrite;
3707 else
3708 --alldevs_nread;
3709 /* XXX wake a garbage-collection thread */
3710 mutex_exit(&alldevs_lock);
3711 }
3712
3713 const char *
3714 cfdata_ifattr(const struct cfdata *cf)
3715 {
3716 return cf->cf_pspec->cfp_iattr;
3717 }
3718
3719 bool
3720 ifattr_match(const char *snull, const char *t)
3721 {
3722 return (snull == NULL) || strcmp(snull, t) == 0;
3723 }
3724
3725 void
3726 null_childdetached(device_t self, device_t child)
3727 {
3728 /* do nothing */
3729 }
3730
3731 static void
3732 sysctl_detach_setup(struct sysctllog **clog)
3733 {
3734
3735 sysctl_createv(clog, 0, NULL, NULL,
3736 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
3737 CTLTYPE_BOOL, "detachall",
3738 SYSCTL_DESCR("Detach all devices at shutdown"),
3739 NULL, 0, &detachall, 0,
3740 CTL_KERN, CTL_CREATE, CTL_EOL);
3741 }
3742