subr_devsw.c revision 1.15.6.8 1 /* $NetBSD: subr_devsw.c,v 1.15.6.8 2008/06/29 09:33:14 mjf Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
 *		once the device has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.15.6.8 2008/06/29 09:33:14 mjf Exp $");
73
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82 #include <sys/dirent.h>
83 #include <machine/stdarg.h>
84 #include <sys/disklabel.h>
85
86 #include <miscfs/specfs/specdev.h>
87
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

/* Upper bound on major device numbers; also the size the tables grow to. */
#define	MAXDEVSW	512	/* the maximum of major device number */
/* Sizes of one table entry (pointers) and one name-conversion record. */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
98
/*
 * Current device switch tables and their config(8)-generated static
 * seeds (bdevsw0/cdevsw0).  The "0" tables are used until an attach
 * overflows them, at which point a MAXDEVSW-sized copy is allocated
 * (see bdevsw_attach()/cdevsw_attach()).
 */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
/* Name <-> major conversion table, similarly grown on demand. */
extern struct devsw_conv *devsw_conv, devsw_conv0[];
/* Counts of statically configured entries (fixed at build time). */
extern const int sys_bdevsws, sys_cdevsws;
/* Current capacities of the three tables above. */
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, int *);
static int cdevsw_attach(const struct cdevsw *, int *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

static struct device_name *device_name_alloc(dev_t, device_t, bool,
    enum devtype, const char *, va_list);

/* Protects the device_names list used by the devfs name registry. */
extern kmutex_t dname_lock;
113
/*
 * A table of initialisation functions for device drivers that
 * don't have an attach routine.  Walked once by devsw_init();
 * NULL-terminated.
 */
void (*devsw_init_funcs[])(void) = {
	bpf_init,
	cttyinit,
	mem_init,
	swap_init,
	NULL,
};
125
/*
 * One-time initialisation of the devsw layer: sanity-check the static
 * table sizes, set up the device-name registry (lock + list), and run
 * the init hooks for drivers that have no attach routine.
 */
void
devsw_init(void)
{
	int i;

	/* The static tables must fit within the maximum major number. */
	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);

	mutex_init(&dname_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&device_names);

	/*
	 * Technically, some device drivers don't ever get 'attached'
	 * so we provide this table to allow device drivers to register
	 * their device names.
	 */
	for (i = 0; devsw_init_funcs[i] != NULL; i++)
		devsw_init_funcs[i]();
}
145
146 int
147 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
148 const struct cdevsw *cdev, int *cmajor)
149 {
150 struct devsw_conv *conv;
151 char *name;
152 int error, i;
153
154 if (devname == NULL || cdev == NULL)
155 return (EINVAL);
156
157 mutex_enter(&specfs_lock);
158
159 for (i = 0 ; i < max_devsw_convs ; i++) {
160 conv = &devsw_conv[i];
161 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
162 continue;
163
164 if (*bmajor < 0)
165 *bmajor = conv->d_bmajor;
166 if (*cmajor < 0)
167 *cmajor = conv->d_cmajor;
168
169 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
170 error = EINVAL;
171 goto fail;
172 }
173 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
174 error = EINVAL;
175 goto fail;
176 }
177
178 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
179 cdevsw[*cmajor] != NULL) {
180 error = EEXIST;
181 goto fail;
182 }
183
184 if (bdev != NULL)
185 bdevsw[*bmajor] = bdev;
186 cdevsw[*cmajor] = cdev;
187
188 mutex_exit(&specfs_lock);
189 return (0);
190 }
191
192 error = bdevsw_attach(bdev, bmajor);
193 if (error != 0)
194 goto fail;
195 error = cdevsw_attach(cdev, cmajor);
196 if (error != 0) {
197 devsw_detach_locked(bdev, NULL);
198 goto fail;
199 }
200
201 for (i = 0 ; i < max_devsw_convs ; i++) {
202 if (devsw_conv[i].d_name == NULL)
203 break;
204 }
205 if (i == max_devsw_convs) {
206 struct devsw_conv *newptr;
207 int old, new;
208
209 old = max_devsw_convs;
210 new = old + 1;
211
212 newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
213 if (newptr == NULL) {
214 devsw_detach_locked(bdev, cdev);
215 error = ENOMEM;
216 goto fail;
217 }
218 newptr[old].d_name = NULL;
219 newptr[old].d_bmajor = -1;
220 newptr[old].d_cmajor = -1;
221 memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
222 if (devsw_conv != devsw_conv0)
223 kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
224 devsw_conv = newptr;
225 max_devsw_convs = new;
226 }
227
228 i = strlen(devname) + 1;
229 name = kmem_alloc(i, KM_NOSLEEP);
230 if (name == NULL) {
231 devsw_detach_locked(bdev, cdev);
232 goto fail;
233 }
234 strlcpy(name, devname, i);
235
236 devsw_conv[i].d_name = name;
237 devsw_conv[i].d_bmajor = *bmajor;
238 devsw_conv[i].d_cmajor = *cmajor;
239
240 mutex_exit(&specfs_lock);
241 return (0);
242 fail:
243 mutex_exit(&specfs_lock);
244 return (error);
245 }
246
/*
 * Install 'devsw' into the block device table at *devmajor, choosing a
 * free major if *devmajor < 0.  On first overflow of the static table,
 * "fork" it into a MAXDEVSW-sized allocation; the copy-then-publish
 * order lets existing readers keep using the old table safely.
 *
 * => Caller must hold specfs_lock.
 * => Returns 0, ENOMEM (majors exhausted / allocation failed), or
 *    EEXIST (slot already taken).
 */
static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&specfs_lock));

	/* Character-only drivers pass bdev == NULL; nothing to do. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Pick the first dynamic major that is both unused in
		 * the table and not reserved by a name-conversion
		 * record (a previously attached driver's major).
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/* The table is only ever forked once (static -> heap). */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		/* Copy before publishing the new pointer. */
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}
295
/*
 * Install 'devsw' into the character device table at *devmajor,
 * choosing a free major if *devmajor < 0.  Mirrors bdevsw_attach(),
 * including the one-time fork of the static table.
 *
 * => Caller must hold specfs_lock.
 * => Returns 0, ENOMEM, or EEXIST.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&specfs_lock));

	if (*devmajor < 0) {
		/* Find a major unused by both the table and the name map. */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/* The table is only ever forked once (static -> heap). */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		/* Copy before publishing the new pointer. */
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}
341
342 static void
343 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
344 {
345 int i;
346
347 KASSERT(mutex_owned(&specfs_lock));
348
349 if (bdev != NULL) {
350 for (i = 0 ; i < max_bdevsws ; i++) {
351 if (bdevsw[i] != bdev)
352 continue;
353 bdevsw[i] = NULL;
354 break;
355 }
356 }
357 if (cdev != NULL) {
358 for (i = 0 ; i < max_cdevsws ; i++) {
359 if (cdevsw[i] != cdev)
360 continue;
361 cdevsw[i] = NULL;
362 break;
363 }
364 }
365 }
366
/*
 * Remove a driver's entries from the device switch tables.
 * Locked wrapper around devsw_detach_locked(); always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&specfs_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&specfs_lock);
	return 0;
}
376
377 /*
378 * Look up a block device by number.
379 *
380 * => Caller must ensure that the device is attached.
381 */
382 const struct bdevsw *
383 bdevsw_lookup(dev_t dev)
384 {
385 int bmajor;
386
387 if (dev == NODEV)
388 return (NULL);
389 bmajor = major(dev);
390 if (bmajor < 0 || bmajor >= max_bdevsws)
391 return (NULL);
392
393 return (bdevsw[bmajor]);
394 }
395
396 /*
397 * Look up a character device by number.
398 *
399 * => Caller must ensure that the device is attached.
400 */
401 const struct cdevsw *
402 cdevsw_lookup(dev_t dev)
403 {
404 int cmajor;
405
406 if (dev == NODEV)
407 return (NULL);
408 cmajor = major(dev);
409 if (cmajor < 0 || cmajor >= max_cdevsws)
410 return (NULL);
411
412 return (cdevsw[cmajor]);
413 }
414
415 /*
416 * Look up a block device by reference to its operations set.
417 *
418 * => Caller must ensure that the device is not detached, and therefore
419 * that the returned major is still valid when dereferenced.
420 */
421 int
422 bdevsw_lookup_major(const struct bdevsw *bdev)
423 {
424 int bmajor;
425
426 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
427 if (bdevsw[bmajor] == bdev)
428 return (bmajor);
429 }
430
431 return (-1);
432 }
433
434 /*
435 * Look up a character device by reference to its operations set.
436 *
437 * => Caller must ensure that the device is not detached, and therefore
438 * that the returned major is still valid when dereferenced.
439 */
440 int
441 cdevsw_lookup_major(const struct cdevsw *cdev)
442 {
443 int cmajor;
444
445 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
446 if (cdevsw[cmajor] == cdev)
447 return (cmajor);
448 }
449
450 return (-1);
451 }
452
453 /*
454 * Convert from block major number to name.
455 *
456 * => Caller must ensure that the device is not detached, and therefore
457 * that the name pointer is still valid when dereferenced.
458 */
459 const char *
460 devsw_blk2name(int bmajor)
461 {
462 const char *name;
463 int cmajor, i;
464
465 name = NULL;
466 cmajor = -1;
467
468 mutex_enter(&specfs_lock);
469 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
470 mutex_exit(&specfs_lock);
471 return (NULL);
472 }
473 for (i = 0 ; i < max_devsw_convs; i++) {
474 if (devsw_conv[i].d_bmajor == bmajor) {
475 cmajor = devsw_conv[i].d_cmajor;
476 break;
477 }
478 }
479 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
480 name = devsw_conv[i].d_name;
481 mutex_exit(&specfs_lock);
482
483 return (name);
484 }
485
486 /*
487 * Convert from device name to block major number.
488 *
489 * => Caller must ensure that the device is not detached, and therefore
490 * that the major number is still valid when dereferenced.
491 */
492 int
493 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
494 {
495 struct devsw_conv *conv;
496 int bmajor, i;
497
498 if (name == NULL)
499 return (-1);
500
501 mutex_enter(&specfs_lock);
502 for (i = 0 ; i < max_devsw_convs ; i++) {
503 size_t len;
504
505 conv = &devsw_conv[i];
506 if (conv->d_name == NULL)
507 continue;
508 len = strlen(conv->d_name);
509 if (strncmp(conv->d_name, name, len) != 0)
510 continue;
511 if (*(name +len) && !isdigit(*(name + len)))
512 continue;
513 bmajor = conv->d_bmajor;
514 if (bmajor < 0 || bmajor >= max_bdevsws ||
515 bdevsw[bmajor] == NULL)
516 break;
517 if (devname != NULL) {
518 #ifdef DEVSW_DEBUG
519 if (strlen(conv->d_name) >= devnamelen)
520 printf("devsw_name2blk: too short buffer");
521 #endif /* DEVSW_DEBUG */
522 strncpy(devname, conv->d_name, devnamelen);
523 devname[devnamelen - 1] = '\0';
524 }
525 mutex_exit(&specfs_lock);
526 return (bmajor);
527 }
528
529 mutex_exit(&specfs_lock);
530 return (-1);
531 }
532
533 /*
534 * Convert from device name to char major number.
535 *
536 * => Caller must ensure that the device is not detached, and therefore
537 * that the major number is still valid when dereferenced.
538 */
539 int
540 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
541 {
542 struct devsw_conv *conv;
543 int cmajor, i;
544
545 if (name == NULL)
546 return (-1);
547
548 mutex_enter(&specfs_lock);
549 for (i = 0 ; i < max_devsw_convs ; i++) {
550 size_t len;
551
552 conv = &devsw_conv[i];
553 if (conv->d_name == NULL)
554 continue;
555 len = strlen(conv->d_name);
556 if (strncmp(conv->d_name, name, len) != 0)
557 continue;
558 if (*(name +len) && !isdigit(*(name + len)))
559 continue;
560 cmajor = conv->d_cmajor;
561 if (cmajor < 0 || cmajor >= max_cdevsws ||
562 cdevsw[cmajor] == NULL)
563 break;
564 if (devname != NULL) {
565 #ifdef DEVSW_DEBUG
566 if (strlen(conv->d_name) >= devnamelen)
567 printf("devsw_name2chr: too short buffer");
568 #endif /* DEVSW_DEBUG */
569 strncpy(devname, conv->d_name, devnamelen);
570 devname[devnamelen - 1] = '\0';
571 }
572 mutex_exit(&specfs_lock);
573 return (cmajor);
574 }
575
576 mutex_exit(&specfs_lock);
577 return (-1);
578 }
579
580 /*
581 * Convert from character dev_t to block dev_t.
582 *
583 * => Caller must ensure that the device is not detached, and therefore
584 * that the major number is still valid when dereferenced.
585 */
586 dev_t
587 devsw_chr2blk(dev_t cdev)
588 {
589 int bmajor, cmajor, i;
590 dev_t rv;
591
592 cmajor = major(cdev);
593 bmajor = -1;
594 rv = NODEV;
595
596 mutex_enter(&specfs_lock);
597 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
598 mutex_exit(&specfs_lock);
599 return (NODEV);
600 }
601 for (i = 0 ; i < max_devsw_convs ; i++) {
602 if (devsw_conv[i].d_cmajor == cmajor) {
603 bmajor = devsw_conv[i].d_bmajor;
604 break;
605 }
606 }
607 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
608 rv = makedev(bmajor, minor(cdev));
609 mutex_exit(&specfs_lock);
610
611 return (rv);
612 }
613
614 /*
615 * Convert from block dev_t to character dev_t.
616 *
617 * => Caller must ensure that the device is not detached, and therefore
618 * that the major number is still valid when dereferenced.
619 */
620 dev_t
621 devsw_blk2chr(dev_t bdev)
622 {
623 int bmajor, cmajor, i;
624 dev_t rv;
625
626 bmajor = major(bdev);
627 cmajor = -1;
628 rv = NODEV;
629
630 mutex_enter(&specfs_lock);
631 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
632 mutex_exit(&specfs_lock);
633 return (NODEV);
634 }
635 for (i = 0 ; i < max_devsw_convs ; i++) {
636 if (devsw_conv[i].d_bmajor == bmajor) {
637 cmajor = devsw_conv[i].d_cmajor;
638 break;
639 }
640 }
641 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
642 rv = makedev(cmajor, minor(bdev));
643 mutex_exit(&specfs_lock);
644
645 return (rv);
646 }
647
/*
 * Device access methods.
 */

/*
 * Take the big kernel lock around a driver call unless the driver is
 * marked D_MPSAFE.  Records the decision in the caller's 'mpflag'
 * local so DEV_UNLOCK can undo it.  Wrapped in do/while(0) so the
 * macros expand as single statements (the previous bare-if form was
 * a dangling-else hazard); 'd' is parenthesized against operator
 * surprises in the argument.
 */
#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = ((d)->d_flag & D_MPSAFE)) == 0)	\
			KERNEL_LOCK(1, NULL);			\
	} while (/* CONSTCOND */ 0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0)				\
			KERNEL_UNLOCK_ONE(NULL);		\
	} while (/* CONSTCOND */ 0)
661
/*
 * Call a block driver's d_open method.  Returns ENXIO if no driver
 * is attached at dev's major.
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&specfs_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&specfs_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
684
/*
 * Call a block driver's d_close method.  Returns ENXIO if no driver
 * is attached at dev's major.
 */
int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
700
/*
 * Call a block driver's d_strategy method for buffer 'bp'.
 * Panics if no driver is attached at bp->b_dev's major, since there
 * is no way to report an error to the caller here.
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
714
/*
 * Call a block driver's d_ioctl method.  Returns ENXIO if no driver
 * is attached at dev's major.
 */
int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
730
/*
 * Call a block driver's d_dump method (crash dump).  Returns ENXIO if
 * no driver is attached at dev's major.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
751
752 int
753 bdev_type(dev_t dev)
754 {
755 const struct bdevsw *d;
756
757 if ((d = bdevsw_lookup(dev)) == NULL)
758 return D_OTHER;
759 return d->d_flag & D_TYPEMASK;
760 }
761
/*
 * Call a character driver's d_open method.  Returns ENXIO if no
 * driver is attached at dev's major.
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&specfs_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&specfs_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
784
/*
 * Call a character driver's d_close method.  Returns ENXIO if no
 * driver is attached at dev's major.
 */
int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
800
/*
 * Call a character driver's d_read method.  Returns ENXIO if no
 * driver is attached at dev's major.
 */
int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
816
/*
 * Call a character driver's d_write method.  Returns ENXIO if no
 * driver is attached at dev's major.
 */
int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
832
/*
 * Call a character driver's d_ioctl method.  Returns ENXIO if no
 * driver is attached at dev's major.
 */
int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
848
/*
 * Call a character driver's d_stop method for the tty 'tp'.
 * Silently does nothing if no driver is attached at tp->t_dev's major.
 */
void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}
862
863 struct tty *
864 cdev_tty(dev_t dev)
865 {
866 const struct cdevsw *d;
867
868 if ((d = cdevsw_lookup(dev)) == NULL)
869 return NULL;
870
871 /* XXX Check if necessary. */
872 if (d->d_tty == NULL)
873 return NULL;
874
875 return (*d->d_tty)(dev);
876 }
877
/*
 * Call a character driver's d_poll method.  Returns POLLERR if no
 * driver is attached at dev's major.
 */
int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
893
/*
 * Call a character driver's d_mmap method.  Returns (paddr_t)-1 if
 * no driver is attached at dev's major.
 */
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}
910
/*
 * Call a character driver's d_kqfilter method.  Returns ENXIO if no
 * driver is attached at dev's major.
 */
int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}
926
927 int
928 cdev_type(dev_t dev)
929 {
930 const struct cdevsw *d;
931
932 if ((d = cdevsw_lookup(dev)) == NULL)
933 return D_OTHER;
934 return d->d_flag & D_TYPEMASK;
935 }
936
937 static struct device_name *
938 device_name_alloc(dev_t dev, device_t devp, bool cdev,
939 enum devtype dtype, const char *fmt, va_list src)
940 {
941 struct device_name *dn;
942 va_list dst;
943
944 /* TODO: Check for aliases */
945
946 dn = kmem_zalloc(sizeof(*dn), KM_NOSLEEP);
947 if (dn == NULL)
948 return NULL;
949
950 dn->d_dev = dev;
951 dn->d_devp = devp;
952 dn->d_char = cdev;
953 dn->d_type = dtype;
954
955 dn->d_name = kmem_zalloc(MAXNAMLEN, KM_NOSLEEP);
956 va_copy(dst, src);
957 vsnprintf(dn->d_name, MAXNAMLEN, fmt, dst);
958 va_end(dst);
959
960 return dn;
961 }
962
963 /*
964 * Register a dev_t and name for a device driver with devfs.
965 * We maintain a TAILQ of registered device drivers names and dev_t's.
966 *
967 * => if devp is NULL this device has no device_t instance. An example
968 * of this is zero(4).
969 *
970 * => if there already exists another name for this dev_t, then 'name'
971 * is assumed to be an alias of a previously registered device driver.
972 * TODO: The above isn't actually true at the moment, we just return 0.
973 *
974 * => 'cdev' indiciates whether we are a char or block device.
975 * If 'cdev' is true, we are a character device, otherwise we
976 * are a block device.
977 */
978 int
979 device_register_name(dev_t dev, device_t devp, bool cdev,
980 enum devtype dtype, const char *fmt, ...)
981 {
982 struct device_name *dn;
983 va_list ap;
984
985 va_start(ap, fmt);
986
987 if ((dn = device_name_alloc(dev, devp, cdev, dtype, fmt, ap)) == NULL)
988 return ENOMEM;
989
990 va_end(ap);
991
992 mutex_enter(&dname_lock);
993 TAILQ_INSERT_TAIL(&device_names, dn, d_next);
994 mutex_exit(&dname_lock);
995
996 return 0;
997 }
998
999 /*
1000 * Remove a previously registered name for 'dev'.
1001 *
1002 * => This must be called twice with different values for 'dev' if
1003 * the caller previously registered a name for a character device
1004 * and a name for a block device.
1005 */
1006 int
1007 device_deregister_name(dev_t dev, const char *fmt, ...)
1008 {
1009 int error = 0;
1010 struct device_name *dn;
1011 va_list ap;
1012 char name[MAXNAMLEN];
1013
1014 va_start(ap, fmt);
1015 vsnprintf(name, MAXNAMLEN, fmt, ap);
1016 va_end(ap);
1017
1018 mutex_enter(&dname_lock);
1019 TAILQ_FOREACH(dn, &device_names, d_next) {
1020 if ((strcmp(dn->d_name, name) == 0) && (dn->d_gone == false))
1021 break;
1022 }
1023
1024 if (dn != NULL)
1025 dn->d_gone = true;
1026 else
1027 error = EINVAL;
1028
1029 mutex_exit(&dname_lock);
1030 return error;
1031 }
1032
1033 /*
1034 * Remove all device names for this device_t.
1035 */
1036 int
1037 device_deregister_all(device_t dev)
1038 {
1039 struct device_name *dn;
1040
1041 mutex_enter(&dname_lock);
1042 TAILQ_FOREACH(dn, &device_names, d_next) {
1043 if ((dn->d_devp == dev) && (dn->d_gone == false))
1044 dn->d_gone = true;
1045 }
1046 mutex_exit(&dname_lock);
1047 return 0;
1048 }
1049
1050 struct device_name *
1051 device_lookup_info(dev_t dev, int is_char)
1052 {
1053 struct device_name *dn;
1054
1055 mutex_enter(&dname_lock);
1056 TAILQ_FOREACH(dn, &device_names, d_next) {
1057 if ((dn->d_dev == dev) && (dn->d_char == is_char))
1058 break;
1059 }
1060 mutex_exit(&dname_lock);
1061
1062 return dn;
1063 }
1064
1065 /*
1066 * Register a name for a device_t and wait for the device file to be
1067 * created in devfs mounts. Normally this operation is asynchronous in
1068 * the sense that a device name is registered and at some later time
1069 * a device file will appear in a devfs mount.
1070 *
1071 * cond - A kernel condition variable
1072 * ticks - Timeout value in hz
1073 *
1074 * NOTE: There is no guarantee that a device file will be created,
1075 * however, the caller will be notified in a synchronous manner
1076 * whether the creation failed or not.
1077 */
1078 int
1079 device_register_sync(dev_t dev, device_t devp, bool cdev,
1080 enum devtype dtype, kcondvar_t cond, int ticks, const char *fmt, ...)
1081 {
1082 int error = 0;
1083 struct device_name *dn;
1084 va_list ap;
1085
1086 va_start(ap, fmt);
1087
1088 if ((dn = device_name_alloc(dev, devp, cdev, dtype, fmt, ap)) == NULL)
1089 return ENOMEM;
1090 dn->d_busy = true;
1091 dn->d_cv = cond;
1092
1093 va_end(ap);
1094
1095 mutex_enter(&dname_lock);
1096 TAILQ_INSERT_TAIL(&device_names, dn, d_next);
1097 mutex_exit(&dname_lock);
1098
1099 mutex_init(&dn->d_cvmutex, MUTEX_DEFAULT, IPL_NONE);
1100
1101 mutex_enter(&dn->d_cvmutex);
1102
1103 while (dn->d_busy == true) {
1104 if (ticks <= 0)
1105 error = cv_wait_sig(&dn->d_cv, &dn->d_cvmutex);
1106 else
1107 error = cv_timedwait_sig(&dn->d_cv,
1108 &dn->d_cvmutex, ticks);
1109
1110 }
1111 error = dn->d_retval;
1112 mutex_exit(&dn->d_cvmutex);
1113
1114 return error;
1115 }
1116