subr_devsw.c revision 1.15.6.9 1 /* $NetBSD: subr_devsw.c,v 1.15.6.9 2009/01/17 13:29:19 mjf Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device is has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.15.6.9 2009/01/17 13:29:19 mjf Exp $");
73
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82 #include <sys/dirent.h>
83 #include <machine/stdarg.h>
84 #include <sys/disklabel.h>
85
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

/* Upper bound on any block or character major number (exclusive). */
#define	MAXDEVSW	512	/* the maximum of major device number */
/* Sizes of one table slot / conversion record, used for (re)allocation. */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

/*
 * The live tables (bdevsw/cdevsw) initially point at the static,
 * config(8)-generated tables (bdevsw0/cdevsw0); devsw_conv maps
 * driver names to their block/char majors.  Defined in generated code.
 */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
/* Number of statically configured entries (never reused dynamically). */
extern const int sys_bdevsws, sys_cdevsws;
/* Current capacities of the (possibly reallocated) tables. */
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, int *);
static int cdevsw_attach(const struct cdevsw *, int *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

static struct device_name *device_name_alloc(dev_t, device_t, bool,
    enum devtype, const char *, va_list);

/* Protects the device_names list (devfs name registrations). */
extern kmutex_t dname_lock;

/*
 * A table of initialisation functions for device drivers that
 * don't have an attach routine.  NULL-terminated; each entry is
 * invoked once from devsw_init() so the driver can register its
 * device names.
 */
void (*devsw_init_funcs[])(void) = {
	bpf_init,
	cttyinit,
	mem_init,
	swap_init,
	NULL,
};

/* Protects bdevsw/cdevsw/devsw_conv tables against attach/detach races. */
kmutex_t device_lock;
125
/*
 * devsw_init:
 *
 *	One-time initialisation of the device switch machinery: set up
 *	the locks and the device-name list, then run the init hooks for
 *	drivers that have no autoconf attach routine.
 */
void
devsw_init(void)
{
	int i;

	/* Static tables must leave room below the global major limit. */
	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);

	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&dname_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&device_names);

	/*
	 * Technically, some device drivers don't ever get 'attached'
	 * so we provide this table to allow device drivers to register
	 * their device names.
	 */
	for (i = 0; devsw_init_funcs[i] != NULL; i++)
		devsw_init_funcs[i]();
}
146
147 int
148 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
149 const struct cdevsw *cdev, int *cmajor)
150 {
151 struct devsw_conv *conv;
152 char *name;
153 int error, i;
154
155 if (devname == NULL || cdev == NULL)
156 return (EINVAL);
157
158 mutex_enter(&device_lock);
159
160 for (i = 0 ; i < max_devsw_convs ; i++) {
161 conv = &devsw_conv[i];
162 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
163 continue;
164
165 if (*bmajor < 0)
166 *bmajor = conv->d_bmajor;
167 if (*cmajor < 0)
168 *cmajor = conv->d_cmajor;
169
170 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
171 error = EINVAL;
172 goto fail;
173 }
174 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
175 error = EINVAL;
176 goto fail;
177 }
178
179 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
180 cdevsw[*cmajor] != NULL) {
181 error = EEXIST;
182 goto fail;
183 }
184
185 if (bdev != NULL)
186 bdevsw[*bmajor] = bdev;
187 cdevsw[*cmajor] = cdev;
188
189 mutex_exit(&device_lock);
190 return (0);
191 }
192
193 error = bdevsw_attach(bdev, bmajor);
194 if (error != 0)
195 goto fail;
196 error = cdevsw_attach(cdev, cmajor);
197 if (error != 0) {
198 devsw_detach_locked(bdev, NULL);
199 goto fail;
200 }
201
202 for (i = 0 ; i < max_devsw_convs ; i++) {
203 if (devsw_conv[i].d_name == NULL)
204 break;
205 }
206 if (i == max_devsw_convs) {
207 struct devsw_conv *newptr;
208 int old, new;
209
210 old = max_devsw_convs;
211 new = old + 1;
212
213 newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
214 if (newptr == NULL) {
215 devsw_detach_locked(bdev, cdev);
216 error = ENOMEM;
217 goto fail;
218 }
219 newptr[old].d_name = NULL;
220 newptr[old].d_bmajor = -1;
221 newptr[old].d_cmajor = -1;
222 memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
223 if (devsw_conv != devsw_conv0)
224 kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
225 devsw_conv = newptr;
226 max_devsw_convs = new;
227 }
228
229 i = strlen(devname) + 1;
230 name = kmem_alloc(i, KM_NOSLEEP);
231 if (name == NULL) {
232 devsw_detach_locked(bdev, cdev);
233 goto fail;
234 }
235 strlcpy(name, devname, i);
236
237 devsw_conv[i].d_name = name;
238 devsw_conv[i].d_bmajor = *bmajor;
239 devsw_conv[i].d_cmajor = *cmajor;
240
241 mutex_exit(&device_lock);
242 return (0);
243 fail:
244 mutex_exit(&device_lock);
245 return (error);
246 }
247
248 static int
249 bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
250 {
251 const struct bdevsw **newptr;
252 int bmajor, i;
253
254 KASSERT(mutex_owned(&device_lock));
255
256 if (devsw == NULL)
257 return (0);
258
259 if (*devmajor < 0) {
260 for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
261 if (bdevsw[bmajor] != NULL)
262 continue;
263 for (i = 0 ; i < max_devsw_convs ; i++) {
264 if (devsw_conv[i].d_bmajor == bmajor)
265 break;
266 }
267 if (i != max_devsw_convs)
268 continue;
269 break;
270 }
271 *devmajor = bmajor;
272 }
273
274 if (*devmajor >= MAXDEVSW) {
275 printf("bdevsw_attach: block majors exhausted");
276 return (ENOMEM);
277 }
278
279 if (*devmajor >= max_bdevsws) {
280 KASSERT(bdevsw == bdevsw0);
281 newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
282 if (newptr == NULL)
283 return (ENOMEM);
284 memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
285 bdevsw = newptr;
286 max_bdevsws = MAXDEVSW;
287 }
288
289 if (bdevsw[*devmajor] != NULL)
290 return (EEXIST);
291
292 bdevsw[*devmajor] = devsw;
293
294 return (0);
295 }
296
297 static int
298 cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
299 {
300 const struct cdevsw **newptr;
301 int cmajor, i;
302
303 KASSERT(mutex_owned(&device_lock));
304
305 if (*devmajor < 0) {
306 for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
307 if (cdevsw[cmajor] != NULL)
308 continue;
309 for (i = 0 ; i < max_devsw_convs ; i++) {
310 if (devsw_conv[i].d_cmajor == cmajor)
311 break;
312 }
313 if (i != max_devsw_convs)
314 continue;
315 break;
316 }
317 *devmajor = cmajor;
318 }
319
320 if (*devmajor >= MAXDEVSW) {
321 printf("cdevsw_attach: character majors exhausted");
322 return (ENOMEM);
323 }
324
325 if (*devmajor >= max_cdevsws) {
326 KASSERT(cdevsw == cdevsw0);
327 newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
328 if (newptr == NULL)
329 return (ENOMEM);
330 memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
331 cdevsw = newptr;
332 max_cdevsws = MAXDEVSW;
333 }
334
335 if (cdevsw[*devmajor] != NULL)
336 return (EEXIST);
337
338 cdevsw[*devmajor] = devsw;
339
340 return (0);
341 }
342
343 static void
344 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
345 {
346 int i;
347
348 KASSERT(mutex_owned(&device_lock));
349
350 if (bdev != NULL) {
351 for (i = 0 ; i < max_bdevsws ; i++) {
352 if (bdevsw[i] != bdev)
353 continue;
354 bdevsw[i] = NULL;
355 break;
356 }
357 }
358 if (cdev != NULL) {
359 for (i = 0 ; i < max_cdevsws ; i++) {
360 if (cdevsw[i] != cdev)
361 continue;
362 cdevsw[i] = NULL;
363 break;
364 }
365 }
366 }
367
368 int
369 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
370 {
371
372 mutex_enter(&device_lock);
373 devsw_detach_locked(bdev, cdev);
374 mutex_exit(&device_lock);
375 return 0;
376 }
377
378 /*
379 * Look up a block device by number.
380 *
381 * => Caller must ensure that the device is attached.
382 */
383 const struct bdevsw *
384 bdevsw_lookup(dev_t dev)
385 {
386 int bmajor;
387
388 if (dev == NODEV)
389 return (NULL);
390 bmajor = major(dev);
391 if (bmajor < 0 || bmajor >= max_bdevsws)
392 return (NULL);
393
394 return (bdevsw[bmajor]);
395 }
396
397 /*
398 * Look up a character device by number.
399 *
400 * => Caller must ensure that the device is attached.
401 */
402 const struct cdevsw *
403 cdevsw_lookup(dev_t dev)
404 {
405 int cmajor;
406
407 if (dev == NODEV)
408 return (NULL);
409 cmajor = major(dev);
410 if (cmajor < 0 || cmajor >= max_cdevsws)
411 return (NULL);
412
413 return (cdevsw[cmajor]);
414 }
415
416 /*
417 * Look up a block device by reference to its operations set.
418 *
419 * => Caller must ensure that the device is not detached, and therefore
420 * that the returned major is still valid when dereferenced.
421 */
422 int
423 bdevsw_lookup_major(const struct bdevsw *bdev)
424 {
425 int bmajor;
426
427 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
428 if (bdevsw[bmajor] == bdev)
429 return (bmajor);
430 }
431
432 return (-1);
433 }
434
435 /*
436 * Look up a character device by reference to its operations set.
437 *
438 * => Caller must ensure that the device is not detached, and therefore
439 * that the returned major is still valid when dereferenced.
440 */
441 int
442 cdevsw_lookup_major(const struct cdevsw *cdev)
443 {
444 int cmajor;
445
446 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
447 if (cdevsw[cmajor] == cdev)
448 return (cmajor);
449 }
450
451 return (-1);
452 }
453
454 /*
455 * Convert from block major number to name.
456 *
457 * => Caller must ensure that the device is not detached, and therefore
458 * that the name pointer is still valid when dereferenced.
459 */
460 const char *
461 devsw_blk2name(int bmajor)
462 {
463 const char *name;
464 int cmajor, i;
465
466 name = NULL;
467 cmajor = -1;
468
469 mutex_enter(&device_lock);
470 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
471 mutex_exit(&device_lock);
472 return (NULL);
473 }
474 for (i = 0 ; i < max_devsw_convs; i++) {
475 if (devsw_conv[i].d_bmajor == bmajor) {
476 cmajor = devsw_conv[i].d_cmajor;
477 break;
478 }
479 }
480 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
481 name = devsw_conv[i].d_name;
482 mutex_exit(&device_lock);
483
484 return (name);
485 }
486
487 /*
488 * Convert from device name to block major number.
489 *
490 * => Caller must ensure that the device is not detached, and therefore
491 * that the major number is still valid when dereferenced.
492 */
493 int
494 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
495 {
496 struct devsw_conv *conv;
497 int bmajor, i;
498
499 if (name == NULL)
500 return (-1);
501
502 mutex_enter(&device_lock);
503 for (i = 0 ; i < max_devsw_convs ; i++) {
504 size_t len;
505
506 conv = &devsw_conv[i];
507 if (conv->d_name == NULL)
508 continue;
509 len = strlen(conv->d_name);
510 if (strncmp(conv->d_name, name, len) != 0)
511 continue;
512 if (*(name +len) && !isdigit(*(name + len)))
513 continue;
514 bmajor = conv->d_bmajor;
515 if (bmajor < 0 || bmajor >= max_bdevsws ||
516 bdevsw[bmajor] == NULL)
517 break;
518 if (devname != NULL) {
519 #ifdef DEVSW_DEBUG
520 if (strlen(conv->d_name) >= devnamelen)
521 printf("devsw_name2blk: too short buffer");
522 #endif /* DEVSW_DEBUG */
523 strncpy(devname, conv->d_name, devnamelen);
524 devname[devnamelen - 1] = '\0';
525 }
526 mutex_exit(&device_lock);
527 return (bmajor);
528 }
529
530 mutex_exit(&device_lock);
531 return (-1);
532 }
533
534 /*
535 * Convert from device name to char major number.
536 *
537 * => Caller must ensure that the device is not detached, and therefore
538 * that the major number is still valid when dereferenced.
539 */
540 int
541 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
542 {
543 struct devsw_conv *conv;
544 int cmajor, i;
545
546 if (name == NULL)
547 return (-1);
548
549 mutex_enter(&device_lock);
550 for (i = 0 ; i < max_devsw_convs ; i++) {
551 size_t len;
552
553 conv = &devsw_conv[i];
554 if (conv->d_name == NULL)
555 continue;
556 len = strlen(conv->d_name);
557 if (strncmp(conv->d_name, name, len) != 0)
558 continue;
559 if (*(name +len) && !isdigit(*(name + len)))
560 continue;
561 cmajor = conv->d_cmajor;
562 if (cmajor < 0 || cmajor >= max_cdevsws ||
563 cdevsw[cmajor] == NULL)
564 break;
565 if (devname != NULL) {
566 #ifdef DEVSW_DEBUG
567 if (strlen(conv->d_name) >= devnamelen)
568 printf("devsw_name2chr: too short buffer");
569 #endif /* DEVSW_DEBUG */
570 strncpy(devname, conv->d_name, devnamelen);
571 devname[devnamelen - 1] = '\0';
572 }
573 mutex_exit(&device_lock);
574 return (cmajor);
575 }
576
577 mutex_exit(&device_lock);
578 return (-1);
579 }
580
581 /*
582 * Convert from character dev_t to block dev_t.
583 *
584 * => Caller must ensure that the device is not detached, and therefore
585 * that the major number is still valid when dereferenced.
586 */
587 dev_t
588 devsw_chr2blk(dev_t cdev)
589 {
590 int bmajor, cmajor, i;
591 dev_t rv;
592
593 cmajor = major(cdev);
594 bmajor = -1;
595 rv = NODEV;
596
597 mutex_enter(&device_lock);
598 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
599 mutex_exit(&device_lock);
600 return (NODEV);
601 }
602 for (i = 0 ; i < max_devsw_convs ; i++) {
603 if (devsw_conv[i].d_cmajor == cmajor) {
604 bmajor = devsw_conv[i].d_bmajor;
605 break;
606 }
607 }
608 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
609 rv = makedev(bmajor, minor(cdev));
610 mutex_exit(&device_lock);
611
612 return (rv);
613 }
614
615 /*
616 * Convert from block dev_t to character dev_t.
617 *
618 * => Caller must ensure that the device is not detached, and therefore
619 * that the major number is still valid when dereferenced.
620 */
621 dev_t
622 devsw_blk2chr(dev_t bdev)
623 {
624 int bmajor, cmajor, i;
625 dev_t rv;
626
627 bmajor = major(bdev);
628 cmajor = -1;
629 rv = NODEV;
630
631 mutex_enter(&device_lock);
632 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
633 mutex_exit(&device_lock);
634 return (NODEV);
635 }
636 for (i = 0 ; i < max_devsw_convs ; i++) {
637 if (devsw_conv[i].d_bmajor == bmajor) {
638 cmajor = devsw_conv[i].d_cmajor;
639 break;
640 }
641 }
642 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
643 rv = makedev(cmajor, minor(bdev));
644 mutex_exit(&device_lock);
645
646 return (rv);
647 }
648
/*
 * Device access methods.
 *
 * DEV_LOCK/DEV_UNLOCK bracket calls into a driver: drivers not marked
 * D_MPSAFE still rely on the big kernel lock, so we take it on their
 * behalf.  'mpflag' (a local in each wrapper) records whether the
 * driver is MP-safe so the matching unlock is skipped symmetrically.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
662
663 int
664 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
665 {
666 const struct bdevsw *d;
667 int rv, mpflag;
668
669 /*
670 * For open we need to lock, in order to synchronize
671 * with attach/detach.
672 */
673 mutex_enter(&device_lock);
674 d = bdevsw_lookup(dev);
675 mutex_exit(&device_lock);
676 if (d == NULL)
677 return ENXIO;
678
679 DEV_LOCK(d);
680 rv = (*d->d_open)(dev, flag, devtype, l);
681 DEV_UNLOCK(d);
682
683 return rv;
684 }
685
686 int
687 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
688 {
689 const struct bdevsw *d;
690 int rv, mpflag;
691
692 if ((d = bdevsw_lookup(dev)) == NULL)
693 return ENXIO;
694
695 DEV_LOCK(d);
696 rv = (*d->d_close)(dev, flag, devtype, l);
697 DEV_UNLOCK(d);
698
699 return rv;
700 }
701
702 void
703 bdev_strategy(struct buf *bp)
704 {
705 const struct bdevsw *d;
706 int mpflag;
707
708 if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
709 panic("bdev_strategy");
710
711 DEV_LOCK(d);
712 (*d->d_strategy)(bp);
713 DEV_UNLOCK(d);
714 }
715
716 int
717 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
718 {
719 const struct bdevsw *d;
720 int rv, mpflag;
721
722 if ((d = bdevsw_lookup(dev)) == NULL)
723 return ENXIO;
724
725 DEV_LOCK(d);
726 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
727 DEV_UNLOCK(d);
728
729 return rv;
730 }
731
732 int
733 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
734 {
735 const struct bdevsw *d;
736 int rv;
737
738 /*
739 * Dump can be called without the device open. Since it can
740 * currently only be called with the system paused (and in a
741 * potentially unstable state), we don't perform any locking.
742 */
743 if ((d = bdevsw_lookup(dev)) == NULL)
744 return ENXIO;
745
746 /* DEV_LOCK(d); */
747 rv = (*d->d_dump)(dev, addr, data, sz);
748 /* DEV_UNLOCK(d); */
749
750 return rv;
751 }
752
753 int
754 bdev_type(dev_t dev)
755 {
756 const struct bdevsw *d;
757
758 if ((d = bdevsw_lookup(dev)) == NULL)
759 return D_OTHER;
760 return d->d_flag & D_TYPEMASK;
761 }
762
763 int
764 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
765 {
766 const struct cdevsw *d;
767 int rv, mpflag;
768
769 /*
770 * For open we need to lock, in order to synchronize
771 * with attach/detach.
772 */
773 mutex_enter(&device_lock);
774 d = cdevsw_lookup(dev);
775 mutex_exit(&device_lock);
776 if (d == NULL)
777 return ENXIO;
778
779 DEV_LOCK(d);
780 rv = (*d->d_open)(dev, flag, devtype, l);
781 DEV_UNLOCK(d);
782
783 return rv;
784 }
785
786 int
787 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
788 {
789 const struct cdevsw *d;
790 int rv, mpflag;
791
792 if ((d = cdevsw_lookup(dev)) == NULL)
793 return ENXIO;
794
795 DEV_LOCK(d);
796 rv = (*d->d_close)(dev, flag, devtype, l);
797 DEV_UNLOCK(d);
798
799 return rv;
800 }
801
802 int
803 cdev_read(dev_t dev, struct uio *uio, int flag)
804 {
805 const struct cdevsw *d;
806 int rv, mpflag;
807
808 if ((d = cdevsw_lookup(dev)) == NULL)
809 return ENXIO;
810
811 DEV_LOCK(d);
812 rv = (*d->d_read)(dev, uio, flag);
813 DEV_UNLOCK(d);
814
815 return rv;
816 }
817
818 int
819 cdev_write(dev_t dev, struct uio *uio, int flag)
820 {
821 const struct cdevsw *d;
822 int rv, mpflag;
823
824 if ((d = cdevsw_lookup(dev)) == NULL)
825 return ENXIO;
826
827 DEV_LOCK(d);
828 rv = (*d->d_write)(dev, uio, flag);
829 DEV_UNLOCK(d);
830
831 return rv;
832 }
833
834 int
835 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
836 {
837 const struct cdevsw *d;
838 int rv, mpflag;
839
840 if ((d = cdevsw_lookup(dev)) == NULL)
841 return ENXIO;
842
843 DEV_LOCK(d);
844 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
845 DEV_UNLOCK(d);
846
847 return rv;
848 }
849
850 void
851 cdev_stop(struct tty *tp, int flag)
852 {
853 const struct cdevsw *d;
854 int mpflag;
855
856 if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
857 return;
858
859 DEV_LOCK(d);
860 (*d->d_stop)(tp, flag);
861 DEV_UNLOCK(d);
862 }
863
864 struct tty *
865 cdev_tty(dev_t dev)
866 {
867 const struct cdevsw *d;
868
869 if ((d = cdevsw_lookup(dev)) == NULL)
870 return NULL;
871
872 /* XXX Check if necessary. */
873 if (d->d_tty == NULL)
874 return NULL;
875
876 return (*d->d_tty)(dev);
877 }
878
879 int
880 cdev_poll(dev_t dev, int flag, lwp_t *l)
881 {
882 const struct cdevsw *d;
883 int rv, mpflag;
884
885 if ((d = cdevsw_lookup(dev)) == NULL)
886 return POLLERR;
887
888 DEV_LOCK(d);
889 rv = (*d->d_poll)(dev, flag, l);
890 DEV_UNLOCK(d);
891
892 return rv;
893 }
894
895 paddr_t
896 cdev_mmap(dev_t dev, off_t off, int flag)
897 {
898 const struct cdevsw *d;
899 paddr_t rv;
900 int mpflag;
901
902 if ((d = cdevsw_lookup(dev)) == NULL)
903 return (paddr_t)-1LL;
904
905 DEV_LOCK(d);
906 rv = (*d->d_mmap)(dev, off, flag);
907 DEV_UNLOCK(d);
908
909 return rv;
910 }
911
912 int
913 cdev_kqfilter(dev_t dev, struct knote *kn)
914 {
915 const struct cdevsw *d;
916 int rv, mpflag;
917
918 if ((d = cdevsw_lookup(dev)) == NULL)
919 return ENXIO;
920
921 DEV_LOCK(d);
922 rv = (*d->d_kqfilter)(dev, kn);
923 DEV_UNLOCK(d);
924
925 return rv;
926 }
927
928 int
929 cdev_type(dev_t dev)
930 {
931 const struct cdevsw *d;
932
933 if ((d = cdevsw_lookup(dev)) == NULL)
934 return D_OTHER;
935 return d->d_flag & D_TYPEMASK;
936 }
937
938 static struct device_name *
939 device_name_alloc(dev_t dev, device_t devp, bool cdev,
940 enum devtype dtype, const char *fmt, va_list src)
941 {
942 struct device_name *dn;
943 va_list dst;
944
945 /* TODO: Check for aliases */
946
947 dn = kmem_zalloc(sizeof(*dn), KM_NOSLEEP);
948 if (dn == NULL)
949 return NULL;
950
951 dn->d_dev = dev;
952 dn->d_devp = devp;
953 dn->d_char = cdev;
954 dn->d_type = dtype;
955
956 dn->d_name = kmem_zalloc(MAXNAMLEN, KM_NOSLEEP);
957 va_copy(dst, src);
958 vsnprintf(dn->d_name, MAXNAMLEN, fmt, dst);
959 va_end(dst);
960
961 return dn;
962 }
963
964 /*
965 * Register a dev_t and name for a device driver with devfs.
966 * We maintain a TAILQ of registered device drivers names and dev_t's.
967 *
968 * => if devp is NULL this device has no device_t instance. An example
969 * of this is zero(4).
970 *
971 * => if there already exists another name for this dev_t, then 'name'
972 * is assumed to be an alias of a previously registered device driver.
973 * TODO: The above isn't actually true at the moment, we just return 0.
974 *
975 * => 'cdev' indiciates whether we are a char or block device.
976 * If 'cdev' is true, we are a character device, otherwise we
977 * are a block device.
978 */
979 int
980 device_register_name(dev_t dev, device_t devp, bool cdev,
981 enum devtype dtype, const char *fmt, ...)
982 {
983 struct device_name *dn;
984 va_list ap;
985
986 va_start(ap, fmt);
987
988 if ((dn = device_name_alloc(dev, devp, cdev, dtype, fmt, ap)) == NULL)
989 return ENOMEM;
990
991 va_end(ap);
992
993 mutex_enter(&dname_lock);
994 TAILQ_INSERT_TAIL(&device_names, dn, d_next);
995 mutex_exit(&dname_lock);
996
997 return 0;
998 }
999
1000 /*
1001 * Remove a previously registered name for 'dev'.
1002 *
1003 * => This must be called twice with different values for 'dev' if
1004 * the caller previously registered a name for a character device
1005 * and a name for a block device.
1006 */
1007 int
1008 device_deregister_name(dev_t dev, const char *fmt, ...)
1009 {
1010 int error = 0;
1011 struct device_name *dn;
1012 va_list ap;
1013 char name[MAXNAMLEN];
1014
1015 va_start(ap, fmt);
1016 vsnprintf(name, MAXNAMLEN, fmt, ap);
1017 va_end(ap);
1018
1019 mutex_enter(&dname_lock);
1020 TAILQ_FOREACH(dn, &device_names, d_next) {
1021 if ((strcmp(dn->d_name, name) == 0) && (dn->d_gone == false))
1022 break;
1023 }
1024
1025 if (dn != NULL)
1026 dn->d_gone = true;
1027 else
1028 error = EINVAL;
1029
1030 mutex_exit(&dname_lock);
1031 return error;
1032 }
1033
1034 /*
1035 * Remove all device names for this device_t.
1036 */
1037 int
1038 device_deregister_all(device_t dev)
1039 {
1040 struct device_name *dn;
1041
1042 mutex_enter(&dname_lock);
1043 TAILQ_FOREACH(dn, &device_names, d_next) {
1044 if ((dn->d_devp == dev) && (dn->d_gone == false))
1045 dn->d_gone = true;
1046 }
1047 mutex_exit(&dname_lock);
1048 return 0;
1049 }
1050
1051 struct device_name *
1052 device_lookup_info(dev_t dev, int is_char)
1053 {
1054 struct device_name *dn;
1055
1056 mutex_enter(&dname_lock);
1057 TAILQ_FOREACH(dn, &device_names, d_next) {
1058 if ((dn->d_dev == dev) && (dn->d_char == is_char))
1059 break;
1060 }
1061 mutex_exit(&dname_lock);
1062
1063 return dn;
1064 }
1065
1066 /*
1067 * Register a name for a device_t and wait for the device file to be
1068 * created in devfs mounts. Normally this operation is asynchronous in
1069 * the sense that a device name is registered and at some later time
1070 * a device file will appear in a devfs mount.
1071 *
1072 * cond - A kernel condition variable
1073 * ticks - Timeout value in hz
1074 *
1075 * NOTE: There is no guarantee that a device file will be created,
1076 * however, the caller will be notified in a synchronous manner
1077 * whether the creation failed or not.
1078 */
1079 int
1080 device_register_sync(dev_t dev, device_t devp, bool cdev,
1081 enum devtype dtype, kcondvar_t cond, int ticks, const char *fmt, ...)
1082 {
1083 int error = 0;
1084 struct device_name *dn;
1085 va_list ap;
1086
1087 va_start(ap, fmt);
1088
1089 if ((dn = device_name_alloc(dev, devp, cdev, dtype, fmt, ap)) == NULL)
1090 return ENOMEM;
1091 dn->d_busy = true;
1092 dn->d_cv = cond;
1093
1094 va_end(ap);
1095
1096 mutex_enter(&dname_lock);
1097 TAILQ_INSERT_TAIL(&device_names, dn, d_next);
1098 mutex_exit(&dname_lock);
1099
1100 mutex_init(&dn->d_cvmutex, MUTEX_DEFAULT, IPL_NONE);
1101
1102 mutex_enter(&dn->d_cvmutex);
1103
1104 while (dn->d_busy == true) {
1105 if (ticks <= 0)
1106 error = cv_wait_sig(&dn->d_cv, &dn->d_cvmutex);
1107 else
1108 error = cv_timedwait_sig(&dn->d_cv,
1109 &dn->d_cvmutex, ticks);
1110
1111 }
1112 error = dn->d_retval;
1113 mutex_exit(&dn->d_cvmutex);
1114
1115 return error;
1116 }
1117