/*	$NetBSD: subr_devsw.c,v 1.15.6.3 2008/04/03 12:43:03 mjf Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that once
 *	the device has been referenced by a vnode (opened), calling
 *	the other methods should be valid until that reference is
 *	dropped.
 */
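/*
 * Registration example
 *
 *	A minimal, illustrative sketch of how a hypothetical driver
 *	"mydev" might register and later unregister its device switch
 *	entries with the interfaces below.  The mydev_bdevsw and
 *	mydev_cdevsw tables are assumptions, not part of this file:
 *
 *		int bmajor = -1, cmajor = -1;
 *		int error;
 *
 *		error = devsw_attach("mydev", &mydev_bdevsw, &bmajor,
 *		    &mydev_cdevsw, &cmajor);
 *		if (error != 0)
 *			return error;
 *		...
 *		devsw_detach(&mydev_bdevsw, &mydev_cdevsw);
 *
 *	Passing -1 for a major requests dynamic allocation; a driver
 *	with a fixed, config(8)-assigned major passes that value
 *	instead.
 */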

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.15.6.3 2008/04/03 12:43:03 mjf Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/dirent.h>
#include <machine/stdarg.h>
#include <sys/disklabel.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, int *);
static int cdevsw_attach(const struct cdevsw *, int *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t devsw_lock;
extern kmutex_t dname_lock;

/*
 * A table of initialisation functions for device drivers that
 * don't have an attach routine.
 */
void (*devsw_init_funcs[])(void) = {
	bpf_init,
	cttyinit,
	mem_init,
	swap_init,
	NULL,
};

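/*
 * Initialise the device switch machinery: set up the locks and the
 * registered-name list, then give drivers without an attach routine
 * a chance to register their device names.
 */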
void
devsw_init(void)
{
	int i;

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);

	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&dname_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&device_names);

	/*
	 * Technically, some device drivers don't ever get 'attached'
	 * so we provide this table to allow device drivers to register
	 * their device names.
	 */
	for (i = 0; devsw_init_funcs[i] != NULL; i++)
		devsw_init_funcs[i]();
}

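/*
 * Attach a driver's block and/or character device switch entries and
 * record the driver name to major number mapping.  If *bmajor or
 * *cmajor is negative, a free major is allocated and returned through
 * the pointer.  A driver reattaching under a previously recorded name
 * gets its old majors back.
 */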
int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    const struct cdevsw *cdev, int *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	size_t len;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&devsw_lock);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&devsw_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&devsw_lock);
	return (0);
 fail:
	mutex_exit(&devsw_lock);
	return (error);
}

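/*
 * Attach a single block device switch entry, allocating a free block
 * major if *devmajor is negative and growing the bdevsw table when
 * the chosen major does not fit in the statically allocated one.
 * A NULL devsw is a no-op.  Caller must hold devsw_lock.
 */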
static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}

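/*
 * Attach a single character device switch entry, allocating a free
 * character major if *devmajor is negative and growing the cdevsw
 * table as needed.  Caller must hold devsw_lock.
 */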
static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}

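/*
 * Remove the given block and/or character device switch entries from
 * the tables.  Caller must hold devsw_lock.
 */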
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i;

	KASSERT(mutex_owned(&devsw_lock));

	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;
			bdevsw[i] = NULL;
			break;
		}
	}
	if (cdev != NULL) {
		for (i = 0 ; i < max_cdevsws ; i++) {
			if (cdevsw[i] != cdev)
				continue;
			cdevsw[i] = NULL;
			break;
		}
	}
}

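/*
 * Detach a driver's block and/or character device switch entries.
 * Note that the name to major mapping recorded by devsw_attach() is
 * left in place, so a driver that reattaches under the same name
 * gets the same majors back.
 */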
void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&devsw_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&devsw_lock);
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	int bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	int cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	int bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (-1);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	int cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (-1);
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(int bmajor)
{
	const char *name;
	int cmajor, i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&devsw_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&devsw_lock);

	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
int
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int bmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&devsw_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&devsw_lock);
		return (bmajor);
	}

	mutex_exit(&devsw_lock);
	return (-1);
}

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
int
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int cmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&devsw_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&devsw_lock);
		return (cmajor);
	}

	mutex_exit(&devsw_lock);
	return (-1);
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = -1;
	rv = NODEV;

	mutex_enter(&devsw_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&devsw_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = -1;
	rv = NODEV;

	mutex_enter(&devsw_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&devsw_lock);

	return (rv);
}

/*
 * Device access methods.
 */

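/*
 * The wrappers below call into the driver with the kernel lock held,
 * via DEV_LOCK/DEV_UNLOCK, unless the driver has set D_MPSAFE in its
 * d_flag field, in which case it is trusted to do its own locking.
 */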
#define	DEV_LOCK(d)						\
	if ((d->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_LOCK(1, curlwp);				\
	}

#define	DEV_UNLOCK(d)						\
	if ((d->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_UNLOCK_ONE(curlwp);			\
	}

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&devsw_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&devsw_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

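/*
 * Return the D_TYPEMASK type bits for a block device, or D_OTHER if
 * no driver is attached at that major.
 */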
int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&devsw_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&devsw_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;
	struct tty *rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	DEV_LOCK(d);
	rv = (*d->d_tty)(dev);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

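/*
 * Return the D_TYPEMASK type bits for a character device, or D_OTHER
 * if no driver is attached at that major.
 */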
int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

/*
 * Register a dev_t and name for a device driver with devfs.
 * We maintain a TAILQ of registered device driver names and dev_t's.
 *
 * => if devp is NULL this device has no device_t instance.  An example
 *    of this is zero(4).
 *
 * => if there already exists another name for this dev_t, then 'name'
 *    is assumed to be an alias of a previously registered device driver.
 *    TODO: The above isn't actually true at the moment, we just return 0.
 *
 * => 'cdev' indicates whether we are a char or block device.
 *    If 'cdev' is true, we are a character device, otherwise we
 *    are a block device.
 */
int
device_register_name(dev_t dev, device_t devp, boolean_t cdev,
    enum devtype dtype, const char *fmt, ...)
{
	struct device_name *dn;
	va_list ap;

	/* TODO: Check for aliases */

	dn = kmem_zalloc(sizeof(*dn), KM_NOSLEEP);
	if (dn == NULL)
		return ENOMEM;

	dn->d_dev = dev;
	dn->d_devp = devp;
	dn->d_char = cdev;
	dn->d_type = dtype;

	dn->d_name = kmem_zalloc(MAXNAMLEN, KM_NOSLEEP);
	if (dn->d_name == NULL) {
		kmem_free(dn, sizeof(*dn));
		return ENOMEM;
	}
	va_start(ap, fmt);
	vsnprintf(dn->d_name, MAXNAMLEN, fmt, ap);
	va_end(ap);

	mutex_enter(&dname_lock);
	TAILQ_INSERT_TAIL(&device_names, dn, d_next);
	mutex_exit(&dname_lock);

	return 0;
}

/*
 * Remove a previously registered name for 'dev'.
 *
 * => This must be called twice with different values for 'dev' if
 *    the caller previously registered a name for a character device
 *    and a name for a block device.
 */
int
device_unregister_name(dev_t dev, const char *fmt, ...)
{
	int error = 0;
	struct device_name *dn;
	va_list ap;
	char name[MAXNAMLEN];

	va_start(ap, fmt);
	vsnprintf(name, MAXNAMLEN, fmt, ap);
	va_end(ap);

	mutex_enter(&dname_lock);
	TAILQ_FOREACH(dn, &device_names, d_next) {
		if (strcmp(dn->d_name, name) == 0)
			break;
	}

	if (dn != NULL)
		dn->d_gone = true;
	else
		error = EINVAL;

	mutex_exit(&dname_lock);
	return error;
}

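/*
 * Look up the registered name entry for a dev_t.  'is_char' selects
 * between the character and block namespaces.  Returns NULL if no
 * matching entry has been registered.
 */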
struct device_name *
device_lookup_info(dev_t dev, int is_char)
{
	struct device_name *dn;

	mutex_enter(&dname_lock);
	TAILQ_FOREACH(dn, &device_names, d_next) {
		if ((dn->d_dev == dev) && (dn->d_char == is_char))
			break;
	}
	mutex_exit(&dname_lock);

	return dn;
}