/*	$NetBSD: subr_devsw.c,v 1.21 2008/06/08 12:22:39 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
 */
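
/*
 * Usage sketch (illustrative only; the "mydev" name and devsw
 * structures below are hypothetical, not part of this file): a
 * dynamically loaded driver registers both switches with the major
 * numbers set to -1 to request dynamic allocation, and unregisters
 * them again on unload:
 *
 *	static int mydev_bmajor = -1, mydev_cmajor = -1;
 *
 *	error = devsw_attach("mydev", &mydev_bdevsw, &mydev_bmajor,
 *	    &mydev_cdevsw, &mydev_cmajor);
 *	if (error != 0)
 *		return error;
 *	...
 *	devsw_detach(&mydev_bdevsw, &mydev_cdevsw);
 */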

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.21 2008/06/08 12:22:39 ad Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>

#include <miscfs/specfs/specdev.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, int *);
static int cdevsw_attach(const struct cdevsw *, int *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
}

int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    const struct cdevsw *cdev, int *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	size_t len;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&specfs_lock);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&specfs_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	/*
	 * Copy the driver name.  Note that 'i' still indexes the free
	 * conversion slot found above, so a separate variable must hold
	 * the name length to avoid clobbering the slot index.
	 */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&specfs_lock);
	return (0);
fail:
	mutex_exit(&specfs_lock);
	return (error);
}

static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&specfs_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}

static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&specfs_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i;

	KASSERT(mutex_owned(&specfs_lock));

	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;
			bdevsw[i] = NULL;
			break;
		}
	}
	if (cdev != NULL) {
		for (i = 0 ; i < max_cdevsws ; i++) {
			if (cdevsw[i] != cdev)
				continue;
			cdevsw[i] = NULL;
			break;
		}
	}
}

int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&specfs_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&specfs_lock);
	return 0;
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	int bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	int cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	int bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (-1);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	int cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (-1);
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(int bmajor)
{
	const char *name;
	int cmajor, i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&specfs_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&specfs_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&specfs_lock);

	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
int
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int bmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&specfs_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too small\n");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&specfs_lock);
		return (bmajor);
	}

	mutex_exit(&specfs_lock);
	return (-1);
}

/*
 * Convert from device name to character major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
int
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int cmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&specfs_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: buffer too small\n");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&specfs_lock);
		return (cmajor);
	}

	mutex_exit(&specfs_lock);
	return (-1);
}

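/*
 * Usage sketch (illustrative only; "wd" and the unit suffix are
 * hypothetical examples): the name comparison above tolerates a
 * trailing unit number after the driver name, so a lookup such as
 *
 *	char buf[16];
 *	int bmajor;
 *
 *	bmajor = devsw_name2blk("wd0", buf, sizeof(buf));
 *
 * matches a driver registered as "wd", returns its block major and
 * copies the bare driver name into buf.
 */
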
/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = -1;
	rv = NODEV;

	mutex_enter(&specfs_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&specfs_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&specfs_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = -1;
	rv = NODEV;

	mutex_enter(&specfs_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&specfs_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&specfs_lock);

	return (rv);
}

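/*
 * Usage sketch (illustrative only): given the dev_t of a character
 * (raw) disk device, the matching block dev_t with the same minor is
 * obtained as follows; NODEV is returned when no block counterpart is
 * registered:
 *
 *	dev_t bdev;
 *
 *	bdev = devsw_chr2blk(cdev);
 *	if (bdev == NODEV)
 *		return ENXIO;
 */
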
/*
 * Device access methods.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}

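/*
 * Illustrative sketch (the driver structure below is hypothetical):
 * DEV_LOCK() takes the big kernel lock around each driver call unless
 * the driver sets D_MPSAFE in d_flag to declare that it performs its
 * own locking, e.g.:
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		.d_open = mydev_open,
 *		...
 *		.d_flag = D_OTHER | D_MPSAFE,
 *	};
 */
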
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&specfs_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&specfs_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&specfs_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&specfs_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}