subr_devsw.c revision 1.10.8.1 1 /* $NetBSD: subr_devsw.c,v 1.10.8.1 2007/04/13 20:56:18 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Overview
41 *
42 * subr_devsw.c: registers device drivers by name and by major
43 * number, and provides wrapper methods for performing I/O and
44 * other tasks on device drivers, keying on the device number
45 * (dev_t).
46 *
47 * When the system is built, the config(8) command generates
48 * static tables of device drivers built into the kernel image
49 * along with their associated methods. These are recorded in
50 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
51 * and removed from the system dynamically.
52 *
53 * Allocation
54 *
 *	When the system initially boots, only the statically allocated
56 * indexes (bdevsw0, cdevsw0) are used. If these overflow due
57 * to allocation, we allocate a fixed block of memory to hold
58 * the new, expanded index. This "fork" of the table is only
59 * ever performed once in order to guarantee that other threads
60 * may safely access the device tables:
61 *
62 * o Once a thread has a "reference" to the table via an earlier
63 * open() call, we know that the entry in the table must exist
64 * and so it is safe to access it.
65 *
66 * o Regardless of whether other threads see the old or new
67 * pointers, they will point to a correct device switch
68 * structure for the operation being performed.
69 *
70 * XXX Currently, the wrapper methods such as cdev_read() verify
71 * that a device driver does in fact exist before calling the
72 * associated driver method. This should be changed so that
 *	once the device has been referenced (i.e. opened), calling
74 * the other methods should be valid until that reference is
75 * dropped.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.10.8.1 2007/04/13 20:56:18 ad Exp $");
80
81 #include "opt_multiprocessor.h"
82
83 #include <sys/param.h>
84 #include <sys/conf.h>
85 #include <sys/kmem.h>
86 #include <sys/systm.h>
87 #include <sys/proc.h>
88 #include <sys/buf.h>
89 #include <sys/tty.h>
90 #include <sys/poll.h>
91
92 #define MAXDEVSW 512 /* the maximum major device number */
93 #define BDEVSW_SIZE (sizeof(struct bdevsw *))
94 #define CDEVSW_SIZE (sizeof(struct cdevsw *))
95 #define DEVSWCONV_SIZE (sizeof(struct devsw_conv))
96
97 extern const struct bdevsw **bdevsw, *bdevsw0[];
98 extern const struct cdevsw **cdevsw, *cdevsw0[];
99 extern struct devsw_conv *devsw_conv, devsw_conv0[];
100 extern const int sys_bdevsws, sys_cdevsws;
101 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
102
103 static int bdevsw_attach(const char *, const struct bdevsw *, int *);
104 static int cdevsw_attach(const char *, const struct cdevsw *, int *);
105 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
106
107 static kmutex_t devsw_lock;
108
109 void
110 devsw_init(void)
111 {
112
113 KASSERT(sys_bdevsws < MAXDEVSW - 1);
114 KASSERT(sys_cdevsws < MAXDEVSW - 1);
115
116 mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
117 }
118
119 int
120 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
121 const struct cdevsw *cdev, int *cmajor)
122 {
123 struct devsw_conv *conv;
124 char *name;
125 int error, i;
126
127 if (devname == NULL || cdev == NULL)
128 return (EINVAL);
129
130 mutex_enter(&devsw_lock);
131
132 for (i = 0 ; i < max_devsw_convs ; i++) {
133 conv = &devsw_conv[i];
134 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
135 continue;
136
137 if (*bmajor < 0)
138 *bmajor = conv->d_bmajor;
139 if (*cmajor < 0)
140 *cmajor = conv->d_cmajor;
141
142 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
143 error = EINVAL;
144 goto fail;
145 }
146 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
147 error = EINVAL;
148 goto fail;
149 }
150
151 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
152 cdevsw[*cmajor] != NULL) {
153 error = EEXIST;
154 goto fail;
155 }
156
157 if (bdev != NULL)
158 bdevsw[*bmajor] = bdev;
159 cdevsw[*cmajor] = cdev;
160
161 mutex_exit(&devsw_lock);
162 return (0);
163 }
164
165 error = bdevsw_attach(devname, bdev, bmajor);
166 if (error != 0)
167 goto fail;
168 error = cdevsw_attach(devname, cdev, cmajor);
169 if (error != 0) {
170 devsw_detach_locked(bdev, NULL);
171 goto fail;
172 }
173
174 for (i = 0 ; i < max_devsw_convs ; i++) {
175 if (devsw_conv[i].d_name == NULL)
176 break;
177 }
178 if (i == max_devsw_convs) {
179 struct devsw_conv *newptr;
180 int old, new;
181
182 old = max_devsw_convs;
183 new = old + 1;
184
185 newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
186 if (newptr == NULL) {
187 devsw_detach_locked(bdev, cdev);
188 error = ENOMEM;
189 goto fail;
190 }
191 newptr[old].d_name = NULL;
192 newptr[old].d_bmajor = -1;
193 newptr[old].d_cmajor = -1;
194 memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
195 if (devsw_conv != devsw_conv0)
196 kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
197 devsw_conv = newptr;
198 max_devsw_convs = new;
199 }
200
201 i = strlen(devname) + 1;
202 name = kmem_alloc(i, KM_NOSLEEP);
203 if (name == NULL) {
204 devsw_detach_locked(bdev, cdev);
205 goto fail;
206 }
207 strlcpy(name, devname, i);
208
209 devsw_conv[i].d_name = name;
210 devsw_conv[i].d_bmajor = *bmajor;
211 devsw_conv[i].d_cmajor = *cmajor;
212
213 mutex_exit(&devsw_lock);
214 return (0);
215 fail:
216 mutex_exit(&devsw_lock);
217 return (error);
218 }
219
/*
 * Install a block device switch at *devmajor, allocating a dynamic
 * major when *devmajor is negative.  Grows the bdevsw table (once,
 * to MAXDEVSW entries) when the requested major does not yet fit.
 *
 * Returns 0 on success, ENOMEM when majors or memory are exhausted,
 * EEXIST when the major is already occupied.
 *
 * => Must be called with devsw_lock held.
 */
static int
bdevsw_attach(const char *devname, const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	/* Drivers without a block device are not an error. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Find a free dynamic major: the bdevsw slot must be
		 * empty AND not reserved by a conversion-table entry
		 * belonging to a currently-detached driver.  If the
		 * scan falls off the end, bmajor == max_bdevsws and
		 * the growth path below extends the table.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/*
		 * "Fork" the static table exactly once; old readers may
		 * keep using the previous pointer safely (see the
		 * Allocation comment at the top of this file).
		 */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}
268
/*
 * Install a character device switch at *devmajor, allocating a dynamic
 * major when *devmajor is negative.  Grows the cdevsw table (once, to
 * MAXDEVSW entries) when the requested major does not yet fit.
 *
 * Returns 0 on success, ENOMEM when majors or memory are exhausted,
 * EEXIST when the major is already occupied.
 *
 * => Must be called with devsw_lock held.  Unlike bdevsw_attach(),
 *    devsw is never NULL here (devsw_attach() rejects that earlier).
 */
static int
cdevsw_attach(const char *devname, const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (*devmajor < 0) {
		/*
		 * Find a free dynamic major: the cdevsw slot must be
		 * empty AND not reserved by a conversion-table entry
		 * belonging to a currently-detached driver.  If the
		 * scan falls off the end, cmajor == max_cdevsws and
		 * the growth path below extends the table.
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/*
		 * "Fork" the static table exactly once; old readers may
		 * keep using the previous pointer safely (see the
		 * Allocation comment at the top of this file).
		 */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}
314
315 static void
316 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
317 {
318 int i;
319
320 KASSERT(mutex_owned(&devsw_lock));
321
322 if (bdev != NULL) {
323 for (i = 0 ; i < max_bdevsws ; i++) {
324 if (bdevsw[i] != bdev)
325 continue;
326 bdevsw[i] = NULL;
327 break;
328 }
329 }
330 if (cdev != NULL) {
331 for (i = 0 ; i < max_cdevsws ; i++) {
332 if (cdevsw[i] != cdev)
333 continue;
334 cdevsw[i] = NULL;
335 break;
336 }
337 }
338 }
339
/*
 * Remove a driver's block and/or character device switch entries,
 * taking devsw_lock to serialize against concurrent attach/detach
 * and name/major conversions.  Either pointer may be NULL.
 */
void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&devsw_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&devsw_lock);
}
348
349 /*
350 * Look up a block device by number.
351 *
352 * => Caller must ensure that the device is attached.
353 */
354 const struct bdevsw *
355 bdevsw_lookup(dev_t dev)
356 {
357 int bmajor;
358
359 if (dev == NODEV)
360 return (NULL);
361 bmajor = major(dev);
362 if (bmajor < 0 || bmajor >= max_bdevsws)
363 return (NULL);
364
365 return (bdevsw[bmajor]);
366 }
367
368 /*
369 * Look up a character device by number.
370 *
371 * => Caller must ensure that the device is attached.
372 */
373 const struct cdevsw *
374 cdevsw_lookup(dev_t dev)
375 {
376 int cmajor;
377
378 if (dev == NODEV)
379 return (NULL);
380 cmajor = major(dev);
381 if (cmajor < 0 || cmajor >= max_cdevsws)
382 return (NULL);
383
384 return (cdevsw[cmajor]);
385 }
386
387 /*
388 * Look up a block device by reference to its operations set.
389 *
390 * => Caller must ensure that the device is not detached, and therefore
391 * that the returned major is still valid when dereferenced.
392 */
393 int
394 bdevsw_lookup_major(const struct bdevsw *bdev)
395 {
396 int bmajor;
397
398 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
399 if (bdevsw[bmajor] == bdev)
400 return (bmajor);
401 }
402
403 return (-1);
404 }
405
406 /*
407 * Look up a character device by reference to its operations set.
408 *
409 * => Caller must ensure that the device is not detached, and therefore
410 * that the returned major is still valid when dereferenced.
411 */
412 int
413 cdevsw_lookup_major(const struct cdevsw *cdev)
414 {
415 int cmajor;
416
417 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
418 if (cdevsw[cmajor] == cdev)
419 return (cmajor);
420 }
421
422 return (-1);
423 }
424
425 /*
426 * Convert from block major number to name.
427 *
428 * => Caller must ensure that the device is not detached, and therefore
429 * that the name pointer is still valid when dereferenced.
430 */
const char *
devsw_blk2name(int bmajor)
{
	const char *name;
	int cmajor, i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&devsw_lock);
	/* the block major must be valid and currently attached */
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NULL);
	}
	/* find the conversion entry; 'i' indexes it after the break */
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/*
	 * Only report a name when the paired character device is also
	 * attached; cmajor >= 0 guarantees 'i' is a valid index here.
	 */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&devsw_lock);

	return (name);
}
457
458 /*
459 * Convert from device name to block major number.
460 *
461 * => Caller must ensure that the device is not detached, and therefore
462 * that the major number is still valid when dereferenced.
463 */
int
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int bmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&devsw_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		/*
		 * Accept "sd" or "sd0", but not e.g. "sdx": anything
		 * following the driver name must be a unit digit.
		 */
		if (*(name +len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		/*
		 * A stale conversion entry (the driver has been
		 * detached) terminates the search with failure rather
		 * than falling through to another entry.
		 */
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: too short buffer");
#endif /* DEVSW_DEBUG */
			/* copy the bare driver name back to the caller */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&devsw_lock);
		return (bmajor);
	}

	mutex_exit(&devsw_lock);
	return (-1);
}
504
505 /*
506 * Convert from character dev_t to block dev_t.
507 *
508 * => Caller must ensure that the device is not detached, and therefore
509 * that the major number is still valid when dereferenced.
510 */
511 dev_t
512 devsw_chr2blk(dev_t cdev)
513 {
514 int bmajor, cmajor, i;
515 dev_t rv;
516
517 cmajor = major(cdev);
518 bmajor = -1;
519 rv = NODEV;
520
521 mutex_enter(&devsw_lock);
522 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
523 mutex_exit(&devsw_lock);
524 return (NODEV);
525 }
526 for (i = 0 ; i < max_devsw_convs ; i++) {
527 if (devsw_conv[i].d_cmajor == cmajor) {
528 bmajor = devsw_conv[i].d_bmajor;
529 break;
530 }
531 }
532 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
533 rv = makedev(bmajor, minor(cdev));
534 mutex_exit(&devsw_lock);
535
536 return (rv);
537 }
538
539 /*
540 * Convert from block dev_t to character dev_t.
541 *
542 * => Caller must ensure that the device is not detached, and therefore
543 * that the major number is still valid when dereferenced.
544 */
545 dev_t
546 devsw_blk2chr(dev_t bdev)
547 {
548 int bmajor, cmajor, i;
549 dev_t rv;
550
551 bmajor = major(bdev);
552 cmajor = -1;
553 rv = NODEV;
554
555 mutex_enter(&devsw_lock);
556 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
557 mutex_exit(&devsw_lock);
558 return (NODEV);
559 }
560 for (i = 0 ; i < max_devsw_convs ; i++) {
561 if (devsw_conv[i].d_bmajor == bmajor) {
562 cmajor = devsw_conv[i].d_cmajor;
563 break;
564 }
565 }
566 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
567 rv = makedev(cmajor, minor(bdev));
568 mutex_exit(&devsw_lock);
569
570 return (rv);
571 }
572
573 /*
574 * Device access methods.
575 */
576
577 #define DEV_LOCK(d) \
578 if ((d->d_flag & D_MPSAFE) == 0) { \
579 KERNEL_LOCK(1, curlwp); \
580 }
581
582 #define DEV_UNLOCK(d) \
583 if ((d->d_flag & D_MPSAFE) == 0) { \
584 KERNEL_UNLOCK_ONE(curlwp); \
585 }
586
587 int
588 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
589 {
590 const struct bdevsw *d;
591 int rv;
592
593 /*
594 * For open we need to lock, in order to synchronize
595 * with attach/detach.
596 */
597 mutex_enter(&devsw_lock);
598 d = bdevsw_lookup(dev);
599 mutex_exit(&devsw_lock);
600 if (d == NULL)
601 return ENXIO;
602
603 DEV_LOCK(d);
604 rv = (*d->d_open)(dev, flag, devtype, l);
605 DEV_UNLOCK(d);
606
607 return rv;
608 }
609
610 int
611 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
612 {
613 const struct bdevsw *d;
614 int rv;
615
616 if ((d = bdevsw_lookup(dev)) == NULL)
617 return ENXIO;
618
619 DEV_LOCK(d);
620 rv = (*d->d_close)(dev, flag, devtype, l);
621 DEV_UNLOCK(d);
622
623 return rv;
624 }
625
626 void
627 bdev_strategy(struct buf *bp)
628 {
629 const struct bdevsw *d;
630
631 if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
632 panic("bdev_strategy");
633
634 DEV_LOCK(d);
635 (*d->d_strategy)(bp);
636 DEV_UNLOCK(d);
637 }
638
639 int
640 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
641 {
642 const struct bdevsw *d;
643 int rv;
644
645 if ((d = bdevsw_lookup(dev)) == NULL)
646 return ENXIO;
647
648 DEV_LOCK(d);
649 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
650 DEV_UNLOCK(d);
651
652 return rv;
653 }
654
655 int
656 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
657 {
658 const struct bdevsw *d;
659 int rv;
660
661 /*
662 * Dump can be called without the device open. Since it can
663 * currently only be called with the system paused (and in a
664 * potentially unstable state), we don't perform any locking.
665 */
666 if ((d = bdevsw_lookup(dev)) == NULL)
667 return ENXIO;
668
669 /* DEV_LOCK(d); */
670 rv = (*d->d_dump)(dev, addr, data, sz);
671 /* DEV_UNLOCK(d); */
672
673 return rv;
674 }
675
676 int
677 bdev_type(dev_t dev)
678 {
679 const struct bdevsw *d;
680
681 if ((d = bdevsw_lookup(dev)) == NULL)
682 return D_OTHER;
683 return d->d_flag & D_TYPEMASK;
684 }
685
686 int
687 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
688 {
689 const struct cdevsw *d;
690 int rv;
691
692 /*
693 * For open we need to lock, in order to synchronize
694 * with attach/detach.
695 */
696 mutex_enter(&devsw_lock);
697 d = cdevsw_lookup(dev);
698 mutex_exit(&devsw_lock);
699 if (d == NULL)
700 return ENXIO;
701
702 DEV_LOCK(d);
703 rv = (*d->d_open)(dev, flag, devtype, l);
704 DEV_UNLOCK(d);
705
706 return rv;
707 }
708
709 int
710 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
711 {
712 const struct cdevsw *d;
713 int rv;
714
715 if ((d = cdevsw_lookup(dev)) == NULL)
716 return ENXIO;
717
718 DEV_LOCK(d);
719 rv = (*d->d_close)(dev, flag, devtype, l);
720 DEV_UNLOCK(d);
721
722 return rv;
723 }
724
725 int
726 cdev_read(dev_t dev, struct uio *uio, int flag)
727 {
728 const struct cdevsw *d;
729 int rv;
730
731 if ((d = cdevsw_lookup(dev)) == NULL)
732 return ENXIO;
733
734 DEV_LOCK(d);
735 rv = (*d->d_read)(dev, uio, flag);
736 DEV_UNLOCK(d);
737
738 return rv;
739 }
740
741 int
742 cdev_write(dev_t dev, struct uio *uio, int flag)
743 {
744 const struct cdevsw *d;
745 int rv;
746
747 if ((d = cdevsw_lookup(dev)) == NULL)
748 return ENXIO;
749
750 DEV_LOCK(d);
751 rv = (*d->d_write)(dev, uio, flag);
752 DEV_UNLOCK(d);
753
754 return rv;
755 }
756
757 int
758 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
759 {
760 const struct cdevsw *d;
761 int rv;
762
763 if ((d = cdevsw_lookup(dev)) == NULL)
764 return ENXIO;
765
766 DEV_LOCK(d);
767 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
768 DEV_UNLOCK(d);
769
770 return rv;
771 }
772
773 void
774 cdev_stop(struct tty *tp, int flag)
775 {
776 const struct cdevsw *d;
777
778 if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
779 return;
780
781 DEV_LOCK(d);
782 (*d->d_stop)(tp, flag);
783 DEV_UNLOCK(d);
784 }
785
786 struct tty *
787 cdev_tty(dev_t dev)
788 {
789 const struct cdevsw *d;
790 struct tty * rv;
791
792 if ((d = cdevsw_lookup(dev)) == NULL)
793 return NULL;
794
795 DEV_LOCK(d);
796 rv = (*d->d_tty)(dev);
797 DEV_UNLOCK(d);
798
799 return rv;
800 }
801
802 int
803 cdev_poll(dev_t dev, int flag, lwp_t *l)
804 {
805 const struct cdevsw *d;
806 int rv;
807
808 if ((d = cdevsw_lookup(dev)) == NULL)
809 return POLLERR;
810
811 DEV_LOCK(d);
812 rv = (*d->d_poll)(dev, flag, l);
813 DEV_UNLOCK(d);
814
815 return rv;
816 }
817
818 paddr_t
819 cdev_mmap(dev_t dev, off_t off, int flag)
820 {
821 const struct cdevsw *d;
822 paddr_t rv;
823
824 if ((d = cdevsw_lookup(dev)) == NULL)
825 return (paddr_t)-1LL;
826
827 DEV_LOCK(d);
828 rv = (*d->d_mmap)(dev, off, flag);
829 DEV_UNLOCK(d);
830
831 return rv;
832 }
833
834 int
835 cdev_kqfilter(dev_t dev, struct knote *kn)
836 {
837 const struct cdevsw *d;
838 int rv;
839
840 if ((d = cdevsw_lookup(dev)) == NULL)
841 return ENXIO;
842
843 DEV_LOCK(d);
844 rv = (*d->d_kqfilter)(dev, kn);
845 DEV_UNLOCK(d);
846
847 return rv;
848 }
849
850 int
851 cdev_type(dev_t dev)
852 {
853 const struct cdevsw *d;
854
855 if ((d = cdevsw_lookup(dev)) == NULL)
856 return D_OTHER;
857 return d->d_flag & D_TYPEMASK;
858 }
859