subr_devsw.c revision 1.17.4.1 1 /* $NetBSD: subr_devsw.c,v 1.17.4.1 2008/05/16 02:25:26 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.17.4.1 2008/05/16 02:25:26 yamt Exp $");
73
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82
83 #ifdef DEVSW_DEBUG
84 #define DPRINTF(x) printf x
85 #else /* DEVSW_DEBUG */
86 #define DPRINTF(x)
87 #endif /* DEVSW_DEBUG */
88
89 #define MAXDEVSW 512 /* the maximum of major device number */
90 #define BDEVSW_SIZE (sizeof(struct bdevsw *))
91 #define CDEVSW_SIZE (sizeof(struct cdevsw *))
92 #define DEVSWCONV_SIZE (sizeof(struct devsw_conv))
93
94 extern const struct bdevsw **bdevsw, *bdevsw0[];
95 extern const struct cdevsw **cdevsw, *cdevsw0[];
96 extern struct devsw_conv *devsw_conv, devsw_conv0[];
97 extern const int sys_bdevsws, sys_cdevsws;
98 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
99
100 static int bdevsw_attach(const struct bdevsw *, int *);
101 static int cdevsw_attach(const struct cdevsw *, int *);
102 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
103
104 kmutex_t devsw_lock;
105
106 void
107 devsw_init(void)
108 {
109
110 KASSERT(sys_bdevsws < MAXDEVSW - 1);
111 KASSERT(sys_cdevsws < MAXDEVSW - 1);
112
113 mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
114 }
115
116 int
117 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
118 const struct cdevsw *cdev, int *cmajor)
119 {
120 struct devsw_conv *conv;
121 char *name;
122 int error, i;
123
124 if (devname == NULL || cdev == NULL)
125 return (EINVAL);
126
127 mutex_enter(&devsw_lock);
128
129 for (i = 0 ; i < max_devsw_convs ; i++) {
130 conv = &devsw_conv[i];
131 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
132 continue;
133
134 if (*bmajor < 0)
135 *bmajor = conv->d_bmajor;
136 if (*cmajor < 0)
137 *cmajor = conv->d_cmajor;
138
139 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
140 error = EINVAL;
141 goto fail;
142 }
143 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
144 error = EINVAL;
145 goto fail;
146 }
147
148 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
149 cdevsw[*cmajor] != NULL) {
150 error = EEXIST;
151 goto fail;
152 }
153
154 if (bdev != NULL)
155 bdevsw[*bmajor] = bdev;
156 cdevsw[*cmajor] = cdev;
157
158 mutex_exit(&devsw_lock);
159 return (0);
160 }
161
162 error = bdevsw_attach(bdev, bmajor);
163 if (error != 0)
164 goto fail;
165 error = cdevsw_attach(cdev, cmajor);
166 if (error != 0) {
167 devsw_detach_locked(bdev, NULL);
168 goto fail;
169 }
170
171 for (i = 0 ; i < max_devsw_convs ; i++) {
172 if (devsw_conv[i].d_name == NULL)
173 break;
174 }
175 if (i == max_devsw_convs) {
176 struct devsw_conv *newptr;
177 int old, new;
178
179 old = max_devsw_convs;
180 new = old + 1;
181
182 newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
183 if (newptr == NULL) {
184 devsw_detach_locked(bdev, cdev);
185 error = ENOMEM;
186 goto fail;
187 }
188 newptr[old].d_name = NULL;
189 newptr[old].d_bmajor = -1;
190 newptr[old].d_cmajor = -1;
191 memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
192 if (devsw_conv != devsw_conv0)
193 kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
194 devsw_conv = newptr;
195 max_devsw_convs = new;
196 }
197
198 i = strlen(devname) + 1;
199 name = kmem_alloc(i, KM_NOSLEEP);
200 if (name == NULL) {
201 devsw_detach_locked(bdev, cdev);
202 goto fail;
203 }
204 strlcpy(name, devname, i);
205
206 devsw_conv[i].d_name = name;
207 devsw_conv[i].d_bmajor = *bmajor;
208 devsw_conv[i].d_cmajor = *cmajor;
209
210 mutex_exit(&devsw_lock);
211 return (0);
212 fail:
213 mutex_exit(&devsw_lock);
214 return (error);
215 }
216
217 static int
218 bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
219 {
220 const struct bdevsw **newptr;
221 int bmajor, i;
222
223 KASSERT(mutex_owned(&devsw_lock));
224
225 if (devsw == NULL)
226 return (0);
227
228 if (*devmajor < 0) {
229 for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
230 if (bdevsw[bmajor] != NULL)
231 continue;
232 for (i = 0 ; i < max_devsw_convs ; i++) {
233 if (devsw_conv[i].d_bmajor == bmajor)
234 break;
235 }
236 if (i != max_devsw_convs)
237 continue;
238 break;
239 }
240 *devmajor = bmajor;
241 }
242
243 if (*devmajor >= MAXDEVSW) {
244 printf("bdevsw_attach: block majors exhausted");
245 return (ENOMEM);
246 }
247
248 if (*devmajor >= max_bdevsws) {
249 KASSERT(bdevsw == bdevsw0);
250 newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
251 if (newptr == NULL)
252 return (ENOMEM);
253 memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
254 bdevsw = newptr;
255 max_bdevsws = MAXDEVSW;
256 }
257
258 if (bdevsw[*devmajor] != NULL)
259 return (EEXIST);
260
261 bdevsw[*devmajor] = devsw;
262
263 return (0);
264 }
265
266 static int
267 cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
268 {
269 const struct cdevsw **newptr;
270 int cmajor, i;
271
272 KASSERT(mutex_owned(&devsw_lock));
273
274 if (*devmajor < 0) {
275 for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
276 if (cdevsw[cmajor] != NULL)
277 continue;
278 for (i = 0 ; i < max_devsw_convs ; i++) {
279 if (devsw_conv[i].d_cmajor == cmajor)
280 break;
281 }
282 if (i != max_devsw_convs)
283 continue;
284 break;
285 }
286 *devmajor = cmajor;
287 }
288
289 if (*devmajor >= MAXDEVSW) {
290 printf("cdevsw_attach: character majors exhausted");
291 return (ENOMEM);
292 }
293
294 if (*devmajor >= max_cdevsws) {
295 KASSERT(cdevsw == cdevsw0);
296 newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
297 if (newptr == NULL)
298 return (ENOMEM);
299 memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
300 cdevsw = newptr;
301 max_cdevsws = MAXDEVSW;
302 }
303
304 if (cdevsw[*devmajor] != NULL)
305 return (EEXIST);
306
307 cdevsw[*devmajor] = devsw;
308
309 return (0);
310 }
311
312 static void
313 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
314 {
315 int i;
316
317 KASSERT(mutex_owned(&devsw_lock));
318
319 if (bdev != NULL) {
320 for (i = 0 ; i < max_bdevsws ; i++) {
321 if (bdevsw[i] != bdev)
322 continue;
323 bdevsw[i] = NULL;
324 break;
325 }
326 }
327 if (cdev != NULL) {
328 for (i = 0 ; i < max_cdevsws ; i++) {
329 if (cdevsw[i] != cdev)
330 continue;
331 cdevsw[i] = NULL;
332 break;
333 }
334 }
335 }
336
337 void
338 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
339 {
340
341 mutex_enter(&devsw_lock);
342 devsw_detach_locked(bdev, cdev);
343 mutex_exit(&devsw_lock);
344 }
345
346 /*
347 * Look up a block device by number.
348 *
349 * => Caller must ensure that the device is attached.
350 */
351 const struct bdevsw *
352 bdevsw_lookup(dev_t dev)
353 {
354 int bmajor;
355
356 if (dev == NODEV)
357 return (NULL);
358 bmajor = major(dev);
359 if (bmajor < 0 || bmajor >= max_bdevsws)
360 return (NULL);
361
362 return (bdevsw[bmajor]);
363 }
364
365 /*
366 * Look up a character device by number.
367 *
368 * => Caller must ensure that the device is attached.
369 */
370 const struct cdevsw *
371 cdevsw_lookup(dev_t dev)
372 {
373 int cmajor;
374
375 if (dev == NODEV)
376 return (NULL);
377 cmajor = major(dev);
378 if (cmajor < 0 || cmajor >= max_cdevsws)
379 return (NULL);
380
381 return (cdevsw[cmajor]);
382 }
383
384 /*
385 * Look up a block device by reference to its operations set.
386 *
387 * => Caller must ensure that the device is not detached, and therefore
388 * that the returned major is still valid when dereferenced.
389 */
390 int
391 bdevsw_lookup_major(const struct bdevsw *bdev)
392 {
393 int bmajor;
394
395 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
396 if (bdevsw[bmajor] == bdev)
397 return (bmajor);
398 }
399
400 return (-1);
401 }
402
403 /*
404 * Look up a character device by reference to its operations set.
405 *
406 * => Caller must ensure that the device is not detached, and therefore
407 * that the returned major is still valid when dereferenced.
408 */
409 int
410 cdevsw_lookup_major(const struct cdevsw *cdev)
411 {
412 int cmajor;
413
414 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
415 if (cdevsw[cmajor] == cdev)
416 return (cmajor);
417 }
418
419 return (-1);
420 }
421
422 /*
423 * Convert from block major number to name.
424 *
425 * => Caller must ensure that the device is not detached, and therefore
426 * that the name pointer is still valid when dereferenced.
427 */
428 const char *
429 devsw_blk2name(int bmajor)
430 {
431 const char *name;
432 int cmajor, i;
433
434 name = NULL;
435 cmajor = -1;
436
437 mutex_enter(&devsw_lock);
438 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
439 mutex_exit(&devsw_lock);
440 return (NULL);
441 }
442 for (i = 0 ; i < max_devsw_convs; i++) {
443 if (devsw_conv[i].d_bmajor == bmajor) {
444 cmajor = devsw_conv[i].d_cmajor;
445 break;
446 }
447 }
448 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
449 name = devsw_conv[i].d_name;
450 mutex_exit(&devsw_lock);
451
452 return (name);
453 }
454
455 /*
456 * Convert from device name to block major number.
457 *
458 * => Caller must ensure that the device is not detached, and therefore
459 * that the major number is still valid when dereferenced.
460 */
461 int
462 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
463 {
464 struct devsw_conv *conv;
465 int bmajor, i;
466
467 if (name == NULL)
468 return (-1);
469
470 mutex_enter(&devsw_lock);
471 for (i = 0 ; i < max_devsw_convs ; i++) {
472 size_t len;
473
474 conv = &devsw_conv[i];
475 if (conv->d_name == NULL)
476 continue;
477 len = strlen(conv->d_name);
478 if (strncmp(conv->d_name, name, len) != 0)
479 continue;
480 if (*(name +len) && !isdigit(*(name + len)))
481 continue;
482 bmajor = conv->d_bmajor;
483 if (bmajor < 0 || bmajor >= max_bdevsws ||
484 bdevsw[bmajor] == NULL)
485 break;
486 if (devname != NULL) {
487 #ifdef DEVSW_DEBUG
488 if (strlen(conv->d_name) >= devnamelen)
489 printf("devsw_name2blk: too short buffer");
490 #endif /* DEVSW_DEBUG */
491 strncpy(devname, conv->d_name, devnamelen);
492 devname[devnamelen - 1] = '\0';
493 }
494 mutex_exit(&devsw_lock);
495 return (bmajor);
496 }
497
498 mutex_exit(&devsw_lock);
499 return (-1);
500 }
501
502 /*
503 * Convert from device name to char major number.
504 *
505 * => Caller must ensure that the device is not detached, and therefore
506 * that the major number is still valid when dereferenced.
507 */
508 int
509 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
510 {
511 struct devsw_conv *conv;
512 int cmajor, i;
513
514 if (name == NULL)
515 return (-1);
516
517 mutex_enter(&devsw_lock);
518 for (i = 0 ; i < max_devsw_convs ; i++) {
519 size_t len;
520
521 conv = &devsw_conv[i];
522 if (conv->d_name == NULL)
523 continue;
524 len = strlen(conv->d_name);
525 if (strncmp(conv->d_name, name, len) != 0)
526 continue;
527 if (*(name +len) && !isdigit(*(name + len)))
528 continue;
529 cmajor = conv->d_cmajor;
530 if (cmajor < 0 || cmajor >= max_cdevsws ||
531 cdevsw[cmajor] == NULL)
532 break;
533 if (devname != NULL) {
534 #ifdef DEVSW_DEBUG
535 if (strlen(conv->d_name) >= devnamelen)
536 printf("devsw_name2chr: too short buffer");
537 #endif /* DEVSW_DEBUG */
538 strncpy(devname, conv->d_name, devnamelen);
539 devname[devnamelen - 1] = '\0';
540 }
541 mutex_exit(&devsw_lock);
542 return (cmajor);
543 }
544
545 mutex_exit(&devsw_lock);
546 return (-1);
547 }
548
549 /*
550 * Convert from character dev_t to block dev_t.
551 *
552 * => Caller must ensure that the device is not detached, and therefore
553 * that the major number is still valid when dereferenced.
554 */
555 dev_t
556 devsw_chr2blk(dev_t cdev)
557 {
558 int bmajor, cmajor, i;
559 dev_t rv;
560
561 cmajor = major(cdev);
562 bmajor = -1;
563 rv = NODEV;
564
565 mutex_enter(&devsw_lock);
566 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
567 mutex_exit(&devsw_lock);
568 return (NODEV);
569 }
570 for (i = 0 ; i < max_devsw_convs ; i++) {
571 if (devsw_conv[i].d_cmajor == cmajor) {
572 bmajor = devsw_conv[i].d_bmajor;
573 break;
574 }
575 }
576 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
577 rv = makedev(bmajor, minor(cdev));
578 mutex_exit(&devsw_lock);
579
580 return (rv);
581 }
582
583 /*
584 * Convert from block dev_t to character dev_t.
585 *
586 * => Caller must ensure that the device is not detached, and therefore
587 * that the major number is still valid when dereferenced.
588 */
589 dev_t
590 devsw_blk2chr(dev_t bdev)
591 {
592 int bmajor, cmajor, i;
593 dev_t rv;
594
595 bmajor = major(bdev);
596 cmajor = -1;
597 rv = NODEV;
598
599 mutex_enter(&devsw_lock);
600 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
601 mutex_exit(&devsw_lock);
602 return (NODEV);
603 }
604 for (i = 0 ; i < max_devsw_convs ; i++) {
605 if (devsw_conv[i].d_bmajor == bmajor) {
606 cmajor = devsw_conv[i].d_cmajor;
607 break;
608 }
609 }
610 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
611 rv = makedev(cmajor, minor(bdev));
612 mutex_exit(&devsw_lock);
613
614 return (rv);
615 }
616
617 /*
618 * Device access methods.
619 */
620
/*
 * Take the big kernel lock around a driver call unless the driver has
 * marked itself D_MPSAFE.  The surrounding function must declare an
 * "int mpflag" local: DEV_LOCK() records the D_MPSAFE state there and
 * DEV_UNLOCK() consumes it.  Wrapped in do { } while (0) so the macros
 * expand safely as single statements inside unbraced if/else bodies.
 */
#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {	\
			KERNEL_LOCK(1, NULL);			\
		}						\
	} while (0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0) {				\
			KERNEL_UNLOCK_ONE(NULL);		\
		}						\
	} while (0)
630
631 int
632 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
633 {
634 const struct bdevsw *d;
635 int rv, mpflag;
636
637 /*
638 * For open we need to lock, in order to synchronize
639 * with attach/detach.
640 */
641 mutex_enter(&devsw_lock);
642 d = bdevsw_lookup(dev);
643 mutex_exit(&devsw_lock);
644 if (d == NULL)
645 return ENXIO;
646
647 DEV_LOCK(d);
648 rv = (*d->d_open)(dev, flag, devtype, l);
649 DEV_UNLOCK(d);
650
651 return rv;
652 }
653
654 int
655 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
656 {
657 const struct bdevsw *d;
658 int rv, mpflag;
659
660 if ((d = bdevsw_lookup(dev)) == NULL)
661 return ENXIO;
662
663 DEV_LOCK(d);
664 rv = (*d->d_close)(dev, flag, devtype, l);
665 DEV_UNLOCK(d);
666
667 return rv;
668 }
669
670 void
671 bdev_strategy(struct buf *bp)
672 {
673 const struct bdevsw *d;
674 int mpflag;
675
676 if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
677 panic("bdev_strategy");
678
679 DEV_LOCK(d);
680 (*d->d_strategy)(bp);
681 DEV_UNLOCK(d);
682 }
683
684 int
685 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
686 {
687 const struct bdevsw *d;
688 int rv, mpflag;
689
690 if ((d = bdevsw_lookup(dev)) == NULL)
691 return ENXIO;
692
693 DEV_LOCK(d);
694 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
695 DEV_UNLOCK(d);
696
697 return rv;
698 }
699
700 int
701 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
702 {
703 const struct bdevsw *d;
704 int rv;
705
706 /*
707 * Dump can be called without the device open. Since it can
708 * currently only be called with the system paused (and in a
709 * potentially unstable state), we don't perform any locking.
710 */
711 if ((d = bdevsw_lookup(dev)) == NULL)
712 return ENXIO;
713
714 /* DEV_LOCK(d); */
715 rv = (*d->d_dump)(dev, addr, data, sz);
716 /* DEV_UNLOCK(d); */
717
718 return rv;
719 }
720
721 int
722 bdev_type(dev_t dev)
723 {
724 const struct bdevsw *d;
725
726 if ((d = bdevsw_lookup(dev)) == NULL)
727 return D_OTHER;
728 return d->d_flag & D_TYPEMASK;
729 }
730
731 int
732 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
733 {
734 const struct cdevsw *d;
735 int rv, mpflag;
736
737 /*
738 * For open we need to lock, in order to synchronize
739 * with attach/detach.
740 */
741 mutex_enter(&devsw_lock);
742 d = cdevsw_lookup(dev);
743 mutex_exit(&devsw_lock);
744 if (d == NULL)
745 return ENXIO;
746
747 DEV_LOCK(d);
748 rv = (*d->d_open)(dev, flag, devtype, l);
749 DEV_UNLOCK(d);
750
751 return rv;
752 }
753
754 int
755 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
756 {
757 const struct cdevsw *d;
758 int rv, mpflag;
759
760 if ((d = cdevsw_lookup(dev)) == NULL)
761 return ENXIO;
762
763 DEV_LOCK(d);
764 rv = (*d->d_close)(dev, flag, devtype, l);
765 DEV_UNLOCK(d);
766
767 return rv;
768 }
769
770 int
771 cdev_read(dev_t dev, struct uio *uio, int flag)
772 {
773 const struct cdevsw *d;
774 int rv, mpflag;
775
776 if ((d = cdevsw_lookup(dev)) == NULL)
777 return ENXIO;
778
779 DEV_LOCK(d);
780 rv = (*d->d_read)(dev, uio, flag);
781 DEV_UNLOCK(d);
782
783 return rv;
784 }
785
786 int
787 cdev_write(dev_t dev, struct uio *uio, int flag)
788 {
789 const struct cdevsw *d;
790 int rv, mpflag;
791
792 if ((d = cdevsw_lookup(dev)) == NULL)
793 return ENXIO;
794
795 DEV_LOCK(d);
796 rv = (*d->d_write)(dev, uio, flag);
797 DEV_UNLOCK(d);
798
799 return rv;
800 }
801
802 int
803 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
804 {
805 const struct cdevsw *d;
806 int rv, mpflag;
807
808 if ((d = cdevsw_lookup(dev)) == NULL)
809 return ENXIO;
810
811 DEV_LOCK(d);
812 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
813 DEV_UNLOCK(d);
814
815 return rv;
816 }
817
818 void
819 cdev_stop(struct tty *tp, int flag)
820 {
821 const struct cdevsw *d;
822 int mpflag;
823
824 if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
825 return;
826
827 DEV_LOCK(d);
828 (*d->d_stop)(tp, flag);
829 DEV_UNLOCK(d);
830 }
831
832 struct tty *
833 cdev_tty(dev_t dev)
834 {
835 const struct cdevsw *d;
836 struct tty * rv;
837 int mpflag;
838
839 if ((d = cdevsw_lookup(dev)) == NULL)
840 return NULL;
841
842 /* XXX Check if necessary. */
843 if (d->d_tty == NULL)
844 return NULL;
845
846 DEV_LOCK(d);
847 rv = (*d->d_tty)(dev);
848 DEV_UNLOCK(d);
849
850 return rv;
851 }
852
853 int
854 cdev_poll(dev_t dev, int flag, lwp_t *l)
855 {
856 const struct cdevsw *d;
857 int rv, mpflag;
858
859 if ((d = cdevsw_lookup(dev)) == NULL)
860 return POLLERR;
861
862 DEV_LOCK(d);
863 rv = (*d->d_poll)(dev, flag, l);
864 DEV_UNLOCK(d);
865
866 return rv;
867 }
868
869 paddr_t
870 cdev_mmap(dev_t dev, off_t off, int flag)
871 {
872 const struct cdevsw *d;
873 paddr_t rv;
874 int mpflag;
875
876 if ((d = cdevsw_lookup(dev)) == NULL)
877 return (paddr_t)-1LL;
878
879 DEV_LOCK(d);
880 rv = (*d->d_mmap)(dev, off, flag);
881 DEV_UNLOCK(d);
882
883 return rv;
884 }
885
886 int
887 cdev_kqfilter(dev_t dev, struct knote *kn)
888 {
889 const struct cdevsw *d;
890 int rv, mpflag;
891
892 if ((d = cdevsw_lookup(dev)) == NULL)
893 return ENXIO;
894
895 DEV_LOCK(d);
896 rv = (*d->d_kqfilter)(dev, kn);
897 DEV_UNLOCK(d);
898
899 return rv;
900 }
901
902 int
903 cdev_type(dev_t dev)
904 {
905 const struct cdevsw *d;
906
907 if ((d = cdevsw_lookup(dev)) == NULL)
908 return D_OTHER;
909 return d->d_flag & D_TYPEMASK;
910 }
911