subr_devsw.c revision 1.30.2.1 1 /* $NetBSD: subr_devsw.c,v 1.30.2.1 2014/08/20 00:04:29 tls Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device is has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.30.2.1 2014/08/20 00:04:29 tls Exp $");
73
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82 #include <sys/reboot.h>
83
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum of major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

/*
 * The live tables (bdevsw/cdevsw/devsw_conv) initially point at the
 * static, config(8)-generated arrays (bdevsw0/cdevsw0/devsw_conv0)
 * and are switched to kmem-allocated copies if they ever need to grow.
 */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;	/* statically configured counts */
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

/* Guards all updates to the devsw and conversion tables. */
kmutex_t device_lock;

/* Replaced with the real biodone() once the VFS layer is present. */
void (*biodone_vfs)(buf_t *) = (void *)nullop;
108
/*
 * Initialize the device switch subsystem: sanity-check the statically
 * configured table sizes and create the lock that guards all devsw
 * table updates.
 */
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
}
117
/*
 * Register a block and/or character driver under "devname".
 *
 * On entry *bmajor/*cmajor are either a specifically requested major
 * or -1 meaning "allocate one for me"; on success the majors actually
 * used are returned through the pointers.  A character switch (cdev)
 * is mandatory; a block switch (bdev) is optional.
 *
 * => Returns 0, or EINVAL/EEXIST/ENOMEM on failure.
 */
int
devsw_attach(const char *devname,
    const struct bdevsw *bdev, devmajor_t *bmajor,
    const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/*
	 * If this name was registered before (e.g. a previously
	 * detached driver) re-use the majors recorded for it, so a
	 * driver always comes back at the same numbers.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* Any explicitly requested majors must match the record. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		/* The recorded slots must currently be empty. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* Unknown name: allocate majors and install the switch entries. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		/* Roll back the block-side attach done just above. */
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free slot in the name<->major conversion table. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		/* Table full: grow it by one entry (old table kept valid
		 * until the pointer swap, per the comment at file top). */
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	/* Record the name so future attaches re-use the same majors. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
221
/*
 * Install a block device switch at *devmajor (choosing a free major
 * if *devmajor is -1).  Grows the bdevsw table to MAXDEVSW entries,
 * exactly once, if the requested major is beyond the current size.
 *
 * => Caller holds device_lock.
 * => Returns 0, ENOMEM or EEXIST.  devsw == NULL is a no-op success.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Pick the first dynamic major that is both unused in
		 * the table and not reserved by a conversion record.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/* One-time "fork" of the table; see file-top comment. */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}
271
/*
 * Install a character device switch at *devmajor (choosing a free
 * major if *devmajor is -1).  Mirrors bdevsw_attach(); no NULL check
 * on devsw here because devsw_attach() already rejects cdev == NULL.
 *
 * => Caller holds device_lock.
 * => Returns 0, ENOMEM or EEXIST.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/* Skip majors in use or reserved by a conversion record. */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/* One-time "fork" of the table; see file-top comment. */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}
318
319 static void
320 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
321 {
322 int i;
323
324 KASSERT(mutex_owned(&device_lock));
325
326 if (bdev != NULL) {
327 for (i = 0 ; i < max_bdevsws ; i++) {
328 if (bdevsw[i] != bdev)
329 continue;
330 bdevsw[i] = NULL;
331 break;
332 }
333 }
334 if (cdev != NULL) {
335 for (i = 0 ; i < max_cdevsws ; i++) {
336 if (cdevsw[i] != cdev)
337 continue;
338 cdevsw[i] = NULL;
339 break;
340 }
341 }
342 }
343
/*
 * Public entry point for driver detach: clear the table entries for
 * bdev/cdev under device_lock.  Always succeeds (returns 0).
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
353
354 /*
355 * Look up a block device by number.
356 *
357 * => Caller must ensure that the device is attached.
358 */
359 const struct bdevsw *
360 bdevsw_lookup(dev_t dev)
361 {
362 devmajor_t bmajor;
363
364 if (dev == NODEV)
365 return (NULL);
366 bmajor = major(dev);
367 if (bmajor < 0 || bmajor >= max_bdevsws)
368 return (NULL);
369
370 return (bdevsw[bmajor]);
371 }
372
373 /*
374 * Look up a character device by number.
375 *
376 * => Caller must ensure that the device is attached.
377 */
378 const struct cdevsw *
379 cdevsw_lookup(dev_t dev)
380 {
381 devmajor_t cmajor;
382
383 if (dev == NODEV)
384 return (NULL);
385 cmajor = major(dev);
386 if (cmajor < 0 || cmajor >= max_cdevsws)
387 return (NULL);
388
389 return (cdevsw[cmajor]);
390 }
391
392 /*
393 * Look up a block device by reference to its operations set.
394 *
395 * => Caller must ensure that the device is not detached, and therefore
396 * that the returned major is still valid when dereferenced.
397 */
398 devmajor_t
399 bdevsw_lookup_major(const struct bdevsw *bdev)
400 {
401 devmajor_t bmajor;
402
403 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
404 if (bdevsw[bmajor] == bdev)
405 return (bmajor);
406 }
407
408 return (NODEVMAJOR);
409 }
410
411 /*
412 * Look up a character device by reference to its operations set.
413 *
414 * => Caller must ensure that the device is not detached, and therefore
415 * that the returned major is still valid when dereferenced.
416 */
417 devmajor_t
418 cdevsw_lookup_major(const struct cdevsw *cdev)
419 {
420 devmajor_t cmajor;
421
422 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
423 if (cdevsw[cmajor] == cdev)
424 return (cmajor);
425 }
426
427 return (NODEVMAJOR);
428 }
429
/*
 * Convert from block major number to name.
 *
 * Only returns the name if both the block major and its paired
 * character major are currently attached.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/*
	 * cmajor >= 0 implies the loop broke at a match, so devsw_conv[i]
	 * is the matching record here (not one past the end).
	 */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}
463
464 /*
465 * Convert char major number to device driver name.
466 */
467 const char *
468 cdevsw_getname(devmajor_t major)
469 {
470 const char *name;
471 int i;
472
473 name = NULL;
474
475 if (major < 0)
476 return (NULL);
477
478 mutex_enter(&device_lock);
479 for (i = 0 ; i < max_devsw_convs; i++) {
480 if (devsw_conv[i].d_cmajor == major) {
481 name = devsw_conv[i].d_name;
482 break;
483 }
484 }
485 mutex_exit(&device_lock);
486 return (name);
487 }
488
489 /*
490 * Convert block major number to device driver name.
491 */
492 const char *
493 bdevsw_getname(devmajor_t major)
494 {
495 const char *name;
496 int i;
497
498 name = NULL;
499
500 if (major < 0)
501 return (NULL);
502
503 mutex_enter(&device_lock);
504 for (i = 0 ; i < max_devsw_convs; i++) {
505 if (devsw_conv[i].d_bmajor == major) {
506 name = devsw_conv[i].d_name;
507 break;
508 }
509 }
510 mutex_exit(&device_lock);
511 return (name);
512 }
513
514 /*
515 * Convert from device name to block major number.
516 *
517 * => Caller must ensure that the device is not detached, and therefore
518 * that the major number is still valid when dereferenced.
519 */
520 devmajor_t
521 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
522 {
523 struct devsw_conv *conv;
524 devmajor_t bmajor;
525 int i;
526
527 if (name == NULL)
528 return (NODEVMAJOR);
529
530 mutex_enter(&device_lock);
531 for (i = 0 ; i < max_devsw_convs ; i++) {
532 size_t len;
533
534 conv = &devsw_conv[i];
535 if (conv->d_name == NULL)
536 continue;
537 len = strlen(conv->d_name);
538 if (strncmp(conv->d_name, name, len) != 0)
539 continue;
540 if (*(name +len) && !isdigit(*(name + len)))
541 continue;
542 bmajor = conv->d_bmajor;
543 if (bmajor < 0 || bmajor >= max_bdevsws ||
544 bdevsw[bmajor] == NULL)
545 break;
546 if (devname != NULL) {
547 #ifdef DEVSW_DEBUG
548 if (strlen(conv->d_name) >= devnamelen)
549 printf("devsw_name2blk: too short buffer");
550 #endif /* DEVSW_DEBUG */
551 strncpy(devname, conv->d_name, devnamelen);
552 devname[devnamelen - 1] = '\0';
553 }
554 mutex_exit(&device_lock);
555 return (bmajor);
556 }
557
558 mutex_exit(&device_lock);
559 return (NODEVMAJOR);
560 }
561
562 /*
563 * Convert from device name to char major number.
564 *
565 * => Caller must ensure that the device is not detached, and therefore
566 * that the major number is still valid when dereferenced.
567 */
568 devmajor_t
569 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
570 {
571 struct devsw_conv *conv;
572 devmajor_t cmajor;
573 int i;
574
575 if (name == NULL)
576 return (NODEVMAJOR);
577
578 mutex_enter(&device_lock);
579 for (i = 0 ; i < max_devsw_convs ; i++) {
580 size_t len;
581
582 conv = &devsw_conv[i];
583 if (conv->d_name == NULL)
584 continue;
585 len = strlen(conv->d_name);
586 if (strncmp(conv->d_name, name, len) != 0)
587 continue;
588 if (*(name +len) && !isdigit(*(name + len)))
589 continue;
590 cmajor = conv->d_cmajor;
591 if (cmajor < 0 || cmajor >= max_cdevsws ||
592 cdevsw[cmajor] == NULL)
593 break;
594 if (devname != NULL) {
595 #ifdef DEVSW_DEBUG
596 if (strlen(conv->d_name) >= devnamelen)
597 printf("devsw_name2chr: too short buffer");
598 #endif /* DEVSW_DEBUG */
599 strncpy(devname, conv->d_name, devnamelen);
600 devname[devnamelen - 1] = '\0';
601 }
602 mutex_exit(&device_lock);
603 return (cmajor);
604 }
605
606 mutex_exit(&device_lock);
607 return (NODEVMAJOR);
608 }
609
/*
 * Convert from character dev_t to block dev_t, preserving the minor.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 * => Returns NODEV if either side is not currently attached.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	/* Only valid if the paired block major is attached too. */
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return (rv);
}
644
/*
 * Convert from block dev_t to character dev_t, preserving the minor.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 * => Returns NODEV if either side is not currently attached.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/* Only valid if the paired character major is attached too. */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return (rv);
}
679
/*
 * Device access methods.
 */

/*
 * Take/release the big kernel lock around a driver call unless the
 * driver is marked D_MPSAFE.  The local "mpflag" records the decision
 * so that DEV_UNLOCK() pairs correctly with DEV_LOCK().
 *
 * Wrapped in do { } while (0) so each macro expands as exactly one
 * statement: the previous bare-if form was a dangling-else hazard and
 * relied on careful use under unbraced if bodies (e.g. bdev_size()).
 */
#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {	\
			KERNEL_LOCK(1, NULL);			\
		}						\
	} while (/* CONSTCOND */ 0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0) {				\
			KERNEL_UNLOCK_ONE(NULL);		\
		}						\
	} while (/* CONSTCOND */ 0)
693
/*
 * Open a block device: look up the driver (under device_lock, to
 * synchronize with attach/detach) and call its d_open, holding the
 * kernel lock unless the driver is D_MPSAFE.  Returns ENXIO if no
 * driver is attached at this major.
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
716
/*
 * Close a block device via d_close.  No device_lock here: the open
 * reference guarantees the entry exists (see file-top XXX comment).
 */
int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
732
/*
 * Queue a buffer for I/O via d_strategy.  If no driver is attached,
 * fail the buffer with ENXIO and complete it immediately.
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp); /* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
750
/*
 * Issue an ioctl on a block device via d_ioctl.  ENXIO if no driver
 * is attached.
 */
int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
766
/*
 * Write a crash dump via d_dump.  Deliberately lock-free: see the
 * comment in the body.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
787
788 int
789 bdev_type(dev_t dev)
790 {
791 const struct bdevsw *d;
792
793 if ((d = bdevsw_lookup(dev)) == NULL)
794 return D_OTHER;
795 return d->d_flag & D_TYPEMASK;
796 }
797
/*
 * Return the device size via d_psize, or -1 if the driver is absent
 * or provides no d_psize.  While dumping, the driver lock is skipped
 * (symmetrically for lock and unlock), hence mpflag is pre-zeroed so
 * it is never read uninitialized.
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}
820
/*
 * Discard (TRIM) a byte range on a block device via d_discard.
 * ENXIO if no driver is attached.
 */
int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}
836
/*
 * Open a character device: look up the driver (under device_lock, to
 * synchronize with attach/detach) and call its d_open.  ENXIO if no
 * driver is attached at this major.
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
859
/*
 * Close a character device via d_close.  ENXIO if no driver is
 * attached.
 */
int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
875
/*
 * Read from a character device via d_read.  ENXIO if no driver is
 * attached.
 */
int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
891
/*
 * Write to a character device via d_write.  ENXIO if no driver is
 * attached.
 */
int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
907
/*
 * Issue an ioctl on a character device via d_ioctl.  ENXIO if no
 * driver is attached.
 */
int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
923
/*
 * Stop output on a tty via the driver's d_stop.  Keys off the tty's
 * recorded device number; silently does nothing if no driver is
 * attached.
 */
void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}
937
938 struct tty *
939 cdev_tty(dev_t dev)
940 {
941 const struct cdevsw *d;
942
943 if ((d = cdevsw_lookup(dev)) == NULL)
944 return NULL;
945
946 /* XXX Check if necessary. */
947 if (d->d_tty == NULL)
948 return NULL;
949
950 return (*d->d_tty)(dev);
951 }
952
/*
 * Poll a character device via d_poll.  Returns POLLERR (revents
 * bits, not an errno) if no driver is attached.
 */
int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
968
/*
 * Map a character device offset via d_mmap.  Returns (paddr_t)-1
 * (the conventional mmap failure value) if no driver is attached.
 */
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}
985
/*
 * Attach a kqueue filter to a character device via d_kqfilter.
 * ENXIO if no driver is attached.
 */
int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}
1001
/*
 * Discard (TRIM) a byte range on a character device via d_discard.
 * ENXIO if no driver is attached.
 */
int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}
1017
1018 int
1019 cdev_type(dev_t dev)
1020 {
1021 const struct cdevsw *d;
1022
1023 if ((d = cdevsw_lookup(dev)) == NULL)
1024 return D_OTHER;
1025 return d->d_flag & D_TYPEMASK;
1026 }
1027