subr_devsw.c revision 1.34.2.2 1 /* $NetBSD: subr_devsw.c,v 1.34.2.2 2016/07/16 22:35:34 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device is has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.2 2016/07/16 22:35:34 pgoyette Exp $");
73
74 #ifdef _KERNEL_OPT
75 #include "opt_dtrace.h"
76 #endif
77
78 #include <sys/param.h>
79 #include <sys/conf.h>
80 #include <sys/kmem.h>
81 #include <sys/systm.h>
82 #include <sys/poll.h>
83 #include <sys/tty.h>
84 #include <sys/cpu.h>
85 #include <sys/buf.h>
86 #include <sys/reboot.h>
87 #include <sys/sdt.h>
88 #include <sys/atomic.h>
89 #include <sys/condvar.h>
90 #include <sys/localcount.h>
91 #include <sys/pserialize.h>
92
93 #ifdef DEVSW_DEBUG
94 #define DPRINTF(x) printf x
95 #else /* DEVSW_DEBUG */
96 #define DPRINTF(x)
97 #endif /* DEVSW_DEBUG */
98
99 #define MAXDEVSW 512 /* the maximum of major device number */
100 #define BDEVSW_SIZE (sizeof(struct bdevsw *))
101 #define CDEVSW_SIZE (sizeof(struct cdevsw *))
102 #define DEVSWCONV_SIZE (sizeof(struct devsw_conv))
103
104 extern const struct bdevsw **bdevsw, *bdevsw0[];
105 extern const struct cdevsw **cdevsw, *cdevsw0[];
106 extern struct devsw_conv *devsw_conv, devsw_conv0[];
107 extern const int sys_bdevsws, sys_cdevsws;
108 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
109
110 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
111 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
112 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
113
114 kmutex_t device_lock;
115 kcondvar_t device_cv;
116
117 void (*biodone_vfs)(buf_t *) = (void *)nullop;
118
119 void
120 devsw_init(void)
121 {
122
123 KASSERT(sys_bdevsws < MAXDEVSW - 1);
124 KASSERT(sys_cdevsws < MAXDEVSW - 1);
125 mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
126 cv_init(&device_cv, "devsw");
127 }
128
/*
 * devsw_attach:
 *
 *	Register a driver's cdevsw (mandatory) and bdevsw (optional)
 *	under the given name.  *bmajor/*cmajor select the majors to
 *	use; a negative value means "assign one".  On success the
 *	chosen majors are written back through the pointers.
 *	Returns 0 or an errno.
 */
int
devsw_attach(const char *devname,
    const struct bdevsw *bdev, devmajor_t *bmajor,
    const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	/* A character devsw is mandatory; a block devsw is optional. */
	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/* Each devsw must carry its own, distinct localcount. */
	if (bdev != NULL) {
		KASSERT(bdev->d_localcount != NULL);
		KASSERT(bdev->d_localcount != cdev->d_localcount);
	}
	/* NOTE(review): cdev was already checked non-NULL above. */
	if (cdev != NULL)
		KASSERT(cdev->d_localcount != NULL);

	/*
	 * If a conversion entry already exists for this name (driver
	 * was attached before), re-use and validate its majors.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		/* Inherit the previously assigned majors on request. */
		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* The requested majors must agree with the old entry. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		/* The switch slots must currently be empty. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/* use membar_producer() to ensure visibility of the xdevsw */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* No existing conversion entry: attach under (new) majors. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free conversion slot, growing the table if needed. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Mark the new trailing slot free, then copy the rest. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	/* Record the name -> majors mapping. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
245
246 static int
247 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
248 {
249 const struct bdevsw **newptr;
250 devmajor_t bmajor;
251 int i;
252
253 KASSERT(mutex_owned(&device_lock));
254
255 if (devsw == NULL)
256 return (0);
257
258 if (*devmajor < 0) {
259 for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
260 if (bdevsw[bmajor] != NULL)
261 continue;
262 for (i = 0 ; i < max_devsw_convs ; i++) {
263 if (devsw_conv[i].d_bmajor == bmajor)
264 break;
265 }
266 if (i != max_devsw_convs)
267 continue;
268 break;
269 }
270 *devmajor = bmajor;
271 }
272
273 if (*devmajor >= MAXDEVSW) {
274 printf("bdevsw_attach: block majors exhausted");
275 return (ENOMEM);
276 }
277
278 if (*devmajor >= max_bdevsws) {
279 KASSERT(bdevsw == bdevsw0);
280 newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
281 if (newptr == NULL)
282 return (ENOMEM);
283 memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
284 bdevsw = newptr;
285 max_bdevsws = MAXDEVSW;
286 }
287
288 if (bdevsw[*devmajor] != NULL)
289 return (EEXIST);
290
291 /* ensure visibility of the bdevsw */
292 membar_producer();
293
294 bdevsw[*devmajor] = devsw;
295 KASSERT(devsw->d_localcount != NULL);
296 localcount_init(devsw->d_localcount);
297
298 return (0);
299 }
300
301 static int
302 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
303 {
304 const struct cdevsw **newptr;
305 devmajor_t cmajor;
306 int i;
307
308 KASSERT(mutex_owned(&device_lock));
309
310 if (*devmajor < 0) {
311 for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
312 if (cdevsw[cmajor] != NULL)
313 continue;
314 for (i = 0 ; i < max_devsw_convs ; i++) {
315 if (devsw_conv[i].d_cmajor == cmajor)
316 break;
317 }
318 if (i != max_devsw_convs)
319 continue;
320 break;
321 }
322 *devmajor = cmajor;
323 }
324
325 if (*devmajor >= MAXDEVSW) {
326 printf("cdevsw_attach: character majors exhausted");
327 return (ENOMEM);
328 }
329
330 if (*devmajor >= max_cdevsws) {
331 KASSERT(cdevsw == cdevsw0);
332 newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
333 if (newptr == NULL)
334 return (ENOMEM);
335 memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
336 cdevsw = newptr;
337 max_cdevsws = MAXDEVSW;
338 }
339
340 if (cdevsw[*devmajor] != NULL)
341 return (EEXIST);
342
343 /* ensure visibility of the bdevsw */
344 membar_producer();
345
346 cdevsw[*devmajor] = devsw;
347 KASSERT(devsw->d_localcount != NULL);
348 localcount_init(devsw->d_localcount);
349
350 return (0);
351 }
352
353 /*
354 * First, look up both bdev and cdev indices, and remove the
355 * {b,c]devsw[] entries so no new references can be taken. Then
356 * drain any existing references.
357 */
358
359 static void
360 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
361 {
362 int i, j, s;
363
364 KASSERT(mutex_owned(&device_lock));
365
366 i = max_bdevsws;
367 if (bdev != NULL) {
368 for (i = 0 ; i < max_bdevsws ; i++) {
369 if (bdevsw[i] != bdev)
370 continue;
371
372 KASSERTMSG(bdev->d_localcount != NULL,
373 "%s: no bdev localcount", __func__);
374 break;
375 }
376 }
377 j = max_cdevsws;
378 if (cdev != NULL) {
379 for (j = 0 ; j < max_cdevsws ; j++) {
380 if (cdevsw[j] != cdev)
381 continue;
382
383 KASSERTMSG(cdev->d_localcount != NULL,
384 "%s: no cdev localcount", __func__);
385 break;
386 }
387 }
388 if (i < max_bdevsws)
389 bdevsw[i] = NULL;
390 if (j < max_cdevsws )
391 cdevsw[j] = NULL;
392
393 s = pserialize_read_enter();
394 if (i < max_bdevsws && bdev->d_localcount != NULL) {
395 localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
396 localcount_fini(bdev->d_localcount);
397 }
398 if (j < max_cdevsws && cdev->d_localcount != NULL ) {
399 localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
400 localcount_fini(cdev->d_localcount);
401 }
402 pserialize_read_exit(s);
403 }
404
405 int
406 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
407 {
408
409 mutex_enter(&device_lock);
410 devsw_detach_locked(bdev, cdev);
411 mutex_exit(&device_lock);
412 return 0;
413 }
414
415 /*
416 * Look up a block device by number.
417 *
418 * => Caller must ensure that the device is attached.
419 */
420 const struct bdevsw *
421 bdevsw_lookup(dev_t dev)
422 {
423 devmajor_t bmajor;
424
425 if (dev == NODEV)
426 return (NULL);
427 bmajor = major(dev);
428 if (bmajor < 0 || bmajor >= max_bdevsws)
429 return (NULL);
430
431 return (bdevsw[bmajor]);
432 }
433
/*
 * bdevsw_lookup_acquire:
 *
 *	Look up a block device by number and, for dynamically loaded
 *	drivers, take a localcount reference that prevents the driver
 *	from being detached until bdevsw_release() is called.
 *	Returns NULL if no driver is registered at that major.
 */
const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Prevent any concurrent attempts to detach the device */
	mutex_enter(&device_lock);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdevsw[bmajor]->d_localcount != NULL)
		localcount_acquire(bdevsw[bmajor]->d_localcount);

out:	pserialize_read_exit(s);
	mutex_exit(&device_lock);

	return bdev;
}
470
471 void
472 bdevsw_release(const struct bdevsw *bd)
473 {
474
475 KASSERT(bd != NULL);
476 if (bd->d_localcount != NULL)
477 localcount_release(bd->d_localcount, &device_cv, &device_lock);
478 }
479
480 /*
481 * Look up a character device by number.
482 *
483 * => Caller must ensure that the device is attached.
484 */
485 const struct cdevsw *
486 cdevsw_lookup(dev_t dev)
487 {
488 devmajor_t cmajor;
489
490 if (dev == NODEV)
491 return (NULL);
492 cmajor = major(dev);
493 if (cmajor < 0 || cmajor >= max_cdevsws)
494 return (NULL);
495
496 return (cdevsw[cmajor]);
497 }
498
/*
 * cdevsw_lookup_acquire:
 *
 *	Look up a character device by number and, for dynamically
 *	loaded drivers, take a localcount reference that prevents the
 *	driver from being detached until cdevsw_release() is called.
 *	Returns NULL if no driver is registered at that major.
 */
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Prevent any concurrent attempts to detach the device */
	mutex_enter(&device_lock);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdevsw[cmajor]->d_localcount != NULL)
		localcount_acquire(cdevsw[cmajor]->d_localcount);

out:	pserialize_read_exit(s);
	mutex_exit(&device_lock);

	return cdev;
}
535
536 void
537 cdevsw_release(const struct cdevsw *cd)
538 {
539
540 KASSERT(cd != NULL);
541 if (cd->d_localcount != NULL)
542 localcount_release(cd->d_localcount, &device_cv, &device_lock);
543 }
544
545 /*
546 * Look up a block device by reference to its operations set.
547 *
548 * => Caller must ensure that the device is not detached, and therefore
549 * that the returned major is still valid when dereferenced.
550 */
551 devmajor_t
552 bdevsw_lookup_major(const struct bdevsw *bdev)
553 {
554 devmajor_t bmajor;
555
556 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
557 if (bdevsw[bmajor] == bdev)
558 return (bmajor);
559 }
560
561 return (NODEVMAJOR);
562 }
563
564 /*
565 * Look up a character device by reference to its operations set.
566 *
567 * => Caller must ensure that the device is not detached, and therefore
568 * that the returned major is still valid when dereferenced.
569 */
570 devmajor_t
571 cdevsw_lookup_major(const struct cdevsw *cdev)
572 {
573 devmajor_t cmajor;
574
575 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
576 if (cdevsw[cmajor] == cdev)
577 return (cmajor);
578 }
579
580 return (NODEVMAJOR);
581 }
582
583 /*
584 * Convert from block major number to name.
585 *
586 * => Caller must ensure that the device is not detached, and therefore
587 * that the name pointer is still valid when dereferenced.
588 */
589 const char *
590 devsw_blk2name(devmajor_t bmajor)
591 {
592 const char *name;
593 devmajor_t cmajor;
594 int i;
595
596 name = NULL;
597 cmajor = -1;
598
599 mutex_enter(&device_lock);
600 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
601 mutex_exit(&device_lock);
602 return (NULL);
603 }
604 for (i = 0 ; i < max_devsw_convs; i++) {
605 if (devsw_conv[i].d_bmajor == bmajor) {
606 cmajor = devsw_conv[i].d_cmajor;
607 break;
608 }
609 }
610 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
611 name = devsw_conv[i].d_name;
612 mutex_exit(&device_lock);
613
614 return (name);
615 }
616
617 /*
618 * Convert char major number to device driver name.
619 */
620 const char *
621 cdevsw_getname(devmajor_t major)
622 {
623 const char *name;
624 int i;
625
626 name = NULL;
627
628 if (major < 0)
629 return (NULL);
630
631 mutex_enter(&device_lock);
632 for (i = 0 ; i < max_devsw_convs; i++) {
633 if (devsw_conv[i].d_cmajor == major) {
634 name = devsw_conv[i].d_name;
635 break;
636 }
637 }
638 mutex_exit(&device_lock);
639 return (name);
640 }
641
642 /*
643 * Convert block major number to device driver name.
644 */
645 const char *
646 bdevsw_getname(devmajor_t major)
647 {
648 const char *name;
649 int i;
650
651 name = NULL;
652
653 if (major < 0)
654 return (NULL);
655
656 mutex_enter(&device_lock);
657 for (i = 0 ; i < max_devsw_convs; i++) {
658 if (devsw_conv[i].d_bmajor == major) {
659 name = devsw_conv[i].d_name;
660 break;
661 }
662 }
663 mutex_exit(&device_lock);
664 return (name);
665 }
666
667 /*
668 * Convert from device name to block major number.
669 *
670 * => Caller must ensure that the device is not detached, and therefore
671 * that the major number is still valid when dereferenced.
672 */
673 devmajor_t
674 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
675 {
676 struct devsw_conv *conv;
677 devmajor_t bmajor;
678 int i;
679
680 if (name == NULL)
681 return (NODEVMAJOR);
682
683 mutex_enter(&device_lock);
684 for (i = 0 ; i < max_devsw_convs ; i++) {
685 size_t len;
686
687 conv = &devsw_conv[i];
688 if (conv->d_name == NULL)
689 continue;
690 len = strlen(conv->d_name);
691 if (strncmp(conv->d_name, name, len) != 0)
692 continue;
693 if (*(name +len) && !isdigit(*(name + len)))
694 continue;
695 bmajor = conv->d_bmajor;
696 if (bmajor < 0 || bmajor >= max_bdevsws ||
697 bdevsw[bmajor] == NULL)
698 break;
699 if (devname != NULL) {
700 #ifdef DEVSW_DEBUG
701 if (strlen(conv->d_name) >= devnamelen)
702 printf("devsw_name2blk: too short buffer");
703 #endif /* DEVSW_DEBUG */
704 strncpy(devname, conv->d_name, devnamelen);
705 devname[devnamelen - 1] = '\0';
706 }
707 mutex_exit(&device_lock);
708 return (bmajor);
709 }
710
711 mutex_exit(&device_lock);
712 return (NODEVMAJOR);
713 }
714
715 /*
716 * Convert from device name to char major number.
717 *
718 * => Caller must ensure that the device is not detached, and therefore
719 * that the major number is still valid when dereferenced.
720 */
721 devmajor_t
722 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
723 {
724 struct devsw_conv *conv;
725 devmajor_t cmajor;
726 int i;
727
728 if (name == NULL)
729 return (NODEVMAJOR);
730
731 mutex_enter(&device_lock);
732 for (i = 0 ; i < max_devsw_convs ; i++) {
733 size_t len;
734
735 conv = &devsw_conv[i];
736 if (conv->d_name == NULL)
737 continue;
738 len = strlen(conv->d_name);
739 if (strncmp(conv->d_name, name, len) != 0)
740 continue;
741 if (*(name +len) && !isdigit(*(name + len)))
742 continue;
743 cmajor = conv->d_cmajor;
744 if (cmajor < 0 || cmajor >= max_cdevsws ||
745 cdevsw[cmajor] == NULL)
746 break;
747 if (devname != NULL) {
748 #ifdef DEVSW_DEBUG
749 if (strlen(conv->d_name) >= devnamelen)
750 printf("devsw_name2chr: too short buffer");
751 #endif /* DEVSW_DEBUG */
752 strncpy(devname, conv->d_name, devnamelen);
753 devname[devnamelen - 1] = '\0';
754 }
755 mutex_exit(&device_lock);
756 return (cmajor);
757 }
758
759 mutex_exit(&device_lock);
760 return (NODEVMAJOR);
761 }
762
763 /*
764 * Convert from character dev_t to block dev_t.
765 *
766 * => Caller must ensure that the device is not detached, and therefore
767 * that the major number is still valid when dereferenced.
768 */
769 dev_t
770 devsw_chr2blk(dev_t cdev)
771 {
772 devmajor_t bmajor, cmajor;
773 int i;
774 dev_t rv;
775
776 cmajor = major(cdev);
777 bmajor = NODEVMAJOR;
778 rv = NODEV;
779
780 mutex_enter(&device_lock);
781 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
782 mutex_exit(&device_lock);
783 return (NODEV);
784 }
785 for (i = 0 ; i < max_devsw_convs ; i++) {
786 if (devsw_conv[i].d_cmajor == cmajor) {
787 bmajor = devsw_conv[i].d_bmajor;
788 break;
789 }
790 }
791 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
792 rv = makedev(bmajor, minor(cdev));
793 mutex_exit(&device_lock);
794
795 return (rv);
796 }
797
798 /*
799 * Convert from block dev_t to character dev_t.
800 *
801 * => Caller must ensure that the device is not detached, and therefore
802 * that the major number is still valid when dereferenced.
803 */
804 dev_t
805 devsw_blk2chr(dev_t bdev)
806 {
807 devmajor_t bmajor, cmajor;
808 int i;
809 dev_t rv;
810
811 bmajor = major(bdev);
812 cmajor = NODEVMAJOR;
813 rv = NODEV;
814
815 mutex_enter(&device_lock);
816 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
817 mutex_exit(&device_lock);
818 return (NODEV);
819 }
820 for (i = 0 ; i < max_devsw_convs ; i++) {
821 if (devsw_conv[i].d_bmajor == bmajor) {
822 cmajor = devsw_conv[i].d_cmajor;
823 break;
824 }
825 }
826 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
827 rv = makedev(cmajor, minor(bdev));
828 mutex_exit(&device_lock);
829
830 return (rv);
831 }
832
833 /*
834 * Device access methods.
835 */
836
837 #define DEV_LOCK(d) \
838 if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) { \
839 KERNEL_LOCK(1, NULL); \
840 }
841
842 #define DEV_UNLOCK(d) \
843 if (mpflag == 0) { \
844 KERNEL_UNLOCK_ONE(NULL); \
845 }
846
847 int
848 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
849 {
850 const struct bdevsw *d;
851 int rv, mpflag;
852
853 /*
854 * For open we need to lock, in order to synchronize
855 * with attach/detach.
856 */
857 mutex_enter(&device_lock);
858 d = bdevsw_lookup(dev);
859 mutex_exit(&device_lock);
860 if (d == NULL)
861 return ENXIO;
862
863 DEV_LOCK(d);
864 rv = (*d->d_open)(dev, flag, devtype, l);
865 DEV_UNLOCK(d);
866
867 return rv;
868 }
869
870 int
871 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
872 {
873 const struct bdevsw *d;
874 int rv, mpflag;
875
876 if ((d = bdevsw_lookup(dev)) == NULL)
877 return ENXIO;
878
879 DEV_LOCK(d);
880 rv = (*d->d_close)(dev, flag, devtype, l);
881 DEV_UNLOCK(d);
882
883 return rv;
884 }
885
886 SDT_PROVIDER_DECLARE(io);
887 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
888
889 void
890 bdev_strategy(struct buf *bp)
891 {
892 const struct bdevsw *d;
893 int mpflag;
894
895 SDT_PROBE1(io, kernel, , start, bp);
896
897 if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
898 bp->b_error = ENXIO;
899 bp->b_resid = bp->b_bcount;
900 biodone_vfs(bp); /* biodone() iff vfs present */
901 return;
902 }
903
904 DEV_LOCK(d);
905 (*d->d_strategy)(bp);
906 DEV_UNLOCK(d);
907 }
908
909 int
910 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
911 {
912 const struct bdevsw *d;
913 int rv, mpflag;
914
915 if ((d = bdevsw_lookup(dev)) == NULL)
916 return ENXIO;
917
918 DEV_LOCK(d);
919 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
920 DEV_UNLOCK(d);
921
922 return rv;
923 }
924
925 int
926 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
927 {
928 const struct bdevsw *d;
929 int rv;
930
931 /*
932 * Dump can be called without the device open. Since it can
933 * currently only be called with the system paused (and in a
934 * potentially unstable state), we don't perform any locking.
935 */
936 if ((d = bdevsw_lookup(dev)) == NULL)
937 return ENXIO;
938
939 /* DEV_LOCK(d); */
940 rv = (*d->d_dump)(dev, addr, data, sz);
941 /* DEV_UNLOCK(d); */
942
943 return rv;
944 }
945
946 int
947 bdev_type(dev_t dev)
948 {
949 const struct bdevsw *d;
950
951 if ((d = bdevsw_lookup(dev)) == NULL)
952 return D_OTHER;
953 return d->d_flag & D_TYPEMASK;
954 }
955
956 int
957 bdev_size(dev_t dev)
958 {
959 const struct bdevsw *d;
960 int rv, mpflag = 0;
961
962 if ((d = bdevsw_lookup(dev)) == NULL ||
963 d->d_psize == NULL)
964 return -1;
965
966 /*
967 * Don't to try lock the device if we're dumping.
968 * XXX: is there a better way to test this?
969 */
970 if ((boothowto & RB_DUMP) == 0)
971 DEV_LOCK(d);
972 rv = (*d->d_psize)(dev);
973 if ((boothowto & RB_DUMP) == 0)
974 DEV_UNLOCK(d);
975
976 return rv;
977 }
978
979 int
980 bdev_discard(dev_t dev, off_t pos, off_t len)
981 {
982 const struct bdevsw *d;
983 int rv, mpflag;
984
985 if ((d = bdevsw_lookup(dev)) == NULL)
986 return ENXIO;
987
988 DEV_LOCK(d);
989 rv = (*d->d_discard)(dev, pos, len);
990 DEV_UNLOCK(d);
991
992 return rv;
993 }
994
995 int
996 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
997 {
998 const struct cdevsw *d;
999 int rv, mpflag;
1000
1001 /*
1002 * For open we need to lock, in order to synchronize
1003 * with attach/detach.
1004 */
1005 mutex_enter(&device_lock);
1006 d = cdevsw_lookup(dev);
1007 mutex_exit(&device_lock);
1008 if (d == NULL)
1009 return ENXIO;
1010
1011 DEV_LOCK(d);
1012 rv = (*d->d_open)(dev, flag, devtype, l);
1013 DEV_UNLOCK(d);
1014
1015 return rv;
1016 }
1017
1018 int
1019 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
1020 {
1021 const struct cdevsw *d;
1022 int rv, mpflag;
1023
1024 if ((d = cdevsw_lookup(dev)) == NULL)
1025 return ENXIO;
1026
1027 DEV_LOCK(d);
1028 rv = (*d->d_close)(dev, flag, devtype, l);
1029 DEV_UNLOCK(d);
1030
1031 return rv;
1032 }
1033
1034 int
1035 cdev_read(dev_t dev, struct uio *uio, int flag)
1036 {
1037 const struct cdevsw *d;
1038 int rv, mpflag;
1039
1040 if ((d = cdevsw_lookup(dev)) == NULL)
1041 return ENXIO;
1042
1043 DEV_LOCK(d);
1044 rv = (*d->d_read)(dev, uio, flag);
1045 DEV_UNLOCK(d);
1046
1047 return rv;
1048 }
1049
1050 int
1051 cdev_write(dev_t dev, struct uio *uio, int flag)
1052 {
1053 const struct cdevsw *d;
1054 int rv, mpflag;
1055
1056 if ((d = cdevsw_lookup(dev)) == NULL)
1057 return ENXIO;
1058
1059 DEV_LOCK(d);
1060 rv = (*d->d_write)(dev, uio, flag);
1061 DEV_UNLOCK(d);
1062
1063 return rv;
1064 }
1065
1066 int
1067 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
1068 {
1069 const struct cdevsw *d;
1070 int rv, mpflag;
1071
1072 if ((d = cdevsw_lookup(dev)) == NULL)
1073 return ENXIO;
1074
1075 DEV_LOCK(d);
1076 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
1077 DEV_UNLOCK(d);
1078
1079 return rv;
1080 }
1081
1082 void
1083 cdev_stop(struct tty *tp, int flag)
1084 {
1085 const struct cdevsw *d;
1086 int mpflag;
1087
1088 if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
1089 return;
1090
1091 DEV_LOCK(d);
1092 (*d->d_stop)(tp, flag);
1093 DEV_UNLOCK(d);
1094 }
1095
1096 struct tty *
1097 cdev_tty(dev_t dev)
1098 {
1099 const struct cdevsw *d;
1100
1101 if ((d = cdevsw_lookup(dev)) == NULL)
1102 return NULL;
1103
1104 /* XXX Check if necessary. */
1105 if (d->d_tty == NULL)
1106 return NULL;
1107
1108 return (*d->d_tty)(dev);
1109 }
1110
1111 int
1112 cdev_poll(dev_t dev, int flag, lwp_t *l)
1113 {
1114 const struct cdevsw *d;
1115 int rv, mpflag;
1116
1117 if ((d = cdevsw_lookup(dev)) == NULL)
1118 return POLLERR;
1119
1120 DEV_LOCK(d);
1121 rv = (*d->d_poll)(dev, flag, l);
1122 DEV_UNLOCK(d);
1123
1124 return rv;
1125 }
1126
1127 paddr_t
1128 cdev_mmap(dev_t dev, off_t off, int flag)
1129 {
1130 const struct cdevsw *d;
1131 paddr_t rv;
1132 int mpflag;
1133
1134 if ((d = cdevsw_lookup(dev)) == NULL)
1135 return (paddr_t)-1LL;
1136
1137 DEV_LOCK(d);
1138 rv = (*d->d_mmap)(dev, off, flag);
1139 DEV_UNLOCK(d);
1140
1141 return rv;
1142 }
1143
1144 int
1145 cdev_kqfilter(dev_t dev, struct knote *kn)
1146 {
1147 const struct cdevsw *d;
1148 int rv, mpflag;
1149
1150 if ((d = cdevsw_lookup(dev)) == NULL)
1151 return ENXIO;
1152
1153 DEV_LOCK(d);
1154 rv = (*d->d_kqfilter)(dev, kn);
1155 DEV_UNLOCK(d);
1156
1157 return rv;
1158 }
1159
1160 int
1161 cdev_discard(dev_t dev, off_t pos, off_t len)
1162 {
1163 const struct cdevsw *d;
1164 int rv, mpflag;
1165
1166 if ((d = cdevsw_lookup(dev)) == NULL)
1167 return ENXIO;
1168
1169 DEV_LOCK(d);
1170 rv = (*d->d_discard)(dev, pos, len);
1171 DEV_UNLOCK(d);
1172
1173 return rv;
1174 }
1175
1176 int
1177 cdev_type(dev_t dev)
1178 {
1179 const struct cdevsw *d;
1180
1181 if ((d = cdevsw_lookup(dev)) == NULL)
1182 return D_OTHER;
1183 return d->d_flag & D_TYPEMASK;
1184 }
1185