subr_devsw.c revision 1.34.2.3 1 /* $NetBSD: subr_devsw.c,v 1.34.2.3 2016/07/17 02:37:54 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
 *	once the device has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.3 2016/07/17 02:37:54 pgoyette Exp $");
73
74 #ifdef _KERNEL_OPT
75 #include "opt_dtrace.h"
76 #endif
77
78 #include <sys/param.h>
79 #include <sys/conf.h>
80 #include <sys/kmem.h>
81 #include <sys/systm.h>
82 #include <sys/poll.h>
83 #include <sys/tty.h>
84 #include <sys/cpu.h>
85 #include <sys/buf.h>
86 #include <sys/reboot.h>
87 #include <sys/sdt.h>
88 #include <sys/atomic.h>
89 #include <sys/condvar.h>
90 #include <sys/localcount.h>
91 #include <sys/pserialize.h>
92
93 #ifdef DEVSW_DEBUG
94 #define DPRINTF(x) printf x
95 #else /* DEVSW_DEBUG */
96 #define DPRINTF(x)
97 #endif /* DEVSW_DEBUG */
98
99 #define MAXDEVSW 512 /* the maximum of major device number */
100 #define BDEVSW_SIZE (sizeof(struct bdevsw *))
101 #define CDEVSW_SIZE (sizeof(struct cdevsw *))
102 #define DEVSWCONV_SIZE (sizeof(struct devsw_conv))
103
104 extern const struct bdevsw **bdevsw, *bdevsw0[];
105 extern const struct cdevsw **cdevsw, *cdevsw0[];
106 extern struct devsw_conv *devsw_conv, devsw_conv0[];
107 extern const int sys_bdevsws, sys_cdevsws;
108 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
109
110 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
111 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
112 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
113
114 kmutex_t device_lock;
115 kcondvar_t device_cv;
116 pserialize_t device_psz;
117
118 void (*biodone_vfs)(buf_t *) = (void *)nullop;
119
120 void
121 devsw_init(void)
122 {
123
124 KASSERT(sys_bdevsws < MAXDEVSW - 1);
125 KASSERT(sys_cdevsws < MAXDEVSW - 1);
126 mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
127 cv_init(&device_cv, "devsw");
128 device_psz = pserialize_init();
129 }
130
/*
 * devsw_attach:
 *
 *	Register a driver by name.  cdev is mandatory, bdev optional.
 *	On entry *bmajor / *cmajor may be -1 ("allocate one"); on
 *	success they hold the majors actually used.  Returns 0 or an
 *	errno (EINVAL, EEXIST, ENOMEM).
 */
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/* Every attachable devsw must carry its own distinct localcount. */
	if (bdev != NULL) {
		KASSERT(bdev->d_localcount != NULL);
		KASSERT(bdev->d_localcount != cdev->d_localcount);
	}
	if (cdev != NULL)
		KASSERT(cdev->d_localcount != NULL);

	/*
	 * If the name is already in the conversion table this is a
	 * re-attach: re-use the previously recorded majors.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* Caller-supplied majors must agree with the old entry. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		/* The slots must currently be empty. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/* use membar_producer() to ensure visibility of the xdevsw */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* Name not previously known: allocate majors as needed. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free slot in the name<->major conversion table... */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	/* ...growing the table by one entry if it is full. */
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			/* Undo the switch-table attach done above. */
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Initialize the new tail entry, then copy the old table. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	/* Record the name; the copy is owned by the conversion table. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
247
248 static int
249 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
250 {
251 const struct bdevsw **newptr;
252 devmajor_t bmajor;
253 int i;
254
255 KASSERT(mutex_owned(&device_lock));
256
257 if (devsw == NULL)
258 return (0);
259
260 if (*devmajor < 0) {
261 for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
262 if (bdevsw[bmajor] != NULL)
263 continue;
264 for (i = 0 ; i < max_devsw_convs ; i++) {
265 if (devsw_conv[i].d_bmajor == bmajor)
266 break;
267 }
268 if (i != max_devsw_convs)
269 continue;
270 break;
271 }
272 *devmajor = bmajor;
273 }
274
275 if (*devmajor >= MAXDEVSW) {
276 printf("bdevsw_attach: block majors exhausted");
277 return (ENOMEM);
278 }
279
280 if (*devmajor >= max_bdevsws) {
281 KASSERT(bdevsw == bdevsw0);
282 newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
283 if (newptr == NULL)
284 return (ENOMEM);
285 memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
286 bdevsw = newptr;
287 max_bdevsws = MAXDEVSW;
288 }
289
290 if (bdevsw[*devmajor] != NULL)
291 return (EEXIST);
292
293 /* ensure visibility of the bdevsw */
294 membar_producer();
295
296 bdevsw[*devmajor] = devsw;
297 KASSERT(devsw->d_localcount != NULL);
298 localcount_init(devsw->d_localcount);
299
300 return (0);
301 }
302
303 static int
304 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
305 {
306 const struct cdevsw **newptr;
307 devmajor_t cmajor;
308 int i;
309
310 KASSERT(mutex_owned(&device_lock));
311
312 if (*devmajor < 0) {
313 for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
314 if (cdevsw[cmajor] != NULL)
315 continue;
316 for (i = 0 ; i < max_devsw_convs ; i++) {
317 if (devsw_conv[i].d_cmajor == cmajor)
318 break;
319 }
320 if (i != max_devsw_convs)
321 continue;
322 break;
323 }
324 *devmajor = cmajor;
325 }
326
327 if (*devmajor >= MAXDEVSW) {
328 printf("cdevsw_attach: character majors exhausted");
329 return (ENOMEM);
330 }
331
332 if (*devmajor >= max_cdevsws) {
333 KASSERT(cdevsw == cdevsw0);
334 newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
335 if (newptr == NULL)
336 return (ENOMEM);
337 memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
338 cdevsw = newptr;
339 max_cdevsws = MAXDEVSW;
340 }
341
342 if (cdevsw[*devmajor] != NULL)
343 return (EEXIST);
344
345 /* ensure visibility of the bdevsw */
346 membar_producer();
347
348 cdevsw[*devmajor] = devsw;
349 KASSERT(devsw->d_localcount != NULL);
350 localcount_init(devsw->d_localcount);
351
352 return (0);
353 }
354
/*
 * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
 * drain any existing references.
 */

/*
 * devsw_detach_locked:
 *
 *	Unpublish bdev/cdev from the device switch tables and wait for
 *	all outstanding references to drain.  Caller holds device_lock.
 *	Either argument may be NULL or not present in its table; such
 *	entries are simply skipped.
 */
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	/* Find bdev's index; i == max_bdevsws means "not found". */
	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount", __func__);
			break;
		}
	}
	/* Likewise for cdev; j == max_cdevsws means "not found". */
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount", __func__);
			break;
		}
	}
	/* Unpublish the entries so no new references can be taken. */
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws )
		cdevsw[j] = NULL;

	/* We need to wait for all current readers to finish. */
	pserialize_perform(device_psz);

	/*
	 * Here, no new readers can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays.  Wait for existing references to
	 * drain, and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL ) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}
413
/*
 * devsw_detach:
 *
 *	Public entry point for driver detach: takes device_lock around
 *	devsw_detach_locked().  Always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
423
424 /*
425 * Look up a block device by number.
426 *
427 * => Caller must ensure that the device is attached.
428 */
429 const struct bdevsw *
430 bdevsw_lookup(dev_t dev)
431 {
432 devmajor_t bmajor;
433
434 if (dev == NODEV)
435 return (NULL);
436 bmajor = major(dev);
437 if (bmajor < 0 || bmajor >= max_bdevsws)
438 return (NULL);
439
440 return (bdevsw[bmajor]);
441 }
442
443 const struct bdevsw *
444 bdevsw_lookup_acquire(dev_t dev)
445 {
446 devmajor_t bmajor;
447 const struct bdevsw *bdev = NULL;
448 int s;
449
450 if (dev == NODEV)
451 return (NULL);
452 bmajor = major(dev);
453 if (bmajor < 0 || bmajor >= max_bdevsws)
454 return (NULL);
455
456 /* Start a read transaction to block localcount_drain() */
457 s = pserialize_read_enter();
458
459 /* Get the struct bdevsw pointer */
460 bdev = bdevsw[bmajor];
461 if (bdev == NULL)
462 goto out;
463
464 /* Wait for the content of the struct bdevsw to become visible */
465 membar_datadep_consumer();
466
467 /* If the devsw is not statically linked, acquire a reference */
468 if (bdevsw[bmajor]->d_localcount != NULL)
469 localcount_acquire(bdevsw[bmajor]->d_localcount);
470
471 out: pserialize_read_exit(s);
472 }
473
474 void
475 bdevsw_release(const struct bdevsw *bd)
476 {
477
478 KASSERT(bd != NULL);
479 if (bd->d_localcount != NULL)
480 localcount_release(bd->d_localcount, &device_cv, &device_lock);
481 }
482
483 /*
484 * Look up a character device by number.
485 *
486 * => Caller must ensure that the device is attached.
487 */
488 const struct cdevsw *
489 cdevsw_lookup(dev_t dev)
490 {
491 devmajor_t cmajor;
492
493 if (dev == NODEV)
494 return (NULL);
495 cmajor = major(dev);
496 if (cmajor < 0 || cmajor >= max_cdevsws)
497 return (NULL);
498
499 return (cdevsw[cmajor]);
500 }
501
/*
 * cdevsw_lookup_acquire:
 *
 *	Look up a character device by number and, if found, acquire a
 *	reference on its localcount so the driver cannot be detached
 *	until the caller invokes cdevsw_release().  Returns the devsw
 *	pointer, or NULL if no driver is attached at that major.
 */
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Prevent any concurrent attempts to detach the device */
	mutex_enter(&device_lock);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdevsw[cmajor]->d_localcount != NULL)
		localcount_acquire(cdevsw[cmajor]->d_localcount);

out:	pserialize_read_exit(s);
	mutex_exit(&device_lock);

	return cdev;
}
538
539 void
540 cdevsw_release(const struct cdevsw *cd)
541 {
542
543 KASSERT(cd != NULL);
544 if (cd->d_localcount != NULL)
545 localcount_release(cd->d_localcount, &device_cv, &device_lock);
546 }
547
548 /*
549 * Look up a block device by reference to its operations set.
550 *
551 * => Caller must ensure that the device is not detached, and therefore
552 * that the returned major is still valid when dereferenced.
553 */
554 devmajor_t
555 bdevsw_lookup_major(const struct bdevsw *bdev)
556 {
557 devmajor_t bmajor;
558
559 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
560 if (bdevsw[bmajor] == bdev)
561 return (bmajor);
562 }
563
564 return (NODEVMAJOR);
565 }
566
567 /*
568 * Look up a character device by reference to its operations set.
569 *
570 * => Caller must ensure that the device is not detached, and therefore
571 * that the returned major is still valid when dereferenced.
572 */
573 devmajor_t
574 cdevsw_lookup_major(const struct cdevsw *cdev)
575 {
576 devmajor_t cmajor;
577
578 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
579 if (cdevsw[cmajor] == cdev)
580 return (cmajor);
581 }
582
583 return (NODEVMAJOR);
584 }
585
586 /*
587 * Convert from block major number to name.
588 *
589 * => Caller must ensure that the device is not detached, and therefore
590 * that the name pointer is still valid when dereferenced.
591 */
592 const char *
593 devsw_blk2name(devmajor_t bmajor)
594 {
595 const char *name;
596 devmajor_t cmajor;
597 int i;
598
599 name = NULL;
600 cmajor = -1;
601
602 mutex_enter(&device_lock);
603 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
604 mutex_exit(&device_lock);
605 return (NULL);
606 }
607 for (i = 0 ; i < max_devsw_convs; i++) {
608 if (devsw_conv[i].d_bmajor == bmajor) {
609 cmajor = devsw_conv[i].d_cmajor;
610 break;
611 }
612 }
613 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
614 name = devsw_conv[i].d_name;
615 mutex_exit(&device_lock);
616
617 return (name);
618 }
619
620 /*
621 * Convert char major number to device driver name.
622 */
623 const char *
624 cdevsw_getname(devmajor_t major)
625 {
626 const char *name;
627 int i;
628
629 name = NULL;
630
631 if (major < 0)
632 return (NULL);
633
634 mutex_enter(&device_lock);
635 for (i = 0 ; i < max_devsw_convs; i++) {
636 if (devsw_conv[i].d_cmajor == major) {
637 name = devsw_conv[i].d_name;
638 break;
639 }
640 }
641 mutex_exit(&device_lock);
642 return (name);
643 }
644
645 /*
646 * Convert block major number to device driver name.
647 */
648 const char *
649 bdevsw_getname(devmajor_t major)
650 {
651 const char *name;
652 int i;
653
654 name = NULL;
655
656 if (major < 0)
657 return (NULL);
658
659 mutex_enter(&device_lock);
660 for (i = 0 ; i < max_devsw_convs; i++) {
661 if (devsw_conv[i].d_bmajor == major) {
662 name = devsw_conv[i].d_name;
663 break;
664 }
665 }
666 mutex_exit(&device_lock);
667 return (name);
668 }
669
670 /*
671 * Convert from device name to block major number.
672 *
673 * => Caller must ensure that the device is not detached, and therefore
674 * that the major number is still valid when dereferenced.
675 */
676 devmajor_t
677 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
678 {
679 struct devsw_conv *conv;
680 devmajor_t bmajor;
681 int i;
682
683 if (name == NULL)
684 return (NODEVMAJOR);
685
686 mutex_enter(&device_lock);
687 for (i = 0 ; i < max_devsw_convs ; i++) {
688 size_t len;
689
690 conv = &devsw_conv[i];
691 if (conv->d_name == NULL)
692 continue;
693 len = strlen(conv->d_name);
694 if (strncmp(conv->d_name, name, len) != 0)
695 continue;
696 if (*(name +len) && !isdigit(*(name + len)))
697 continue;
698 bmajor = conv->d_bmajor;
699 if (bmajor < 0 || bmajor >= max_bdevsws ||
700 bdevsw[bmajor] == NULL)
701 break;
702 if (devname != NULL) {
703 #ifdef DEVSW_DEBUG
704 if (strlen(conv->d_name) >= devnamelen)
705 printf("devsw_name2blk: too short buffer");
706 #endif /* DEVSW_DEBUG */
707 strncpy(devname, conv->d_name, devnamelen);
708 devname[devnamelen - 1] = '\0';
709 }
710 mutex_exit(&device_lock);
711 return (bmajor);
712 }
713
714 mutex_exit(&device_lock);
715 return (NODEVMAJOR);
716 }
717
718 /*
719 * Convert from device name to char major number.
720 *
721 * => Caller must ensure that the device is not detached, and therefore
722 * that the major number is still valid when dereferenced.
723 */
724 devmajor_t
725 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
726 {
727 struct devsw_conv *conv;
728 devmajor_t cmajor;
729 int i;
730
731 if (name == NULL)
732 return (NODEVMAJOR);
733
734 mutex_enter(&device_lock);
735 for (i = 0 ; i < max_devsw_convs ; i++) {
736 size_t len;
737
738 conv = &devsw_conv[i];
739 if (conv->d_name == NULL)
740 continue;
741 len = strlen(conv->d_name);
742 if (strncmp(conv->d_name, name, len) != 0)
743 continue;
744 if (*(name +len) && !isdigit(*(name + len)))
745 continue;
746 cmajor = conv->d_cmajor;
747 if (cmajor < 0 || cmajor >= max_cdevsws ||
748 cdevsw[cmajor] == NULL)
749 break;
750 if (devname != NULL) {
751 #ifdef DEVSW_DEBUG
752 if (strlen(conv->d_name) >= devnamelen)
753 printf("devsw_name2chr: too short buffer");
754 #endif /* DEVSW_DEBUG */
755 strncpy(devname, conv->d_name, devnamelen);
756 devname[devnamelen - 1] = '\0';
757 }
758 mutex_exit(&device_lock);
759 return (cmajor);
760 }
761
762 mutex_exit(&device_lock);
763 return (NODEVMAJOR);
764 }
765
766 /*
767 * Convert from character dev_t to block dev_t.
768 *
769 * => Caller must ensure that the device is not detached, and therefore
770 * that the major number is still valid when dereferenced.
771 */
772 dev_t
773 devsw_chr2blk(dev_t cdev)
774 {
775 devmajor_t bmajor, cmajor;
776 int i;
777 dev_t rv;
778
779 cmajor = major(cdev);
780 bmajor = NODEVMAJOR;
781 rv = NODEV;
782
783 mutex_enter(&device_lock);
784 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
785 mutex_exit(&device_lock);
786 return (NODEV);
787 }
788 for (i = 0 ; i < max_devsw_convs ; i++) {
789 if (devsw_conv[i].d_cmajor == cmajor) {
790 bmajor = devsw_conv[i].d_bmajor;
791 break;
792 }
793 }
794 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
795 rv = makedev(bmajor, minor(cdev));
796 mutex_exit(&device_lock);
797
798 return (rv);
799 }
800
801 /*
802 * Convert from block dev_t to character dev_t.
803 *
804 * => Caller must ensure that the device is not detached, and therefore
805 * that the major number is still valid when dereferenced.
806 */
807 dev_t
808 devsw_blk2chr(dev_t bdev)
809 {
810 devmajor_t bmajor, cmajor;
811 int i;
812 dev_t rv;
813
814 bmajor = major(bdev);
815 cmajor = NODEVMAJOR;
816 rv = NODEV;
817
818 mutex_enter(&device_lock);
819 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
820 mutex_exit(&device_lock);
821 return (NODEV);
822 }
823 for (i = 0 ; i < max_devsw_convs ; i++) {
824 if (devsw_conv[i].d_bmajor == bmajor) {
825 cmajor = devsw_conv[i].d_cmajor;
826 break;
827 }
828 }
829 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
830 rv = makedev(cmajor, minor(bdev));
831 mutex_exit(&device_lock);
832
833 return (rv);
834 }
835
836 /*
837 * Device access methods.
838 */
839
/*
 * Conditionally take the big kernel lock around a driver call.
 * DEV_LOCK records whether the driver is D_MPSAFE in the caller's
 * local `mpflag', which DEV_UNLOCK then consults: the two must be
 * used as a matched pair inside one function that declares
 * `int mpflag'.
 */
#define DEV_LOCK(d) \
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) { \
		KERNEL_LOCK(1, NULL); \
	}

#define DEV_UNLOCK(d) \
	if (mpflag == 0) { \
		KERNEL_UNLOCK_ONE(NULL); \
	}
849
/*
 * Call the d_open method of the block driver for dev; ENXIO if no
 * driver is attached at that major.  device_lock is held around the
 * lookup to synchronize with devsw_attach()/devsw_detach().
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	/* DEV_LOCK takes KERNEL_LOCK unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
872
/*
 * Call the d_close method of the block driver for dev; ENXIO if no
 * driver is attached at that major.
 */
int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
888
889 SDT_PROVIDER_DECLARE(io);
890 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
891
/*
 * Hand a buffer to the block driver's d_strategy method.  If no
 * driver is attached at bp->b_dev's major, fail the buffer with
 * ENXIO and complete it.
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp); /* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
911
/*
 * Call the d_ioctl method of the block driver for dev; ENXIO if no
 * driver is attached at that major.
 */
int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
927
/*
 * Call the d_dump method of the block driver for dev; ENXIO if no
 * driver is attached at that major.  Deliberately lock-free: see
 * the comment below.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
948
949 int
950 bdev_type(dev_t dev)
951 {
952 const struct bdevsw *d;
953
954 if ((d = bdevsw_lookup(dev)) == NULL)
955 return D_OTHER;
956 return d->d_flag & D_TYPEMASK;
957 }
958
/*
 * Return the result of the block driver's d_psize method, or -1 if
 * the driver or its d_psize method is missing.  mpflag must start at
 * 0 because DEV_LOCK/DEV_UNLOCK are both skipped while dumping.
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't to try lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}
981
/*
 * Call the d_discard method of the block driver for dev; ENXIO if no
 * driver is attached at that major.
 */
int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}
997
/*
 * Call the d_open method of the character driver for dev; ENXIO if no
 * driver is attached at that major.  device_lock is held around the
 * lookup to synchronize with devsw_attach()/devsw_detach().
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	/* DEV_LOCK takes KERNEL_LOCK unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
1020
/*
 * Call the d_close method of the character driver for dev; ENXIO if
 * no driver is attached at that major.
 */
int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
1036
/*
 * Call the d_read method of the character driver for dev; ENXIO if
 * no driver is attached at that major.
 */
int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
1052
/*
 * Call the d_write method of the character driver for dev; ENXIO if
 * no driver is attached at that major.
 */
int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
1068
/*
 * Call the d_ioctl method of the character driver for dev; ENXIO if
 * no driver is attached at that major.
 */
int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
1084
/*
 * Call the d_stop method of the character driver for the tty's
 * device (tp->t_dev); silently a no-op if no driver is attached.
 */
void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}
1098
1099 struct tty *
1100 cdev_tty(dev_t dev)
1101 {
1102 const struct cdevsw *d;
1103
1104 if ((d = cdevsw_lookup(dev)) == NULL)
1105 return NULL;
1106
1107 /* XXX Check if necessary. */
1108 if (d->d_tty == NULL)
1109 return NULL;
1110
1111 return (*d->d_tty)(dev);
1112 }
1113
/*
 * Call the d_poll method of the character driver for dev; POLLERR
 * (not ENXIO) if no driver is attached at that major.
 */
int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
1129
/*
 * Call the d_mmap method of the character driver for dev; returns
 * (paddr_t)-1 (the mmap failure sentinel) if no driver is attached.
 */
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}
1146
/*
 * Call the d_kqfilter method of the character driver for dev; ENXIO
 * if no driver is attached at that major.
 */
int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}
1162
/*
 * Call the d_discard method of the character driver for dev; ENXIO
 * if no driver is attached at that major.
 */
int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}
1178
1179 int
1180 cdev_type(dev_t dev)
1181 {
1182 const struct cdevsw *d;
1183
1184 if ((d = cdevsw_lookup(dev)) == NULL)
1185 return D_OTHER;
1186 return d->d_flag & D_TYPEMASK;
1187 }
1188