/*	$NetBSD: subr_devsw.c,v 1.34.2.5 2016/07/17 12:09:21 pgoyette Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
 */
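
/*
 * Reference counting (sketch)
 *
 *	A minimal sketch of the acquire/release pattern this file
 *	provides for dynamically loaded drivers; error handling is
 *	elided for brevity:
 *
 *		const struct cdevsw *cd;
 *
 *		cd = cdevsw_lookup_acquire(dev);
 *		if (cd == NULL)
 *			return ENXIO;
 *		error = (*cd->d_read)(dev, uio, flag);
 *		cdevsw_release(cd);
 *
 *	The acquire takes a reference on the entry's localcount, which
 *	prevents devsw_detach() from destroying the entry until the
 *	reference is released.
 */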

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.5 2016/07/17 12:09:21 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/localcount.h>
#include <sys/pserialize.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;
kcondvar_t device_cv;
pserialize_t device_psz = NULL;

void (*biodone_vfs)(buf_t *) = (void *)nullop;

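/*
 * Initialize the lock and condition variable that protect the device
 * switch tables.  Called once during early boot, before any driver
 * can attach.
 */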
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&device_cv, "devsw");
}

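/*
 * Attach a block and/or character device driver under the given name.
 * If the name is already known (from the static config(8) tables or a
 * previous attach), the recorded majors must match; otherwise new
 * majors are allocated and a new name/major conversion entry is
 * created.  On success the chosen majors are returned via bmajor and
 * cmajor.
 */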
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	if (bdev != NULL) {
		KASSERTMSG(bdev->d_localcount != NULL,
		    "%s: bdev %s has no d_localcount", __func__, devname);
		KASSERTMSG(bdev->d_localcount != cdev->d_localcount,
		    "%s: bdev and cdev for %s have same d_localcount",
		    __func__, devname);
	}
	if (cdev != NULL)
		KASSERTMSG(cdev->d_localcount != NULL,
		    "%s: cdev %s has no d_localcount", __func__, devname);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/* use membar_producer() to ensure visibility of the xdevsw */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}

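/*
 * Attach a block device driver, allocating a major number if *devmajor
 * is negative and growing the bdevsw[] table if required.  Called with
 * device_lock held.
 */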
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted", __func__);
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);
	KASSERTMSG(devsw->d_localcount != NULL, "%s: bdev for major %d has "
	    "no localcount", __func__, *devmajor);
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the initialized bdevsw before publication */
	membar_producer();

	bdevsw[*devmajor] = devsw;

	return (0);
}

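/*
 * Attach a character device driver, allocating a major number if
 * *devmajor is negative and growing the cdevsw[] table if required.
 * Called with device_lock held.
 */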
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted", __func__);
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);
	KASSERTMSG(devsw->d_localcount != NULL, "%s: cdev for major %d has "
	    "no localcount", __func__, *devmajor);
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the initialized cdevsw before publication */
	membar_producer();

	cdevsw[*devmajor] = devsw;

	return (0);
}

/*
 * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
 * drain any existing references.
 */

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount for major %d", __func__, i);
			break;
		}
	}
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount for major %d", __func__, j);
			break;
		}
	}
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws)
		cdevsw[j] = NULL;

	/*
	 * If we haven't already done so, create the serialization
	 * structure.  Then wait for all current readers to finish.
	 */
	if (__predict_false(device_psz == NULL))
		device_psz = pserialize_create();
	pserialize_perform(device_psz);

	/*
	 * No new readers can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays.  Wait for existing references to
	 * drain, and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}

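/*
 * Remove a block/character device driver pair from the tables and
 * wait for any outstanding references to drain.  Takes device_lock.
 */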
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

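/*
 * Look up a block device by number, and take a reference on its
 * localcount (if any) so that devsw_detach() cannot destroy the entry
 * until the reference is dropped with bdevsw_release().
 */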
const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdevsw[bmajor]->d_localcount != NULL)
		localcount_acquire(bdevsw[bmajor]->d_localcount);

 out:	pserialize_read_exit(s);

	return bdev;
}

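/*
 * Release a reference taken by bdevsw_lookup_acquire().
 */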
void
bdevsw_release(const struct bdevsw *bd)
{

	KASSERT(bd != NULL);
	if (bd->d_localcount != NULL)
		localcount_release(bd->d_localcount, &device_cv, &device_lock);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

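/*
 * Look up a character device by number, and take a reference on its
 * localcount (if any) so that devsw_detach() cannot destroy the entry
 * until the reference is dropped with cdevsw_release().
 */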
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdevsw[cmajor]->d_localcount != NULL)
		localcount_acquire(cdevsw[cmajor]->d_localcount);

 out:	pserialize_read_exit(s);

	return cdev;
}

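/*
 * Release a reference taken by cdevsw_lookup_acquire().
 */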
void
cdevsw_release(const struct cdevsw *cd)
{

	KASSERT(cd != NULL);
	if (cd->d_localcount != NULL)
		localcount_release(cd->d_localcount, &device_cv, &device_lock);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	devmajor_t bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	devmajor_t cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert from device name to block major number.  The name may carry
 * a trailing unit number (e.g. "sd0"), which is ignored during the
 * comparison.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (bmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from device name to char major number.  The name may carry
 * a trailing unit number (e.g. "tty0"), which is ignored during the
 * comparison.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (cmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Device access methods.
 */

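/*
 * Non-MPSAFE drivers (those without D_MPSAFE in d_flag) are entered
 * under the big kernel lock; DEV_LOCK()/DEV_UNLOCK() take and drop it
 * as needed around each driver method call.
 */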
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp);	/* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}

int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}