subr_devsw.c revision 1.34.2.18 1 /* $NetBSD: subr_devsw.c,v 1.34.2.18 2017/04/26 02:53:27 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.18 2017/04/26 02:53:27 pgoyette Exp $");
73
74 #ifdef _KERNEL_OPT
75 #include "opt_dtrace.h"
76 #endif
77
78 #include <sys/param.h>
79 #include <sys/conf.h>
80 #include <sys/kmem.h>
81 #include <sys/systm.h>
82 #include <sys/poll.h>
83 #include <sys/tty.h>
84 #include <sys/cpu.h>
85 #include <sys/buf.h>
86 #include <sys/reboot.h>
87 #include <sys/sdt.h>
88 #include <sys/atomic.h>
89 #include <sys/condvar.h>
90 #include <sys/localcount.h>
91 #include <sys/pserialize.h>
92
93 #ifdef DEVSW_DEBUG
94 #define DPRINTF(x) printf x
95 #else /* DEVSW_DEBUG */
96 #define DPRINTF(x)
97 #endif /* DEVSW_DEBUG */
98
/* Bounds and element sizes for the device switch tables. */
#define MAXDEVSW 512 /* the maximum number of device majors */
#define BDEVSW_SIZE (sizeof(struct bdevsw *))
#define CDEVSW_SIZE (sizeof(struct cdevsw *))
#define DEVSWCONV_SIZE (sizeof(struct devsw_conv))

/*
 * Tables generated by config(8): the static bdevsw0/cdevsw0 arrays and
 * the name<->major conversion table, plus the current (possibly
 * dynamically grown) table pointers and their sizes.
 */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;	/* counts of static entries */
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

/* Serializes attach/detach and protects the tables above. */
kmutex_t device_lock;
kcondvar_t device_cv;		/* signalled as localcount references drain */
pserialize_t device_psz = NULL;	/* created by devsw_detach_init() */

/* biodone() hook, replaced when the VFS layer is present. */
void (*biodone_vfs)(buf_t *) = (void *)nullop;
119
120 void
121 devsw_init(void)
122 {
123
124 KASSERT(sys_bdevsws < MAXDEVSW - 1);
125 KASSERT(sys_cdevsws < MAXDEVSW - 1);
126 mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
127 cv_init(&device_cv, "devsw");
128 }
129
/*
 * Create the pserialize instance that devsw_detach_locked() uses to
 * wait out readers in the *_lookup_acquire() paths.  Must run before
 * the first detach; until then device_psz remains NULL.
 */
void
devsw_detach_init(void)
{

	device_psz = pserialize_create();
}
136
/*
 * Register a driver's block and/or character device switches and
 * record the name <-> major mappings in devsw_conv[].
 *
 * devname: driver name (required); cdev: character switch (required);
 * bdev: block switch or NULL; bmajor/cmajor: in/out major numbers,
 * pass -1 to allocate dynamically.
 *
 * Returns 0 on success, or EINVAL/EEXIST/ENOMEM.
 */
int
devsw_attach(const char *devname,
    const struct bdevsw *bdev, devmajor_t *bmajor,
    const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/* Dynamically attached devsw's must carry distinct localcounts. */
	if (bdev != NULL) {
		KASSERTMSG(bdev->d_localcount != NULL,
		    "%s: bdev %s has no d_localcount", __func__, devname);
		KASSERTMSG(bdev->d_localcount != cdev->d_localcount,
		    "%s: bdev and cdev for %s have same d_localcount",
		    __func__, devname);
	}
	if (cdev != NULL)
		KASSERTMSG(cdev->d_localcount != NULL,
		    "%s: cdev %s has no d_localcount", __func__, devname);

	/*
	 * If the name is already known (e.g. a re-attach after detach),
	 * reuse the recorded majors; they must agree with the caller's.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/*
		 * Publish: init the localcount, then use membar_producer()
		 * so the devsw contents are visible before the pointer is.
		 */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* New name: install the switches, allocating majors as needed. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		/* Undo the bdevsw attach before failing. */
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free conversion slot, growing the table by one if full. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Mark the fresh slot empty, then copy the old entries. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	/* Record the name -> (bmajor, cmajor) mapping. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
257
/*
 * Install a block device switch at *devmajor, allocating a dynamic
 * major when *devmajor < 0.  Grows bdevsw[] from the static bdevsw0
 * table to its MAXDEVSW maximum on first overflow; the old table is
 * never freed, so concurrent readers stay safe.
 *
 * Called with device_lock held.  Returns 0, ENOMEM or EEXIST.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	/* A NULL bdev is legal (character-only driver); nothing to do. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Pick the first dynamic major that is neither in use
		 * nor reserved by a name mapping in devsw_conv[].
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted", __func__);
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/* The table is only ever grown once, away from bdevsw0. */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	KASSERTMSG(devsw->d_localcount != NULL, "%s: bdev for major %d has "
	    "no localcount", __func__, *devmajor);
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the bdevsw contents before publication */
	membar_producer();

	bdevsw[*devmajor] = devsw;

	return (0);
}
314
/*
 * Install a character device switch at *devmajor, allocating a
 * dynamic major when *devmajor < 0.  Mirrors bdevsw_attach(), except
 * that devsw may not be NULL (callers guarantee a cdevsw exists).
 *
 * Called with device_lock held.  Returns 0, ENOMEM or EEXIST.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/*
		 * Pick the first dynamic major that is neither in use
		 * nor reserved by a name mapping in devsw_conv[].
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted", __func__);
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/* The table is only ever grown once, away from cdevsw0. */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	KASSERTMSG(devsw->d_localcount != NULL, "%s: cdev for major %d has "
	    "no localcount", __func__, *devmajor);
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the cdevsw contents before publication */
	membar_producer();

	cdevsw[*devmajor] = devsw;

	return (0);
}
368
369 /*
370 * First, look up both bdev and cdev indices, and remove the
371 * {b,c}devsw[] entries so no new references can be taken. Then
372 * drain any existing references.
373 */
374
/*
 * Detach a block and/or character devsw.  Clears the table entries so
 * no new references can be taken, waits for in-flight pserialize
 * readers, then drains and destroys any localcount references.
 *
 * Called with device_lock held; either argument may be NULL.
 */
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	/* i/j default past-the-end so later steps are skipped if absent */
	i = max_bdevsws;
	if (bdev != NULL) {
		/* Find the major currently mapping to bdev, if any. */
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount for major %d", __func__, i);
			break;
		}
	}
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount for major %d", __func__, j);
			break;
		}
	}
	/* Unhook the entries: subsequent lookups return NULL. */
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws )
		cdevsw[j] = NULL;

	/*
	 * Wait for all current readers to finish with the devsw's.
	 * NOTE(review): this assumes devsw_detach_init() has already
	 * run, i.e. device_psz != NULL -- confirm ordering of first
	 * detach against initialization.
	 */
	pserialize_perform(device_psz);

	/*
	 * No new accessors can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays, so no new references can be
	 * acquired. Wait for all existing references to drain,
	 * and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL ) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}
428
/*
 * Public detach entry point: take device_lock and detach the given
 * devsw pair.  Always succeeds (returns 0); may sleep while waiting
 * for references to drain.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
438
439 /*
440 * Look up a block device by number.
441 *
442 * => Caller must ensure that the device is attached.
443 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/*
	 * Wait for the content of the struct bdevsw to become visible
	 * (pairs with the membar_producer() in the attach path).  No
	 * reference is taken here; the caller must ensure the device
	 * stays attached.
	 */
	membar_datadep_consumer();

	return (bdevsw[bmajor]);
}
460
/*
 * Look up a block device by number and acquire a reference on it, so
 * that the driver cannot be detached until bdevsw_release() is called.
 * Returns NULL if the major is invalid or not attached.
 */
const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdev->d_localcount != NULL)
		localcount_acquire(bdev->d_localcount);

out:	pserialize_read_exit(s);

	return bdev;
}
493
/*
 * Release a reference obtained with bdevsw_lookup_acquire().
 * Statically linked devsw's have no localcount, so there is
 * nothing to release for them.
 */
void
bdevsw_release(const struct bdevsw *bd)
{

	KASSERT(bd != NULL);
	if (bd->d_localcount != NULL)
		localcount_release(bd->d_localcount, &device_cv, &device_lock);
}
502
503 /*
504 * Look up a character device by number.
505 *
506 * => Caller must ensure that the device is attached.
507 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/*
	 * Wait for the content of the struct cdevsw to become visible
	 * (pairs with the membar_producer() in the attach path).  No
	 * reference is taken here; the caller must ensure the device
	 * stays attached.
	 */
	membar_datadep_consumer();

	return (cdevsw[cmajor]);
}
524
/*
 * Look up a character device by number and acquire a reference on it,
 * so that the driver cannot be detached until cdevsw_release() is
 * called.  Returns NULL if the major is invalid or not attached.
 */
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdev->d_localcount != NULL)
		localcount_acquire(cdev->d_localcount);

out:	pserialize_read_exit(s);

	return cdev;
}
557
/*
 * Release a reference obtained with cdevsw_lookup_acquire().
 * Statically linked devsw's have no localcount, so there is
 * nothing to release for them.
 */
void
cdevsw_release(const struct cdevsw *cd)
{

	KASSERT(cd != NULL);
	if (cd->d_localcount != NULL)
		localcount_release(cd->d_localcount, &device_cv, &device_lock);
}
566
567 /*
568 * Look up a block device by reference to its operations set.
569 *
570 * => Caller must ensure that the device is not detached, and therefore
571 * that the returned major is still valid when dereferenced.
572 */
573 devmajor_t
574 bdevsw_lookup_major(const struct bdevsw *bdev)
575 {
576 devmajor_t bmajor;
577
578 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
579 if (bdevsw[bmajor] == bdev)
580 return (bmajor);
581 }
582
583 return (NODEVMAJOR);
584 }
585
586 /*
587 * Look up a character device by reference to its operations set.
588 *
589 * => Caller must ensure that the device is not detached, and therefore
590 * that the returned major is still valid when dereferenced.
591 */
592 devmajor_t
593 cdevsw_lookup_major(const struct cdevsw *cdev)
594 {
595 devmajor_t cmajor;
596
597 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
598 if (cdevsw[cmajor] == cdev)
599 return (cmajor);
600 }
601
602 return (NODEVMAJOR);
603 }
604
605 /*
606 * Convert from block major number to name.
607 *
608 * => Caller must ensure that the device is not detached, and therefore
609 * that the name pointer is still valid when dereferenced.
610 */
611 const char *
612 devsw_blk2name(devmajor_t bmajor)
613 {
614 const char *name;
615 devmajor_t cmajor;
616 int i;
617
618 name = NULL;
619 cmajor = -1;
620
621 mutex_enter(&device_lock);
622 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
623 mutex_exit(&device_lock);
624 return (NULL);
625 }
626 for (i = 0 ; i < max_devsw_convs; i++) {
627 if (devsw_conv[i].d_bmajor == bmajor) {
628 cmajor = devsw_conv[i].d_cmajor;
629 break;
630 }
631 }
632 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
633 name = devsw_conv[i].d_name;
634 mutex_exit(&device_lock);
635
636 return (name);
637 }
638
639 /*
640 * Convert char major number to device driver name.
641 */
642 const char *
643 cdevsw_getname(devmajor_t major)
644 {
645 const char *name;
646 int i;
647
648 name = NULL;
649
650 if (major < 0)
651 return (NULL);
652
653 mutex_enter(&device_lock);
654 for (i = 0 ; i < max_devsw_convs; i++) {
655 if (devsw_conv[i].d_cmajor == major) {
656 name = devsw_conv[i].d_name;
657 break;
658 }
659 }
660 mutex_exit(&device_lock);
661 return (name);
662 }
663
664 /*
665 * Convert block major number to device driver name.
666 */
667 const char *
668 bdevsw_getname(devmajor_t major)
669 {
670 const char *name;
671 int i;
672
673 name = NULL;
674
675 if (major < 0)
676 return (NULL);
677
678 mutex_enter(&device_lock);
679 for (i = 0 ; i < max_devsw_convs; i++) {
680 if (devsw_conv[i].d_bmajor == major) {
681 name = devsw_conv[i].d_name;
682 break;
683 }
684 }
685 mutex_exit(&device_lock);
686 return (name);
687 }
688
689 /*
690 * Convert from device name to block major number.
691 *
692 * => Caller must ensure that the device is not detached, and therefore
693 * that the major number is still valid when dereferenced.
694 */
695 devmajor_t
696 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
697 {
698 struct devsw_conv *conv;
699 devmajor_t bmajor;
700 int i;
701
702 if (name == NULL)
703 return (NODEVMAJOR);
704
705 mutex_enter(&device_lock);
706 for (i = 0 ; i < max_devsw_convs ; i++) {
707 size_t len;
708
709 conv = &devsw_conv[i];
710 if (conv->d_name == NULL)
711 continue;
712 len = strlen(conv->d_name);
713 if (strncmp(conv->d_name, name, len) != 0)
714 continue;
715 if (*(name +len) && !isdigit(*(name + len)))
716 continue;
717 bmajor = conv->d_bmajor;
718 if (bmajor < 0 || bmajor >= max_bdevsws ||
719 bdevsw[bmajor] == NULL)
720 break;
721 if (devname != NULL) {
722 #ifdef DEVSW_DEBUG
723 if (strlen(conv->d_name) >= devnamelen)
724 printf("%s: too short buffer", __func__);
725 #endif /* DEVSW_DEBUG */
726 strncpy(devname, conv->d_name, devnamelen);
727 devname[devnamelen - 1] = '\0';
728 }
729 mutex_exit(&device_lock);
730 return (bmajor);
731 }
732
733 mutex_exit(&device_lock);
734 return (NODEVMAJOR);
735 }
736
737 /*
738 * Convert from device name to char major number.
739 *
740 * => Caller must ensure that the device is not detached, and therefore
741 * that the major number is still valid when dereferenced.
742 */
743 devmajor_t
744 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
745 {
746 struct devsw_conv *conv;
747 devmajor_t cmajor;
748 int i;
749
750 if (name == NULL)
751 return (NODEVMAJOR);
752
753 mutex_enter(&device_lock);
754 for (i = 0 ; i < max_devsw_convs ; i++) {
755 size_t len;
756
757 conv = &devsw_conv[i];
758 if (conv->d_name == NULL)
759 continue;
760 len = strlen(conv->d_name);
761 if (strncmp(conv->d_name, name, len) != 0)
762 continue;
763 if (*(name +len) && !isdigit(*(name + len)))
764 continue;
765 cmajor = conv->d_cmajor;
766 if (cmajor < 0 || cmajor >= max_cdevsws ||
767 cdevsw[cmajor] == NULL)
768 break;
769 if (devname != NULL) {
770 #ifdef DEVSW_DEBUG
771 if (strlen(conv->d_name) >= devnamelen)
772 printf("%s: too short buffer", __func__);
773 #endif /* DEVSW_DEBUG */
774 strncpy(devname, conv->d_name, devnamelen);
775 devname[devnamelen - 1] = '\0';
776 }
777 mutex_exit(&device_lock);
778 return (cmajor);
779 }
780
781 mutex_exit(&device_lock);
782 return (NODEVMAJOR);
783 }
784
785 /*
786 * Convert from character dev_t to block dev_t.
787 *
788 * => Caller must ensure that the device is not detached, and therefore
789 * that the major number is still valid when dereferenced.
790 */
791 dev_t
792 devsw_chr2blk(dev_t cdev)
793 {
794 devmajor_t bmajor, cmajor;
795 int i;
796 dev_t rv;
797
798 cmajor = major(cdev);
799 bmajor = NODEVMAJOR;
800 rv = NODEV;
801
802 mutex_enter(&device_lock);
803 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
804 mutex_exit(&device_lock);
805 return (NODEV);
806 }
807 for (i = 0 ; i < max_devsw_convs ; i++) {
808 if (devsw_conv[i].d_cmajor == cmajor) {
809 bmajor = devsw_conv[i].d_bmajor;
810 break;
811 }
812 }
813 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
814 rv = makedev(bmajor, minor(cdev));
815 mutex_exit(&device_lock);
816
817 return (rv);
818 }
819
820 /*
821 * Convert from block dev_t to character dev_t.
822 *
823 * => Caller must ensure that the device is not detached, and therefore
824 * that the major number is still valid when dereferenced.
825 */
826 dev_t
827 devsw_blk2chr(dev_t bdev)
828 {
829 devmajor_t bmajor, cmajor;
830 int i;
831 dev_t rv;
832
833 bmajor = major(bdev);
834 cmajor = NODEVMAJOR;
835 rv = NODEV;
836
837 mutex_enter(&device_lock);
838 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
839 mutex_exit(&device_lock);
840 return (NODEV);
841 }
842 for (i = 0 ; i < max_devsw_convs ; i++) {
843 if (devsw_conv[i].d_bmajor == bmajor) {
844 cmajor = devsw_conv[i].d_cmajor;
845 break;
846 }
847 }
848 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
849 rv = makedev(cmajor, minor(bdev));
850 mutex_exit(&device_lock);
851
852 return (rv);
853 }
854
855 /*
856 * Device access methods.
857 */
858
/*
 * Take/drop the big kernel lock around a driver call unless the
 * driver is marked D_MPSAFE.  The enclosing function must declare an
 * int "mpflag"; DEV_LOCK records the D_MPSAFE state there so that
 * DEV_UNLOCK only unlocks when DEV_LOCK actually locked.
 */
#define DEV_LOCK(d) \
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) { \
		KERNEL_LOCK(1, NULL); \
	}

#define DEV_UNLOCK(d) \
	if (mpflag == 0) { \
		KERNEL_UNLOCK_ONE(NULL); \
	}
868
869 int
870 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
871 {
872 const struct bdevsw *d;
873 int rv, mpflag;
874
875 /*
876 * For open we need to lock, in order to synchronize
877 * with attach/detach.
878 */
879 mutex_enter(&device_lock);
880 d = bdevsw_lookup_acquire(dev);
881 mutex_exit(&device_lock);
882 if (d == NULL)
883 return ENXIO;
884
885 DEV_LOCK(d);
886 rv = (*d->d_open)(dev, flag, devtype, l);
887 DEV_UNLOCK(d);
888 bdevsw_release(dev);
889
890 return rv;
891 }
892
893 int
894 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
895 {
896 const struct bdevsw *d;
897 int rv, mpflag;
898
899 if ((d = bdevsw_lookup_acquire(dev)) == NULL)
900 return ENXIO;
901
902 DEV_LOCK(d);
903 rv = (*d->d_close)(dev, flag, devtype, l);
904 DEV_UNLOCK(d);
905 bdevsw_release(dev);
906
907 return rv;
908 }
909
910 SDT_PROVIDER_DECLARE(io);
911 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
912
913 void
914 bdev_strategy(struct buf *bp)
915 {
916 const struct bdevsw *d;
917 int mpflag;
918
919 SDT_PROBE1(io, kernel, , start, bp);
920
921 if ((d = bdevsw_lookup_acquire(bp->b_dev)) == NULL) {
922 bp->b_error = ENXIO;
923 bp->b_resid = bp->b_bcount;
924 biodone_vfs(bp); /* biodone() iff vfs present */
925 return;
926 }
927
928 DEV_LOCK(d);
929 (*d->d_strategy)(bp);
930 DEV_UNLOCK(d);
931 bdevsw_release(bp->b_dev);
932 }
933
934 int
935 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
936 {
937 const struct bdevsw *d;
938 int rv, mpflag;
939
940 if ((d = bdevsw_lookup_acquire(dev)) == NULL)
941 return ENXIO;
942
943 DEV_LOCK(d);
944 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
945 DEV_UNLOCK(d);
946 bdevsw_release(dev);
947
948 return rv;
949 }
950
/*
 * Write a crash dump via the block driver's d_dump routine.
 * Uses the unreferenced lookup deliberately -- see comment below.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open. Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
971
972 int
973 bdev_flags(dev_t dev)
974 {
975 const struct bdevsw *d;
976 int rv;
977
978 if ((d = bdevsw_lookup_acquire(dev)) == NULL)
979 return 0;
980 rv = d->d_flag & ~D_TYPEMASK;
981 bdevsw_release();
982
983 return rv;
984 }
985
986 int
987 bdev_type(dev_t dev)
988 {
989 const struct bdevsw *d;
990 int rv;
991
992 if ((d = bdevsw_lookup_acquire(dev)) == NULL)
993 return D_OTHER;
994 rv = d->d_flag & D_TYPEMASK;
995 bdevsw_release(dev);
996
997 return rv;
998 }
999
1000 int
1001 bdev_size(dev_t dev)
1002 {
1003 const struct bdevsw *d;
1004 int rv, mpflag = 0;
1005
1006 if ((d = bdevsw_lookup_acquire(dev)) == NULL ||
1007 d->d_psize == NULL)
1008 return -1;
1009
1010 /*
1011 * Don't to try lock the device if we're dumping.
1012 * XXX: is there a better way to test this?
1013 */
1014 if ((boothowto & RB_DUMP) == 0)
1015 DEV_LOCK(d);
1016 rv = (*d->d_psize)(dev);
1017 if ((boothowto & RB_DUMP) == 0)
1018 DEV_UNLOCK(d);
1019 bdevsw_release(dev);
1020 return rv;
1021 }
1022
1023 int
1024 bdev_discard(dev_t dev, off_t pos, off_t len)
1025 {
1026 const struct bdevsw *d;
1027 int rv, mpflag;
1028
1029 if ((d = bdevsw_lookup_acquire(dev)) == NULL)
1030 return ENXIO;
1031
1032 DEV_LOCK(d);
1033 rv = (*d->d_discard)(dev, pos, len);
1034 DEV_UNLOCK(d);
1035 bdevsw_release(dev);
1036
1037 return rv;
1038 }
1039
1040 int
1041 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
1042 {
1043 const struct cdevsw *d;
1044 int rv, mpflag;
1045
1046 /*
1047 * For open we need to lock, in order to synchronize
1048 * with attach/detach.
1049 */
1050 mutex_enter(&device_lock);
1051 d = cdevsw_lookup_acquire(dev);
1052 mutex_exit(&device_lock);
1053 if (d == NULL)
1054 return ENXIO;
1055
1056 DEV_LOCK(d);
1057 rv = (*d->d_open)(dev, flag, devtype, l);
1058 DEV_UNLOCK(d);
1059 cdevsw_release(dev);
1060
1061 return rv;
1062 }
1063
1064 int
1065 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
1066 {
1067 const struct cdevsw *d;
1068 int rv, mpflag;
1069
1070 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1071 return ENXIO;
1072
1073 DEV_LOCK(d);
1074 rv = (*d->d_close)(dev, flag, devtype, l);
1075 DEV_UNLOCK(d);
1076 cdevsw_release(dev);
1077
1078 return rv;
1079 }
1080
1081 int
1082 cdev_read(dev_t dev, struct uio *uio, int flag)
1083 {
1084 const struct cdevsw *d;
1085 int rv, mpflag;
1086
1087 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1088 return ENXIO;
1089
1090 DEV_LOCK(d);
1091 rv = (*d->d_read)(dev, uio, flag);
1092 DEV_UNLOCK(d);
1093 cdevsw_release(dev);
1094
1095 return rv;
1096 }
1097
1098 int
1099 cdev_write(dev_t dev, struct uio *uio, int flag)
1100 {
1101 const struct cdevsw *d;
1102 int rv, mpflag;
1103
1104 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1105 return ENXIO;
1106
1107 DEV_LOCK(d);
1108 rv = (*d->d_write)(dev, uio, flag);
1109 DEV_UNLOCK(d);
1110 cdevsw_release(dev);
1111
1112 return rv;
1113 }
1114
1115 int
1116 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
1117 {
1118 const struct cdevsw *d;
1119 int rv, mpflag;
1120
1121 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1122 return ENXIO;
1123
1124 DEV_LOCK(d);
1125 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
1126 DEV_UNLOCK(d);
1127 cdevsw_release(dev);
1128
1129 return rv;
1130 }
1131
1132 void
1133 cdev_stop(struct tty *tp, int flag)
1134 {
1135 const struct cdevsw *d;
1136 int mpflag;
1137
1138 if ((d = cdevsw_lookup_acquire(tp->t_dev)) == NULL)
1139 return;
1140
1141 DEV_LOCK(d);
1142 (*d->d_stop)(tp, flag);
1143 DEV_UNLOCK(d);
1144 cdevsw_release(tp->t_dev);
1145 }
1146
1147 struct tty *
1148 cdev_tty(dev_t dev)
1149 {
1150 const struct cdevsw *d;
1151 struct tty *rv;
1152
1153 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1154 return NULL;
1155
1156 /* XXX Check if necessary. */
1157 if (d->d_tty == NULL)
1158 rv = NULL;
1159 else
1160 rv= (*d->d_tty)(dev);
1161 cdevsw_release(dev);
1162
1163 return rv;
1164 }
1165
1166 int
1167 cdev_poll(dev_t dev, int flag, lwp_t *l)
1168 {
1169 const struct cdevsw *d;
1170 int rv, mpflag;
1171
1172 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1173 return POLLERR;
1174
1175 DEV_LOCK(d);
1176 rv = (*d->d_poll)(dev, flag, l);
1177 DEV_UNLOCK(d);
1178 cdevsw_release(dev);
1179
1180 return rv;
1181 }
1182
1183 paddr_t
1184 cdev_mmap(dev_t dev, off_t off, int flag)
1185 {
1186 const struct cdevsw *d;
1187 paddr_t rv;
1188 int mpflag;
1189
1190 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1191 return (paddr_t)-1LL;
1192
1193 DEV_LOCK(d);
1194 rv = (*d->d_mmap)(dev, off, flag);
1195 DEV_UNLOCK(d);
1196 cdevsw_release(dev);
1197
1198 return rv;
1199 }
1200
1201 int
1202 cdev_kqfilter(dev_t dev, struct knote *kn)
1203 {
1204 const struct cdevsw *d;
1205 int rv, mpflag;
1206
1207 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1208 return ENXIO;
1209
1210 DEV_LOCK(d);
1211 rv = (*d->d_kqfilter)(dev, kn);
1212 DEV_UNLOCK(d);
1213 cdevsw_release(dev);
1214
1215 return rv;
1216 }
1217
1218 int
1219 cdev_discard(dev_t dev, off_t pos, off_t len)
1220 {
1221 const struct cdevsw *d;
1222 int rv, mpflag;
1223
1224 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1225 return ENXIO;
1226
1227 DEV_LOCK(d);
1228 rv = (*d->d_discard)(dev, pos, len);
1229 DEV_UNLOCK(d);
1230 cdevsw_release(dev);
1231
1232 return rv;
1233 }
1234
1235 int
1236 cdev_flags(dev_t dev)
1237 {
1238 const struct cdevsw *d;
1239 int rv;
1240
1241 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1242 rv = 0;
1243 else
1244 rv = d->d_flag & ~D_TYPEMASK;
1245 cdevsw_release(dev);
1246
1247 return rv;
1248 }
1249
1250 int
1251 cdev_type(dev_t dev)
1252 {
1253 const struct cdevsw *d;
1254 int rv;
1255
1256 if ((d = cdevsw_lookup_acquire(dev)) == NULL)
1257 rv = D_OTHER;
1258 else
1259 rv = d->d_flag & D_TYPEMASK;
1260 cdevsw_release(dev);
1261
1262 return rv;
1263 }
1264
1265 /*
1266 * nommap(dev, off, prot)
1267 *
1268 * mmap routine that always fails, for non-mmappable devices.
1269 */
/*
 * Always-failing mmap routine for non-mmappable devices; returns the
 * (paddr_t)-1 sentinel unconditionally.
 */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	return (paddr_t)-1;
}
1276