/*	$NetBSD: subr_devsw.c,v 1.34.2.4 2016/07/17 05:02:19 pgoyette Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
 */
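
/*
 * Example: a minimal sketch of how a loadable driver could register
 * and unregister itself with this module.  Illustrative only; the
 * "mydev" names and method implementations are hypothetical, and a
 * real driver would fill in every method it supports:
 *
 *	static struct localcount mydev_clc;
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		.d_open = mydev_open,
 *		.d_close = mydev_close,
 *		.d_read = mydev_read,
 *		.d_write = mydev_write,
 *		.d_flag = D_OTHER | D_MPSAFE,
 *		.d_localcount = &mydev_clc,
 *	};
 *
 *	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
 *	int error;
 *
 *	// Let the kernel pick the majors; "mydev" is recorded by name.
 *	error = devsw_attach("mydev", NULL, &bmajor,
 *	    &mydev_cdevsw, &cmajor);
 *	if (error != 0)
 *		return error;
 *	...
 *	// On unload, drop the registration again.
 *	devsw_detach(NULL, &mydev_cdevsw);
 */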

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.4 2016/07/17 05:02:19 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/localcount.h>
#include <sys/pserialize.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */
#define	MAXDEVSW	512	/* the maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;		/* serializes devsw attach/detach and lookups */
kcondvar_t device_cv;		/* signalled as localcount references drain */
pserialize_t device_psz = NULL;	/* created lazily, on first detach */

void (*biodone_vfs)(buf_t *) = (void *)nullop;

void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&device_cv, "devsw");
}

int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	if (bdev != NULL) {
		KASSERT(bdev->d_localcount != NULL);
		KASSERT(bdev->d_localcount != cdev->d_localcount);
	}
	if (cdev != NULL)
		KASSERT(cdev->d_localcount != NULL);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/* use membar_producer() to ensure visibility of the xdevsw */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}

static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	/* initialize the localcount, then ensure visibility of the bdevsw */
	KASSERT(devsw->d_localcount != NULL);
	localcount_init(devsw->d_localcount);
	membar_producer();

	bdevsw[*devmajor] = devsw;

	return (0);
}

static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	/* initialize the localcount, then ensure visibility of the cdevsw */
	KASSERT(devsw->d_localcount != NULL);
	localcount_init(devsw->d_localcount);
	membar_producer();

	cdevsw[*devmajor] = devsw;

	return (0);
}

/*
 * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
 * drain any existing references.
 */
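/*
 * The reader side of this protocol lives in bdevsw_lookup_acquire()
 * and cdevsw_lookup_acquire() below: a pserialize read section around
 * the table load, plus a localcount reference for dynamically
 * attached drivers.
 */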

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount", __func__);
			break;
		}
	}
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount", __func__);
			break;
		}
	}
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws)
		cdevsw[j] = NULL;

	/*
	 * If we haven't already done so, create the serialization
	 * structure.  Then wait for all current readers to finish.
	 */
	if (__predict_false(device_psz == NULL))
		device_psz = pserialize_create();
	pserialize_perform(device_psz);

	/*
	 * Here, no new readers can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays.  Wait for existing references to
	 * drain, and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}

int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdevsw[bmajor]->d_localcount != NULL)
		localcount_acquire(bdevsw[bmajor]->d_localcount);

 out:	pserialize_read_exit(s);

	return bdev;
}

void
bdevsw_release(const struct bdevsw *bd)
{

	KASSERT(bd != NULL);
	if (bd->d_localcount != NULL)
		localcount_release(bd->d_localcount, &device_cv, &device_lock);
}
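
/*
 * Illustrative sketch (not from this file): a caller that must keep
 * the driver from being detached while it sleeps can bracket the
 * call with the acquire/release pair instead of plain bdevsw_lookup():
 *
 *	const struct bdevsw *d;
 *	int error;
 *
 *	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
 *		return ENXIO;
 *	error = (*d->d_ioctl)(dev, cmd, data, flag, l);
 *	bdevsw_release(d);
 */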

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdevsw[cmajor]->d_localcount != NULL)
		localcount_acquire(cdevsw[cmajor]->d_localcount);

 out:	pserialize_read_exit(s);

	return cdev;
}

void
cdevsw_release(const struct cdevsw *cd)
{

	KASSERT(cd != NULL);
	if (cd->d_localcount != NULL)
		localcount_release(cd->d_localcount, &device_cv, &device_lock);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	devmajor_t bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	devmajor_t cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
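		/*
		 * Match a driver name optionally followed by a unit
		 * number, e.g. "sd0" matches the "sd" entry.
		 */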
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too short\n");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (bmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
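		/*
		 * Match a driver name optionally followed by a unit
		 * number, e.g. "tty0" matches the "tty" entry.
		 */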
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: buffer too short\n");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (cmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Device access methods.
 */

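/*
 * Calls into non-MPSAFE drivers (D_MPSAFE clear in d_flag) are still
 * serialized by the big kernel lock: DEV_LOCK() takes KERNEL_LOCK
 * before the driver method runs, and DEV_UNLOCK() drops it afterwards.
 * MPSAFE drivers are called directly.
 */
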
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp);	/* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}

int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}