Home | History | Annotate | Line # | Download | only in kern
subr_devsw.c revision 1.34.2.1
      1 /*	$NetBSD: subr_devsw.c,v 1.34.2.1 2016/07/16 07:54:13 pgoyette Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
     48  *	When the system initially boots only the statically allocated
     49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
     50  *	allocation, we allocate a fixed block of memory to hold the new,
     51  *	expanded index.  This "fork" of the table is only ever performed
     52  *	once in order to guarantee that other threads may safely access
     53  *	the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
     63  *	XXX Currently, the wrapper methods such as cdev_read() verify
     64  *	that a device driver does in fact exist before calling the
     65  *	associated driver method.  This should be changed so that
     66  *	once the device is has been referenced by a vnode (opened),
     67  *	calling	the other methods should be valid until that reference
     68  *	is dropped.
     69  */
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.1 2016/07/16 07:54:13 pgoyette Exp $");
     73 
     74 #ifdef _KERNEL_OPT
     75 #include "opt_dtrace.h"
     76 #endif
     77 
     78 #include <sys/param.h>
     79 #include <sys/conf.h>
     80 #include <sys/kmem.h>
     81 #include <sys/systm.h>
     82 #include <sys/poll.h>
     83 #include <sys/tty.h>
     84 #include <sys/cpu.h>
     85 #include <sys/buf.h>
     86 #include <sys/reboot.h>
     87 #include <sys/sdt.h>
     88 #include <sys/condvar.h>
     89 #include <sys/localcount.h>
     90 
     91 #ifdef DEVSW_DEBUG
     92 #define	DPRINTF(x)	printf x
     93 #else /* DEVSW_DEBUG */
     94 #define	DPRINTF(x)
     95 #endif /* DEVSW_DEBUG */
     96 
     97 #define	MAXDEVSW	512	/* the maximum of major device number */
     98 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
     99 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
    100 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
    101 
    102 extern const struct bdevsw **bdevsw, *bdevsw0[];
    103 extern const struct cdevsw **cdevsw, *cdevsw0[];
    104 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    105 extern const int sys_bdevsws, sys_cdevsws;
    106 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    107 
    108 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
    109 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
    110 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    111 
    112 kmutex_t	device_lock;
    113 kcondvar_t	device_cv;
    114 
    115 void (*biodone_vfs)(buf_t *) = (void *)nullop;
    116 
/*
 * Initialize the device switch framework: the lock and condition
 * variable that serialize attach/detach against lookups.  Called once
 * during early boot; the static tables must fit below MAXDEVSW.
 */
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&device_cv, "devsw");
}
    126 
/*
 * Attach a driver by name, registering its block and/or character
 * device switch entries and recording the name<->major mapping.
 *
 * devname	driver name used for name<->major conversion
 * bdev/bmajor	block device switch (may be NULL) and in/out major
 * cdev/cmajor	character device switch (required) and in/out major
 *
 * A major of -1 on entry requests dynamic allocation; on success the
 * chosen majors are written back through *bmajor/*cmajor.  Returns 0
 * or an errno value.
 */
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/*
	 * If the name is already known, this is a re-attach: reuse the
	 * recorded majors and just repopulate the switch table slots.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		/* Caller-requested majors must agree with recorded ones. */
		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		/* The recorded slots must currently be vacant. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL) {
			KASSERT(bdev->d_localcount != NULL);
			localcount_init(bdev->d_localcount);
			bdevsw[*bmajor] = bdev;
		}
		KASSERT(cdev->d_localcount != NULL);
		localcount_init(cdev->d_localcount);
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* New name: allocate majors and populate the switch tables. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free conversion entry, growing the table by one if full. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Initialize the new tail entry, then copy the old table. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		/* The initial static table is never freed. */
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	/* Record the name<->major mapping. */
	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
    235 
    236 static int
    237 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
    238 {
    239 	const struct bdevsw **newptr;
    240 	devmajor_t bmajor;
    241 	int i;
    242 
    243 	KASSERT(mutex_owned(&device_lock));
    244 
    245 	if (devsw == NULL)
    246 		return (0);
    247 
    248 	if (*devmajor < 0) {
    249 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    250 			if (bdevsw[bmajor] != NULL)
    251 				continue;
    252 			for (i = 0 ; i < max_devsw_convs ; i++) {
    253 				if (devsw_conv[i].d_bmajor == bmajor)
    254 					break;
    255 			}
    256 			if (i != max_devsw_convs)
    257 				continue;
    258 			break;
    259 		}
    260 		*devmajor = bmajor;
    261 	}
    262 
    263 	if (*devmajor >= MAXDEVSW) {
    264 		printf("bdevsw_attach: block majors exhausted");
    265 		return (ENOMEM);
    266 	}
    267 
    268 	if (*devmajor >= max_bdevsws) {
    269 		KASSERT(bdevsw == bdevsw0);
    270 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    271 		if (newptr == NULL)
    272 			return (ENOMEM);
    273 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    274 		bdevsw = newptr;
    275 		max_bdevsws = MAXDEVSW;
    276 	}
    277 
    278 	if (bdevsw[*devmajor] != NULL)
    279 		return (EEXIST);
    280 
    281 	bdevsw[*devmajor] = devsw;
    282 	KASSERT(devsw->d_localcount != NULL);
    283 	localcount_init(devsw->d_localcount);
    284 
    285 	return (0);
    286 }
    287 
    288 static int
    289 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
    290 {
    291 	const struct cdevsw **newptr;
    292 	devmajor_t cmajor;
    293 	int i;
    294 
    295 	KASSERT(mutex_owned(&device_lock));
    296 
    297 	if (*devmajor < 0) {
    298 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    299 			if (cdevsw[cmajor] != NULL)
    300 				continue;
    301 			for (i = 0 ; i < max_devsw_convs ; i++) {
    302 				if (devsw_conv[i].d_cmajor == cmajor)
    303 					break;
    304 			}
    305 			if (i != max_devsw_convs)
    306 				continue;
    307 			break;
    308 		}
    309 		*devmajor = cmajor;
    310 	}
    311 
    312 	if (*devmajor >= MAXDEVSW) {
    313 		printf("cdevsw_attach: character majors exhausted");
    314 		return (ENOMEM);
    315 	}
    316 
    317 	if (*devmajor >= max_cdevsws) {
    318 		KASSERT(cdevsw == cdevsw0);
    319 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    320 		if (newptr == NULL)
    321 			return (ENOMEM);
    322 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    323 		cdevsw = newptr;
    324 		max_cdevsws = MAXDEVSW;
    325 	}
    326 
    327 	if (cdevsw[*devmajor] != NULL)
    328 		return (EEXIST);
    329 
    330 	cdevsw[*devmajor] = devsw;
    331 	KASSERT(devsw->d_localcount != NULL);
    332 	localcount_init(devsw->d_localcount);
    333 
    334 	return (0);
    335 }
    336 
/*
 * Remove a driver's entries from the device switch tables.
 *
 * First, look up both bdev and cdev indices, and confirm that the
 * localcount pointer(s) exist.  Then drain any existing references,
 * deactivate the localcount(s).  Finally, remove the {b,c}devsw[]
 * entries.
 *
 * => Called with device_lock held; localcount_drain() may drop and
 *    re-take it while waiting for references to go away.
 */

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	/* i (resp. j) stays at max_*devsws when no entry matches. */
	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount", __func__);
			break;
		}
	}
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount", __func__);
			break;
		}
	}
	if (i < max_bdevsws) {
		/* Wait out existing references, then clear the slot. */
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
		bdevsw[i] = NULL;
	}
	if (j < max_cdevsws ) {
		/*
		 * Take care not to drain/fini the d_localcount if the same
		 * one was used for both cdev and bdev!
		 */
		if (i >= max_bdevsws ||
		    bdev->d_localcount != cdev->d_localcount) {
			localcount_drain(cdev->d_localcount, &device_cv,
			    &device_lock);
			localcount_fini(cdev->d_localcount);
		}
		cdevsw[j] = NULL;
	}
}
    392 
    393 int
    394 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    395 {
    396 
    397 	mutex_enter(&device_lock);
    398 	devsw_detach_locked(bdev, cdev);
    399 	mutex_exit(&device_lock);
    400 	return 0;
    401 }
    402 
    403 /*
    404  * Look up a block device by number.
    405  *
    406  * => Caller must ensure that the device is attached.
    407  */
    408 const struct bdevsw *
    409 bdevsw_lookup(dev_t dev)
    410 {
    411 	devmajor_t bmajor;
    412 
    413 	if (dev == NODEV)
    414 		return (NULL);
    415 	bmajor = major(dev);
    416 	if (bmajor < 0 || bmajor >= max_bdevsws)
    417 		return (NULL);
    418 
    419 	return (bdevsw[bmajor]);
    420 }
    421 
    422 const struct bdevsw *
    423 bdevsw_lookup_acquire(dev_t dev)
    424 {
    425 	devmajor_t bmajor;
    426 
    427 	if (dev == NODEV)
    428 		return (NULL);
    429 	bmajor = major(dev);
    430 	if (bmajor < 0 || bmajor >= max_bdevsws)
    431 		return (NULL);
    432 
    433 	if (bdevsw[bmajor]->d_localcount != NULL)
    434 		localcount_acquire(bdevsw[bmajor]->d_localcount);
    435 
    436 	return (bdevsw[bmajor]);
    437 }
    438 
    439 void
    440 bdevsw_release(const struct bdevsw *bd)
    441 {
    442 	devmajor_t bmaj;
    443 
    444 	bmaj = bdevsw_lookup_major(bd);
    445 
    446 	KASSERTMSG(bmaj != NODEVMAJOR, "%s: no bmajor to release!", __func__);
    447 	if (bd->d_localcount != NULL)
    448 		localcount_release(bd->d_localcount, &device_cv, &device_lock);
    449 }
    450 
    451 /*
    452  * Look up a character device by number.
    453  *
    454  * => Caller must ensure that the device is attached.
    455  */
    456 const struct cdevsw *
    457 cdevsw_lookup(dev_t dev)
    458 {
    459 	devmajor_t cmajor;
    460 
    461 	if (dev == NODEV)
    462 		return (NULL);
    463 	cmajor = major(dev);
    464 	if (cmajor < 0 || cmajor >= max_cdevsws)
    465 		return (NULL);
    466 
    467 	return (cdevsw[cmajor]);
    468 }
    469 
    470 const struct cdevsw *
    471 cdevsw_lookup_acquire(dev_t dev)
    472 {
    473 	devmajor_t cmajor;
    474 
    475 	if (dev == NODEV)
    476 		return (NULL);
    477 	cmajor = major(dev);
    478 	if (cmajor < 0 || cmajor >= max_cdevsws)
    479 		return (NULL);
    480 
    481 	if (cdevsw[cmajor]->d_localcount != NULL)
    482 		localcount_acquire(cdevsw[cmajor]->d_localcount);
    483 
    484 	return (cdevsw[cmajor]);
    485 }
    486 
    487 void
    488 cdevsw_release(const struct cdevsw *cd)
    489 {
    490 	devmajor_t cmaj;
    491 
    492 	cmaj = cdevsw_lookup_major(cd);
    493 
    494 	KASSERTMSG(cmaj != NODEVMAJOR, "%s: no cmajor to release!", __func__);
    495 	if (cd->d_localcount != NULL)
    496 		localcount_release(cd->d_localcount, &device_cv, &device_lock);
    497 }
    498 
    499 /*
    500  * Look up a block device by reference to its operations set.
    501  *
    502  * => Caller must ensure that the device is not detached, and therefore
    503  *    that the returned major is still valid when dereferenced.
    504  */
    505 devmajor_t
    506 bdevsw_lookup_major(const struct bdevsw *bdev)
    507 {
    508 	devmajor_t bmajor;
    509 
    510 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    511 		if (bdevsw[bmajor] == bdev)
    512 			return (bmajor);
    513 	}
    514 
    515 	return (NODEVMAJOR);
    516 }
    517 
    518 /*
    519  * Look up a character device by reference to its operations set.
    520  *
    521  * => Caller must ensure that the device is not detached, and therefore
    522  *    that the returned major is still valid when dereferenced.
    523  */
    524 devmajor_t
    525 cdevsw_lookup_major(const struct cdevsw *cdev)
    526 {
    527 	devmajor_t cmajor;
    528 
    529 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    530 		if (cdevsw[cmajor] == cdev)
    531 			return (cmajor);
    532 	}
    533 
    534 	return (NODEVMAJOR);
    535 }
    536 
    537 /*
    538  * Convert from block major number to name.
    539  *
    540  * => Caller must ensure that the device is not detached, and therefore
    541  *    that the name pointer is still valid when dereferenced.
    542  */
    543 const char *
    544 devsw_blk2name(devmajor_t bmajor)
    545 {
    546 	const char *name;
    547 	devmajor_t cmajor;
    548 	int i;
    549 
    550 	name = NULL;
    551 	cmajor = -1;
    552 
    553 	mutex_enter(&device_lock);
    554 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    555 		mutex_exit(&device_lock);
    556 		return (NULL);
    557 	}
    558 	for (i = 0 ; i < max_devsw_convs; i++) {
    559 		if (devsw_conv[i].d_bmajor == bmajor) {
    560 			cmajor = devsw_conv[i].d_cmajor;
    561 			break;
    562 		}
    563 	}
    564 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    565 		name = devsw_conv[i].d_name;
    566 	mutex_exit(&device_lock);
    567 
    568 	return (name);
    569 }
    570 
    571 /*
    572  * Convert char major number to device driver name.
    573  */
    574 const char *
    575 cdevsw_getname(devmajor_t major)
    576 {
    577 	const char *name;
    578 	int i;
    579 
    580 	name = NULL;
    581 
    582 	if (major < 0)
    583 		return (NULL);
    584 
    585 	mutex_enter(&device_lock);
    586 	for (i = 0 ; i < max_devsw_convs; i++) {
    587 		if (devsw_conv[i].d_cmajor == major) {
    588 			name = devsw_conv[i].d_name;
    589 			break;
    590 		}
    591 	}
    592 	mutex_exit(&device_lock);
    593 	return (name);
    594 }
    595 
    596 /*
    597  * Convert block major number to device driver name.
    598  */
    599 const char *
    600 bdevsw_getname(devmajor_t major)
    601 {
    602 	const char *name;
    603 	int i;
    604 
    605 	name = NULL;
    606 
    607 	if (major < 0)
    608 		return (NULL);
    609 
    610 	mutex_enter(&device_lock);
    611 	for (i = 0 ; i < max_devsw_convs; i++) {
    612 		if (devsw_conv[i].d_bmajor == major) {
    613 			name = devsw_conv[i].d_name;
    614 			break;
    615 		}
    616 	}
    617 	mutex_exit(&device_lock);
    618 	return (name);
    619 }
    620 
    621 /*
    622  * Convert from device name to block major number.
    623  *
    624  * => Caller must ensure that the device is not detached, and therefore
    625  *    that the major number is still valid when dereferenced.
    626  */
    627 devmajor_t
    628 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    629 {
    630 	struct devsw_conv *conv;
    631 	devmajor_t bmajor;
    632 	int i;
    633 
    634 	if (name == NULL)
    635 		return (NODEVMAJOR);
    636 
    637 	mutex_enter(&device_lock);
    638 	for (i = 0 ; i < max_devsw_convs ; i++) {
    639 		size_t len;
    640 
    641 		conv = &devsw_conv[i];
    642 		if (conv->d_name == NULL)
    643 			continue;
    644 		len = strlen(conv->d_name);
    645 		if (strncmp(conv->d_name, name, len) != 0)
    646 			continue;
    647 		if (*(name +len) && !isdigit(*(name + len)))
    648 			continue;
    649 		bmajor = conv->d_bmajor;
    650 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    651 		    bdevsw[bmajor] == NULL)
    652 			break;
    653 		if (devname != NULL) {
    654 #ifdef DEVSW_DEBUG
    655 			if (strlen(conv->d_name) >= devnamelen)
    656 				printf("devsw_name2blk: too short buffer");
    657 #endif /* DEVSW_DEBUG */
    658 			strncpy(devname, conv->d_name, devnamelen);
    659 			devname[devnamelen - 1] = '\0';
    660 		}
    661 		mutex_exit(&device_lock);
    662 		return (bmajor);
    663 	}
    664 
    665 	mutex_exit(&device_lock);
    666 	return (NODEVMAJOR);
    667 }
    668 
    669 /*
    670  * Convert from device name to char major number.
    671  *
    672  * => Caller must ensure that the device is not detached, and therefore
    673  *    that the major number is still valid when dereferenced.
    674  */
    675 devmajor_t
    676 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    677 {
    678 	struct devsw_conv *conv;
    679 	devmajor_t cmajor;
    680 	int i;
    681 
    682 	if (name == NULL)
    683 		return (NODEVMAJOR);
    684 
    685 	mutex_enter(&device_lock);
    686 	for (i = 0 ; i < max_devsw_convs ; i++) {
    687 		size_t len;
    688 
    689 		conv = &devsw_conv[i];
    690 		if (conv->d_name == NULL)
    691 			continue;
    692 		len = strlen(conv->d_name);
    693 		if (strncmp(conv->d_name, name, len) != 0)
    694 			continue;
    695 		if (*(name +len) && !isdigit(*(name + len)))
    696 			continue;
    697 		cmajor = conv->d_cmajor;
    698 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    699 		    cdevsw[cmajor] == NULL)
    700 			break;
    701 		if (devname != NULL) {
    702 #ifdef DEVSW_DEBUG
    703 			if (strlen(conv->d_name) >= devnamelen)
    704 				printf("devsw_name2chr: too short buffer");
    705 #endif /* DEVSW_DEBUG */
    706 			strncpy(devname, conv->d_name, devnamelen);
    707 			devname[devnamelen - 1] = '\0';
    708 		}
    709 		mutex_exit(&device_lock);
    710 		return (cmajor);
    711 	}
    712 
    713 	mutex_exit(&device_lock);
    714 	return (NODEVMAJOR);
    715 }
    716 
    717 /*
    718  * Convert from character dev_t to block dev_t.
    719  *
    720  * => Caller must ensure that the device is not detached, and therefore
    721  *    that the major number is still valid when dereferenced.
    722  */
    723 dev_t
    724 devsw_chr2blk(dev_t cdev)
    725 {
    726 	devmajor_t bmajor, cmajor;
    727 	int i;
    728 	dev_t rv;
    729 
    730 	cmajor = major(cdev);
    731 	bmajor = NODEVMAJOR;
    732 	rv = NODEV;
    733 
    734 	mutex_enter(&device_lock);
    735 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    736 		mutex_exit(&device_lock);
    737 		return (NODEV);
    738 	}
    739 	for (i = 0 ; i < max_devsw_convs ; i++) {
    740 		if (devsw_conv[i].d_cmajor == cmajor) {
    741 			bmajor = devsw_conv[i].d_bmajor;
    742 			break;
    743 		}
    744 	}
    745 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    746 		rv = makedev(bmajor, minor(cdev));
    747 	mutex_exit(&device_lock);
    748 
    749 	return (rv);
    750 }
    751 
    752 /*
    753  * Convert from block dev_t to character dev_t.
    754  *
    755  * => Caller must ensure that the device is not detached, and therefore
    756  *    that the major number is still valid when dereferenced.
    757  */
    758 dev_t
    759 devsw_blk2chr(dev_t bdev)
    760 {
    761 	devmajor_t bmajor, cmajor;
    762 	int i;
    763 	dev_t rv;
    764 
    765 	bmajor = major(bdev);
    766 	cmajor = NODEVMAJOR;
    767 	rv = NODEV;
    768 
    769 	mutex_enter(&device_lock);
    770 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    771 		mutex_exit(&device_lock);
    772 		return (NODEV);
    773 	}
    774 	for (i = 0 ; i < max_devsw_convs ; i++) {
    775 		if (devsw_conv[i].d_bmajor == bmajor) {
    776 			cmajor = devsw_conv[i].d_cmajor;
    777 			break;
    778 		}
    779 	}
    780 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    781 		rv = makedev(cmajor, minor(bdev));
    782 	mutex_exit(&device_lock);
    783 
    784 	return (rv);
    785 }
    786 
/*
 * Device access methods.
 *
 * For drivers not marked D_MPSAFE, take the kernel lock around the
 * driver method.  "mpflag" must be declared in the calling function.
 * Wrapped in do { } while (0) so each macro expands to exactly one
 * statement (safe inside unbraced if/else), and the argument is
 * parenthesized per standard macro hygiene.
 */

#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = ((d)->d_flag & D_MPSAFE)) == 0) {	\
			KERNEL_LOCK(1, NULL);			\
		}						\
	} while (/*CONSTCOND*/ 0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0) {				\
			KERNEL_UNLOCK_ONE(NULL);		\
		}						\
	} while (/*CONSTCOND*/ 0)
    800 
    801 int
    802 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    803 {
    804 	const struct bdevsw *d;
    805 	int rv, mpflag;
    806 
    807 	/*
    808 	 * For open we need to lock, in order to synchronize
    809 	 * with attach/detach.
    810 	 */
    811 	mutex_enter(&device_lock);
    812 	d = bdevsw_lookup(dev);
    813 	mutex_exit(&device_lock);
    814 	if (d == NULL)
    815 		return ENXIO;
    816 
    817 	DEV_LOCK(d);
    818 	rv = (*d->d_open)(dev, flag, devtype, l);
    819 	DEV_UNLOCK(d);
    820 
    821 	return rv;
    822 }
    823 
    824 int
    825 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    826 {
    827 	const struct bdevsw *d;
    828 	int rv, mpflag;
    829 
    830 	if ((d = bdevsw_lookup(dev)) == NULL)
    831 		return ENXIO;
    832 
    833 	DEV_LOCK(d);
    834 	rv = (*d->d_close)(dev, flag, devtype, l);
    835 	DEV_UNLOCK(d);
    836 
    837 	return rv;
    838 }
    839 
/* DTrace io provider: fires io:::start for each buffer queued here. */
SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

/*
 * Hand a buffer to the block driver's strategy routine.  When no
 * driver is registered for bp->b_dev, the buffer is failed with
 * ENXIO and completed through biodone_vfs (a nullop stub until the
 * VFS layer provides the real biodone).
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp); /* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
    862 
    863 int
    864 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    865 {
    866 	const struct bdevsw *d;
    867 	int rv, mpflag;
    868 
    869 	if ((d = bdevsw_lookup(dev)) == NULL)
    870 		return ENXIO;
    871 
    872 	DEV_LOCK(d);
    873 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    874 	DEV_UNLOCK(d);
    875 
    876 	return rv;
    877 }
    878 
/*
 * Call a block driver's dump method (used for crash dumps); ENXIO
 * when no driver is registered for the device.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
    899 
    900 int
    901 bdev_type(dev_t dev)
    902 {
    903 	const struct bdevsw *d;
    904 
    905 	if ((d = bdevsw_lookup(dev)) == NULL)
    906 		return D_OTHER;
    907 	return d->d_flag & D_TYPEMASK;
    908 }
    909 
/*
 * Query a block device's size via its d_psize method; -1 when no
 * driver is registered or the driver provides no d_psize.
 * (Units are whatever d_psize reports — presumably device blocks;
 * confirm against the driver interface if it matters.)
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;	/* mpflag pre-cleared: DEV_LOCK may be skipped */

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't to try lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}
    932 
    933 int
    934 bdev_discard(dev_t dev, off_t pos, off_t len)
    935 {
    936 	const struct bdevsw *d;
    937 	int rv, mpflag;
    938 
    939 	if ((d = bdevsw_lookup(dev)) == NULL)
    940 		return ENXIO;
    941 
    942 	DEV_LOCK(d);
    943 	rv = (*d->d_discard)(dev, pos, len);
    944 	DEV_UNLOCK(d);
    945 
    946 	return rv;
    947 }
    948 
    949 int
    950 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    951 {
    952 	const struct cdevsw *d;
    953 	int rv, mpflag;
    954 
    955 	/*
    956 	 * For open we need to lock, in order to synchronize
    957 	 * with attach/detach.
    958 	 */
    959 	mutex_enter(&device_lock);
    960 	d = cdevsw_lookup(dev);
    961 	mutex_exit(&device_lock);
    962 	if (d == NULL)
    963 		return ENXIO;
    964 
    965 	DEV_LOCK(d);
    966 	rv = (*d->d_open)(dev, flag, devtype, l);
    967 	DEV_UNLOCK(d);
    968 
    969 	return rv;
    970 }
    971 
    972 int
    973 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    974 {
    975 	const struct cdevsw *d;
    976 	int rv, mpflag;
    977 
    978 	if ((d = cdevsw_lookup(dev)) == NULL)
    979 		return ENXIO;
    980 
    981 	DEV_LOCK(d);
    982 	rv = (*d->d_close)(dev, flag, devtype, l);
    983 	DEV_UNLOCK(d);
    984 
    985 	return rv;
    986 }
    987 
    988 int
    989 cdev_read(dev_t dev, struct uio *uio, int flag)
    990 {
    991 	const struct cdevsw *d;
    992 	int rv, mpflag;
    993 
    994 	if ((d = cdevsw_lookup(dev)) == NULL)
    995 		return ENXIO;
    996 
    997 	DEV_LOCK(d);
    998 	rv = (*d->d_read)(dev, uio, flag);
    999 	DEV_UNLOCK(d);
   1000 
   1001 	return rv;
   1002 }
   1003 
   1004 int
   1005 cdev_write(dev_t dev, struct uio *uio, int flag)
   1006 {
   1007 	const struct cdevsw *d;
   1008 	int rv, mpflag;
   1009 
   1010 	if ((d = cdevsw_lookup(dev)) == NULL)
   1011 		return ENXIO;
   1012 
   1013 	DEV_LOCK(d);
   1014 	rv = (*d->d_write)(dev, uio, flag);
   1015 	DEV_UNLOCK(d);
   1016 
   1017 	return rv;
   1018 }
   1019 
   1020 int
   1021 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
   1022 {
   1023 	const struct cdevsw *d;
   1024 	int rv, mpflag;
   1025 
   1026 	if ((d = cdevsw_lookup(dev)) == NULL)
   1027 		return ENXIO;
   1028 
   1029 	DEV_LOCK(d);
   1030 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
   1031 	DEV_UNLOCK(d);
   1032 
   1033 	return rv;
   1034 }
   1035 
   1036 void
   1037 cdev_stop(struct tty *tp, int flag)
   1038 {
   1039 	const struct cdevsw *d;
   1040 	int mpflag;
   1041 
   1042 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
   1043 		return;
   1044 
   1045 	DEV_LOCK(d);
   1046 	(*d->d_stop)(tp, flag);
   1047 	DEV_UNLOCK(d);
   1048 }
   1049 
   1050 struct tty *
   1051 cdev_tty(dev_t dev)
   1052 {
   1053 	const struct cdevsw *d;
   1054 
   1055 	if ((d = cdevsw_lookup(dev)) == NULL)
   1056 		return NULL;
   1057 
   1058 	/* XXX Check if necessary. */
   1059 	if (d->d_tty == NULL)
   1060 		return NULL;
   1061 
   1062 	return (*d->d_tty)(dev);
   1063 }
   1064 
   1065 int
   1066 cdev_poll(dev_t dev, int flag, lwp_t *l)
   1067 {
   1068 	const struct cdevsw *d;
   1069 	int rv, mpflag;
   1070 
   1071 	if ((d = cdevsw_lookup(dev)) == NULL)
   1072 		return POLLERR;
   1073 
   1074 	DEV_LOCK(d);
   1075 	rv = (*d->d_poll)(dev, flag, l);
   1076 	DEV_UNLOCK(d);
   1077 
   1078 	return rv;
   1079 }
   1080 
   1081 paddr_t
   1082 cdev_mmap(dev_t dev, off_t off, int flag)
   1083 {
   1084 	const struct cdevsw *d;
   1085 	paddr_t rv;
   1086 	int mpflag;
   1087 
   1088 	if ((d = cdevsw_lookup(dev)) == NULL)
   1089 		return (paddr_t)-1LL;
   1090 
   1091 	DEV_LOCK(d);
   1092 	rv = (*d->d_mmap)(dev, off, flag);
   1093 	DEV_UNLOCK(d);
   1094 
   1095 	return rv;
   1096 }
   1097 
   1098 int
   1099 cdev_kqfilter(dev_t dev, struct knote *kn)
   1100 {
   1101 	const struct cdevsw *d;
   1102 	int rv, mpflag;
   1103 
   1104 	if ((d = cdevsw_lookup(dev)) == NULL)
   1105 		return ENXIO;
   1106 
   1107 	DEV_LOCK(d);
   1108 	rv = (*d->d_kqfilter)(dev, kn);
   1109 	DEV_UNLOCK(d);
   1110 
   1111 	return rv;
   1112 }
   1113 
   1114 int
   1115 cdev_discard(dev_t dev, off_t pos, off_t len)
   1116 {
   1117 	const struct cdevsw *d;
   1118 	int rv, mpflag;
   1119 
   1120 	if ((d = cdevsw_lookup(dev)) == NULL)
   1121 		return ENXIO;
   1122 
   1123 	DEV_LOCK(d);
   1124 	rv = (*d->d_discard)(dev, pos, len);
   1125 	DEV_UNLOCK(d);
   1126 
   1127 	return rv;
   1128 }
   1129 
   1130 int
   1131 cdev_type(dev_t dev)
   1132 {
   1133 	const struct cdevsw *d;
   1134 
   1135 	if ((d = cdevsw_lookup(dev)) == NULL)
   1136 		return D_OTHER;
   1137 	return d->d_flag & D_TYPEMASK;
   1138 }
   1139