/*	$NetBSD: subr_devsw.c,v 1.17.4.2 2009/05/04 08:13:47 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots, only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	dynamic driver attachment, we allocate a fixed block of memory
 *	to hold the new, expanded index.  This "fork" of the table is
 *	only ever performed once in order to guarantee that other
 *	threads may safely access the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that,
 *	once the device has been referenced by a vnode (opened),
 *	calls to the other methods remain valid until that reference
 *	is dropped.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.17.4.2 2009/05/04 08:13:47 yamt Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;

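/*
 * Initialize the device switch subsystem: sanity check the sizes of
 * the statically configured tables and set up the lock that guards
 * all dynamic updates to them.
 */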
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
}

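/*
 * Attach a driver's device switch(es) to the system, registering them
 * under the given name.  A character device switch is required; a
 * block device switch is optional.  Negative *bmajor/*cmajor values
 * request dynamic allocation of a major number; the values chosen (or
 * reused from a previous attachment under the same name) are returned
 * through the same pointers.
 */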
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}

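/*
 * Enter a block device switch into the bdevsw table.  If *devmajor is
 * negative, pick a free major above the statically configured range
 * that is not already reserved by a name-to-major conversion entry.
 * On the first dynamic attach the table is expanded to its maximum
 * size (MAXDEVSW).  Called with device_lock held.
 */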
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}

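/*
 * Enter a character device switch into the cdevsw table; the character
 * device counterpart of bdevsw_attach().  Called with device_lock held.
 */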
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}

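/*
 * Remove the given block and/or character device switches from the
 * tables.  The name-to-major conversion entries are left in place, so
 * a driver that re-attaches under the same name is assigned the same
 * majors again.  Called with device_lock held.
 */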
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;
			bdevsw[i] = NULL;
			break;
		}
	}
	if (cdev != NULL) {
		for (i = 0 ; i < max_cdevsws ; i++) {
			if (cdevsw[i] != cdev)
				continue;
			cdevsw[i] = NULL;
			break;
		}
	}
}

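/*
 * Remove a device driver from the system: takes device_lock and calls
 * devsw_detach_locked().  Always succeeds.
 */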
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	devmajor_t bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	devmajor_t cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (bmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (cmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Device access methods.
 */

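/*
 * For drivers that are not marked D_MPSAFE, take and release the
 * kernel lock around each device method call.
 */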
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}

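/*
 * Call the d_open method of the block driver for the given device,
 * returning ENXIO if no driver is attached at that major.
 */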
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

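/*
 * Pass a buffer to the d_strategy routine of the block driver for
 * bp->b_dev.  Panics if no driver is attached at that major.
 */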
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

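/*
 * Return the device class bits (D_DISK, D_TAPE, D_TTY, ...) of a block
 * device's flags, or D_OTHER if no driver is attached.
 */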
int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

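/*
 * Return the struct tty associated with a character device, or NULL if
 * no driver is attached or the driver provides no d_tty method.
 */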
struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

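/*
 * Call the d_mmap method of a character device, returning (paddr_t)-1
 * if no driver is attached at that major.
 */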
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

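/*
 * Return the device class bits (D_DISK, D_TAPE, D_TTY, ...) of a
 * character device's flags, or D_OTHER if no driver is attached.
 */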
int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}