      1 /*	$NetBSD: subr_devsw.c,v 1.24 2009/01/20 18:20:48 drochner Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
      48  *	When the system initially boots, only the statically allocated
      49  *	tables (bdevsw0, cdevsw0) are used.  If these overflow because a
      50  *	driver is attached dynamically, we allocate one fixed-size block
      51  *	(MAXDEVSW entries) to hold the new, expanded table.  This "fork"
      52  *	of the table is only ever performed once, which guarantees that
      53  *	other threads may safely access the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
     63  *	XXX Currently, the wrapper methods such as cdev_read() verify
     64  *	that a device driver does in fact exist before calling the
     65  *	associated driver method.  This should be changed so that
      66  *	once the device has been referenced by a vnode (opened),
      67  *	calling the other methods is valid until that reference
      68  *	is dropped.
     69  */
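
/*
 * Example (illustrative sketch only, not part of this file): how a
 * dynamically loaded driver might register and unregister itself.  The
 * driver name "exdev" and its switch structures are hypothetical; passing
 * NODEVMAJOR asks devsw_attach() to allocate free major numbers, which
 * are handed back through the pointer arguments.
 */
#if 0
static const struct bdevsw exdev_bdevsw;	/* driver entry points omitted */
static const struct cdevsw exdev_cdevsw;	/* driver entry points omitted */
static devmajor_t exdev_bmajor = NODEVMAJOR, exdev_cmajor = NODEVMAJOR;

static int
exdev_register(void)
{

	/* Register by name; block and character majors are chosen for us. */
	return devsw_attach("exdev", &exdev_bdevsw, &exdev_bmajor,
	    &exdev_cdevsw, &exdev_cmajor);
}

static void
exdev_unregister(void)
{

	/* On driver unload, undo the registration. */
	devsw_detach(&exdev_bdevsw, &exdev_cdevsw);
}
#endif	/* example */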
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.24 2009/01/20 18:20:48 drochner Exp $");
     73 
     74 #include <sys/param.h>
     75 #include <sys/conf.h>
     76 #include <sys/kmem.h>
     77 #include <sys/systm.h>
     78 #include <sys/poll.h>
     79 #include <sys/tty.h>
     80 #include <sys/cpu.h>
     81 #include <sys/buf.h>
     82 
     83 #ifdef DEVSW_DEBUG
     84 #define	DPRINTF(x)	printf x
     85 #else /* DEVSW_DEBUG */
     86 #define	DPRINTF(x)
     87 #endif /* DEVSW_DEBUG */
     88 
      89 #define	MAXDEVSW	512	/* maximum number of device majors */
     90 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
     91 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
     92 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
     93 
     94 extern const struct bdevsw **bdevsw, *bdevsw0[];
     95 extern const struct cdevsw **cdevsw, *cdevsw0[];
     96 extern struct devsw_conv *devsw_conv, devsw_conv0[];
     97 extern const int sys_bdevsws, sys_cdevsws;
     98 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
     99 
    100 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
    101 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
    102 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    103 
    104 kmutex_t device_lock;
    105 
    106 void
    107 devsw_init(void)
    108 {
    109 
    110 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    111 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    112 	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
    113 }
    114 
    115 int
    116 devsw_attach(const char *devname,
    117 	     const struct bdevsw *bdev, devmajor_t *bmajor,
    118 	     const struct cdevsw *cdev, devmajor_t *cmajor)
    119 {
    120 	struct devsw_conv *conv;
    121 	char *name;
    122 	int error, i;
    123 
    124 	if (devname == NULL || cdev == NULL)
    125 		return (EINVAL);
    126 
    127 	mutex_enter(&device_lock);
    128 
    129 	for (i = 0 ; i < max_devsw_convs ; i++) {
    130 		conv = &devsw_conv[i];
    131 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    132 			continue;
    133 
    134 		if (*bmajor < 0)
    135 			*bmajor = conv->d_bmajor;
    136 		if (*cmajor < 0)
    137 			*cmajor = conv->d_cmajor;
    138 
    139 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    140 			error = EINVAL;
    141 			goto fail;
    142 		}
    143 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    144 			error = EINVAL;
    145 			goto fail;
    146 		}
    147 
    148 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    149 		    cdevsw[*cmajor] != NULL) {
    150 			error = EEXIST;
    151 			goto fail;
    152 		}
    153 
    154 		if (bdev != NULL)
    155 			bdevsw[*bmajor] = bdev;
    156 		cdevsw[*cmajor] = cdev;
    157 
    158 		mutex_exit(&device_lock);
    159 		return (0);
    160 	}
    161 
    162 	error = bdevsw_attach(bdev, bmajor);
    163 	if (error != 0)
    164 		goto fail;
    165 	error = cdevsw_attach(cdev, cmajor);
    166 	if (error != 0) {
    167 		devsw_detach_locked(bdev, NULL);
    168 		goto fail;
    169 	}
    170 
    171 	for (i = 0 ; i < max_devsw_convs ; i++) {
    172 		if (devsw_conv[i].d_name == NULL)
    173 			break;
    174 	}
    175 	if (i == max_devsw_convs) {
    176 		struct devsw_conv *newptr;
    177 		int old, new;
    178 
    179 		old = max_devsw_convs;
    180 		new = old + 1;
    181 
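		/*
		 * Grow the name<->major table by one entry.  The new slot is
		 * marked free (d_name == NULL, majors set to -1) before the
		 * old entries are copied across and the table pointer is
		 * switched over.
		 */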
    182 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
    183 		if (newptr == NULL) {
    184 			devsw_detach_locked(bdev, cdev);
    185 			error = ENOMEM;
    186 			goto fail;
    187 		}
    188 		newptr[old].d_name = NULL;
    189 		newptr[old].d_bmajor = -1;
    190 		newptr[old].d_cmajor = -1;
    191 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
    192 		if (devsw_conv != devsw_conv0)
    193 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
    194 		devsw_conv = newptr;
    195 		max_devsw_convs = new;
    196 	}
    197 
     198 	name = kmem_alloc(strlen(devname) + 1, KM_NOSLEEP);
     199 	if (name == NULL) {
     200 		devsw_detach_locked(bdev, cdev);
     201 		error = ENOMEM;
     202 		goto fail;
     203 	}
     204 	strlcpy(name, devname, strlen(devname) + 1);
     205 
     206 	devsw_conv[i].d_name = name;
     207 	devsw_conv[i].d_bmajor = *bmajor;
     208 	devsw_conv[i].d_cmajor = *cmajor;
    209 
    210 	mutex_exit(&device_lock);
    211 	return (0);
    212  fail:
    213 	mutex_exit(&device_lock);
    214 	return (error);
    215 }
    216 
    217 static int
    218 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
    219 {
    220 	const struct bdevsw **newptr;
    221 	devmajor_t bmajor;
    222 	int i;
    223 
    224 	KASSERT(mutex_owned(&device_lock));
    225 
    226 	if (devsw == NULL)
    227 		return (0);
    228 
    229 	if (*devmajor < 0) {
    230 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    231 			if (bdevsw[bmajor] != NULL)
    232 				continue;
    233 			for (i = 0 ; i < max_devsw_convs ; i++) {
    234 				if (devsw_conv[i].d_bmajor == bmajor)
    235 					break;
    236 			}
    237 			if (i != max_devsw_convs)
    238 				continue;
    239 			break;
    240 		}
    241 		*devmajor = bmajor;
    242 	}
    243 
    244 	if (*devmajor >= MAXDEVSW) {
     245 		printf("bdevsw_attach: block majors exhausted\n");
    246 		return (ENOMEM);
    247 	}
    248 
    249 	if (*devmajor >= max_bdevsws) {
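		/*
		 * First dynamic attach that does not fit into the statically
		 * configured table: "fork" the table once, at its maximum
		 * size, as described in the Overview above.
		 */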
    250 		KASSERT(bdevsw == bdevsw0);
    251 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    252 		if (newptr == NULL)
    253 			return (ENOMEM);
    254 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    255 		bdevsw = newptr;
    256 		max_bdevsws = MAXDEVSW;
    257 	}
    258 
    259 	if (bdevsw[*devmajor] != NULL)
    260 		return (EEXIST);
    261 
    262 	bdevsw[*devmajor] = devsw;
    263 
    264 	return (0);
    265 }
    266 
    267 static int
    268 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
    269 {
    270 	const struct cdevsw **newptr;
    271 	devmajor_t cmajor;
    272 	int i;
    273 
    274 	KASSERT(mutex_owned(&device_lock));
    275 
    276 	if (*devmajor < 0) {
    277 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    278 			if (cdevsw[cmajor] != NULL)
    279 				continue;
    280 			for (i = 0 ; i < max_devsw_convs ; i++) {
    281 				if (devsw_conv[i].d_cmajor == cmajor)
    282 					break;
    283 			}
    284 			if (i != max_devsw_convs)
    285 				continue;
    286 			break;
    287 		}
    288 		*devmajor = cmajor;
    289 	}
    290 
    291 	if (*devmajor >= MAXDEVSW) {
     292 		printf("cdevsw_attach: character majors exhausted\n");
    293 		return (ENOMEM);
    294 	}
    295 
    296 	if (*devmajor >= max_cdevsws) {
    297 		KASSERT(cdevsw == cdevsw0);
    298 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    299 		if (newptr == NULL)
    300 			return (ENOMEM);
    301 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    302 		cdevsw = newptr;
    303 		max_cdevsws = MAXDEVSW;
    304 	}
    305 
    306 	if (cdevsw[*devmajor] != NULL)
    307 		return (EEXIST);
    308 
    309 	cdevsw[*devmajor] = devsw;
    310 
    311 	return (0);
    312 }
    313 
    314 static void
    315 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
    316 {
    317 	int i;
    318 
    319 	KASSERT(mutex_owned(&device_lock));
    320 
    321 	if (bdev != NULL) {
    322 		for (i = 0 ; i < max_bdevsws ; i++) {
    323 			if (bdevsw[i] != bdev)
    324 				continue;
    325 			bdevsw[i] = NULL;
    326 			break;
    327 		}
    328 	}
    329 	if (cdev != NULL) {
    330 		for (i = 0 ; i < max_cdevsws ; i++) {
    331 			if (cdevsw[i] != cdev)
    332 				continue;
    333 			cdevsw[i] = NULL;
    334 			break;
    335 		}
    336 	}
    337 }
    338 
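/*
 * Note that detaching only clears the switch table entries; the
 * name<->major mapping in devsw_conv is left in place, so a driver that
 * is attached again under the same name gets its old major numbers back
 * (see the first loop in devsw_attach()).
 */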
    339 int
    340 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    341 {
    342 
    343 	mutex_enter(&device_lock);
    344 	devsw_detach_locked(bdev, cdev);
    345 	mutex_exit(&device_lock);
    346 	return 0;
    347 }
    348 
    349 /*
    350  * Look up a block device by number.
    351  *
    352  * => Caller must ensure that the device is attached.
    353  */
    354 const struct bdevsw *
    355 bdevsw_lookup(dev_t dev)
    356 {
    357 	devmajor_t bmajor;
    358 
    359 	if (dev == NODEV)
    360 		return (NULL);
    361 	bmajor = major(dev);
    362 	if (bmajor < 0 || bmajor >= max_bdevsws)
    363 		return (NULL);
    364 
    365 	return (bdevsw[bmajor]);
    366 }
    367 
    368 /*
    369  * Look up a character device by number.
    370  *
    371  * => Caller must ensure that the device is attached.
    372  */
    373 const struct cdevsw *
    374 cdevsw_lookup(dev_t dev)
    375 {
    376 	devmajor_t cmajor;
    377 
    378 	if (dev == NODEV)
    379 		return (NULL);
    380 	cmajor = major(dev);
    381 	if (cmajor < 0 || cmajor >= max_cdevsws)
    382 		return (NULL);
    383 
    384 	return (cdevsw[cmajor]);
    385 }
    386 
    387 /*
    388  * Look up a block device by reference to its operations set.
    389  *
    390  * => Caller must ensure that the device is not detached, and therefore
    391  *    that the returned major is still valid when dereferenced.
    392  */
    393 devmajor_t
    394 bdevsw_lookup_major(const struct bdevsw *bdev)
    395 {
    396 	devmajor_t bmajor;
    397 
    398 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    399 		if (bdevsw[bmajor] == bdev)
    400 			return (bmajor);
    401 	}
    402 
    403 	return (NODEVMAJOR);
    404 }
    405 
    406 /*
    407  * Look up a character device by reference to its operations set.
    408  *
    409  * => Caller must ensure that the device is not detached, and therefore
    410  *    that the returned major is still valid when dereferenced.
    411  */
    412 devmajor_t
    413 cdevsw_lookup_major(const struct cdevsw *cdev)
    414 {
    415 	devmajor_t cmajor;
    416 
    417 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    418 		if (cdevsw[cmajor] == cdev)
    419 			return (cmajor);
    420 	}
    421 
    422 	return (NODEVMAJOR);
    423 }
    424 
    425 /*
    426  * Convert from block major number to name.
    427  *
    428  * => Caller must ensure that the device is not detached, and therefore
    429  *    that the name pointer is still valid when dereferenced.
    430  */
    431 const char *
    432 devsw_blk2name(devmajor_t bmajor)
    433 {
    434 	const char *name;
    435 	devmajor_t cmajor;
    436 	int i;
    437 
    438 	name = NULL;
    439 	cmajor = -1;
    440 
    441 	mutex_enter(&device_lock);
    442 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    443 		mutex_exit(&device_lock);
    444 		return (NULL);
    445 	}
    446 	for (i = 0 ; i < max_devsw_convs; i++) {
    447 		if (devsw_conv[i].d_bmajor == bmajor) {
    448 			cmajor = devsw_conv[i].d_cmajor;
    449 			break;
    450 		}
    451 	}
    452 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    453 		name = devsw_conv[i].d_name;
    454 	mutex_exit(&device_lock);
    455 
    456 	return (name);
    457 }
    458 
    459 /*
    460  * Convert from device name to block major number.
    461  *
    462  * => Caller must ensure that the device is not detached, and therefore
    463  *    that the major number is still valid when dereferenced.
    464  */
    465 devmajor_t
    466 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    467 {
    468 	struct devsw_conv *conv;
    469 	devmajor_t bmajor;
    470 	int i;
    471 
    472 	if (name == NULL)
    473 		return (NODEVMAJOR);
    474 
    475 	mutex_enter(&device_lock);
    476 	for (i = 0 ; i < max_devsw_convs ; i++) {
    477 		size_t len;
    478 
    479 		conv = &devsw_conv[i];
    480 		if (conv->d_name == NULL)
    481 			continue;
    482 		len = strlen(conv->d_name);
    483 		if (strncmp(conv->d_name, name, len) != 0)
    484 			continue;
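		/*
		 * The driver name may be followed by a unit number ("wd0"
		 * matches "wd") but not by other characters ("wdc" must
		 * not match "wd").
		 */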
     485 		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
    486 			continue;
    487 		bmajor = conv->d_bmajor;
    488 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    489 		    bdevsw[bmajor] == NULL)
    490 			break;
    491 		if (devname != NULL) {
    492 #ifdef DEVSW_DEBUG
    493 			if (strlen(conv->d_name) >= devnamelen)
     494 				printf("devsw_name2blk: buffer too short\n");
    495 #endif /* DEVSW_DEBUG */
    496 			strncpy(devname, conv->d_name, devnamelen);
    497 			devname[devnamelen - 1] = '\0';
    498 		}
    499 		mutex_exit(&device_lock);
    500 		return (bmajor);
    501 	}
    502 
    503 	mutex_exit(&device_lock);
    504 	return (NODEVMAJOR);
    505 }
    506 
    507 /*
    508  * Convert from device name to char major number.
    509  *
    510  * => Caller must ensure that the device is not detached, and therefore
    511  *    that the major number is still valid when dereferenced.
    512  */
    513 devmajor_t
    514 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    515 {
    516 	struct devsw_conv *conv;
    517 	devmajor_t cmajor;
    518 	int i;
    519 
    520 	if (name == NULL)
    521 		return (NODEVMAJOR);
    522 
    523 	mutex_enter(&device_lock);
    524 	for (i = 0 ; i < max_devsw_convs ; i++) {
    525 		size_t len;
    526 
    527 		conv = &devsw_conv[i];
    528 		if (conv->d_name == NULL)
    529 			continue;
    530 		len = strlen(conv->d_name);
    531 		if (strncmp(conv->d_name, name, len) != 0)
    532 			continue;
     533 		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
    534 			continue;
    535 		cmajor = conv->d_cmajor;
    536 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    537 		    cdevsw[cmajor] == NULL)
    538 			break;
    539 		if (devname != NULL) {
    540 #ifdef DEVSW_DEBUG
    541 			if (strlen(conv->d_name) >= devnamelen)
     542 				printf("devsw_name2chr: buffer too short\n");
    543 #endif /* DEVSW_DEBUG */
    544 			strncpy(devname, conv->d_name, devnamelen);
    545 			devname[devnamelen - 1] = '\0';
    546 		}
    547 		mutex_exit(&device_lock);
    548 		return (cmajor);
    549 	}
    550 
    551 	mutex_exit(&device_lock);
    552 	return (NODEVMAJOR);
    553 }
    554 
    555 /*
    556  * Convert from character dev_t to block dev_t.
    557  *
    558  * => Caller must ensure that the device is not detached, and therefore
    559  *    that the major number is still valid when dereferenced.
    560  */
    561 dev_t
    562 devsw_chr2blk(dev_t cdev)
    563 {
    564 	devmajor_t bmajor, cmajor;
    565 	int i;
    566 	dev_t rv;
    567 
    568 	cmajor = major(cdev);
    569 	bmajor = NODEVMAJOR;
    570 	rv = NODEV;
    571 
    572 	mutex_enter(&device_lock);
    573 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    574 		mutex_exit(&device_lock);
    575 		return (NODEV);
    576 	}
    577 	for (i = 0 ; i < max_devsw_convs ; i++) {
    578 		if (devsw_conv[i].d_cmajor == cmajor) {
    579 			bmajor = devsw_conv[i].d_bmajor;
    580 			break;
    581 		}
    582 	}
    583 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    584 		rv = makedev(bmajor, minor(cdev));
    585 	mutex_exit(&device_lock);
    586 
    587 	return (rv);
    588 }
    589 
    590 /*
    591  * Convert from block dev_t to character dev_t.
    592  *
    593  * => Caller must ensure that the device is not detached, and therefore
    594  *    that the major number is still valid when dereferenced.
    595  */
    596 dev_t
    597 devsw_blk2chr(dev_t bdev)
    598 {
    599 	devmajor_t bmajor, cmajor;
    600 	int i;
    601 	dev_t rv;
    602 
    603 	bmajor = major(bdev);
    604 	cmajor = NODEVMAJOR;
    605 	rv = NODEV;
    606 
    607 	mutex_enter(&device_lock);
    608 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    609 		mutex_exit(&device_lock);
    610 		return (NODEV);
    611 	}
    612 	for (i = 0 ; i < max_devsw_convs ; i++) {
    613 		if (devsw_conv[i].d_bmajor == bmajor) {
    614 			cmajor = devsw_conv[i].d_cmajor;
    615 			break;
    616 		}
    617 	}
    618 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    619 		rv = makedev(cmajor, minor(bdev));
    620 	mutex_exit(&device_lock);
    621 
    622 	return (rv);
    623 }
    624 
    625 /*
    626  * Device access methods.
    627  */
    628 
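/*
 * Each wrapper below brackets the driver call with DEV_LOCK()/DEV_UNLOCK():
 * drivers that do not set D_MPSAFE in d_flag are called with the kernel
 * lock held, while D_MPSAFE drivers are called without further locking.
 */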
    629 #define	DEV_LOCK(d)						\
    630 	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
    631 		KERNEL_LOCK(1, NULL);				\
    632 	}
    633 
    634 #define	DEV_UNLOCK(d)						\
    635 	if (mpflag == 0) {					\
    636 		KERNEL_UNLOCK_ONE(NULL);			\
    637 	}
    638 
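/*
 * Illustrative sketch only (assumes <sys/fcntl.h> and <sys/stat.h> are
 * available and that "cmajor" was obtained elsewhere, e.g. from
 * devsw_name2chr()): how an upper layer might dispatch through these
 * wrappers using a dev_t.
 */
#if 0
	dev_t dev = makedev(cmajor, 0);		/* hypothetical unit 0 */
	int error;

	error = cdev_open(dev, FREAD | FWRITE, S_IFCHR, curlwp);
	if (error == 0 && cdev_type(dev) == D_TTY) {
		/* The driver behind "dev" is a terminal device. */
	}
#endif	/* example */
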
    639 int
    640 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    641 {
    642 	const struct bdevsw *d;
    643 	int rv, mpflag;
    644 
    645 	/*
    646 	 * For open we need to lock, in order to synchronize
    647 	 * with attach/detach.
    648 	 */
    649 	mutex_enter(&device_lock);
    650 	d = bdevsw_lookup(dev);
    651 	mutex_exit(&device_lock);
    652 	if (d == NULL)
    653 		return ENXIO;
    654 
    655 	DEV_LOCK(d);
    656 	rv = (*d->d_open)(dev, flag, devtype, l);
    657 	DEV_UNLOCK(d);
    658 
    659 	return rv;
    660 }
    661 
    662 int
    663 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    664 {
    665 	const struct bdevsw *d;
    666 	int rv, mpflag;
    667 
    668 	if ((d = bdevsw_lookup(dev)) == NULL)
    669 		return ENXIO;
    670 
    671 	DEV_LOCK(d);
    672 	rv = (*d->d_close)(dev, flag, devtype, l);
    673 	DEV_UNLOCK(d);
    674 
    675 	return rv;
    676 }
    677 
    678 void
    679 bdev_strategy(struct buf *bp)
    680 {
    681 	const struct bdevsw *d;
    682 	int mpflag;
    683 
    684 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
    685 		panic("bdev_strategy");
    686 
    687 	DEV_LOCK(d);
    688 	(*d->d_strategy)(bp);
    689 	DEV_UNLOCK(d);
    690 }
    691 
    692 int
    693 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    694 {
    695 	const struct bdevsw *d;
    696 	int rv, mpflag;
    697 
    698 	if ((d = bdevsw_lookup(dev)) == NULL)
    699 		return ENXIO;
    700 
    701 	DEV_LOCK(d);
    702 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    703 	DEV_UNLOCK(d);
    704 
    705 	return rv;
    706 }
    707 
    708 int
    709 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    710 {
    711 	const struct bdevsw *d;
    712 	int rv;
    713 
    714 	/*
    715 	 * Dump can be called without the device open.  Since it can
    716 	 * currently only be called with the system paused (and in a
    717 	 * potentially unstable state), we don't perform any locking.
    718 	 */
    719 	if ((d = bdevsw_lookup(dev)) == NULL)
    720 		return ENXIO;
    721 
    722 	/* DEV_LOCK(d); */
    723 	rv = (*d->d_dump)(dev, addr, data, sz);
    724 	/* DEV_UNLOCK(d); */
    725 
    726 	return rv;
    727 }
    728 
    729 int
    730 bdev_type(dev_t dev)
    731 {
    732 	const struct bdevsw *d;
    733 
    734 	if ((d = bdevsw_lookup(dev)) == NULL)
    735 		return D_OTHER;
    736 	return d->d_flag & D_TYPEMASK;
    737 }
    738 
    739 int
    740 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    741 {
    742 	const struct cdevsw *d;
    743 	int rv, mpflag;
    744 
    745 	/*
    746 	 * For open we need to lock, in order to synchronize
    747 	 * with attach/detach.
    748 	 */
    749 	mutex_enter(&device_lock);
    750 	d = cdevsw_lookup(dev);
    751 	mutex_exit(&device_lock);
    752 	if (d == NULL)
    753 		return ENXIO;
    754 
    755 	DEV_LOCK(d);
    756 	rv = (*d->d_open)(dev, flag, devtype, l);
    757 	DEV_UNLOCK(d);
    758 
    759 	return rv;
    760 }
    761 
    762 int
    763 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    764 {
    765 	const struct cdevsw *d;
    766 	int rv, mpflag;
    767 
    768 	if ((d = cdevsw_lookup(dev)) == NULL)
    769 		return ENXIO;
    770 
    771 	DEV_LOCK(d);
    772 	rv = (*d->d_close)(dev, flag, devtype, l);
    773 	DEV_UNLOCK(d);
    774 
    775 	return rv;
    776 }
    777 
    778 int
    779 cdev_read(dev_t dev, struct uio *uio, int flag)
    780 {
    781 	const struct cdevsw *d;
    782 	int rv, mpflag;
    783 
    784 	if ((d = cdevsw_lookup(dev)) == NULL)
    785 		return ENXIO;
    786 
    787 	DEV_LOCK(d);
    788 	rv = (*d->d_read)(dev, uio, flag);
    789 	DEV_UNLOCK(d);
    790 
    791 	return rv;
    792 }
    793 
    794 int
    795 cdev_write(dev_t dev, struct uio *uio, int flag)
    796 {
    797 	const struct cdevsw *d;
    798 	int rv, mpflag;
    799 
    800 	if ((d = cdevsw_lookup(dev)) == NULL)
    801 		return ENXIO;
    802 
    803 	DEV_LOCK(d);
    804 	rv = (*d->d_write)(dev, uio, flag);
    805 	DEV_UNLOCK(d);
    806 
    807 	return rv;
    808 }
    809 
    810 int
    811 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    812 {
    813 	const struct cdevsw *d;
    814 	int rv, mpflag;
    815 
    816 	if ((d = cdevsw_lookup(dev)) == NULL)
    817 		return ENXIO;
    818 
    819 	DEV_LOCK(d);
    820 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    821 	DEV_UNLOCK(d);
    822 
    823 	return rv;
    824 }
    825 
    826 void
    827 cdev_stop(struct tty *tp, int flag)
    828 {
    829 	const struct cdevsw *d;
    830 	int mpflag;
    831 
    832 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
    833 		return;
    834 
    835 	DEV_LOCK(d);
    836 	(*d->d_stop)(tp, flag);
    837 	DEV_UNLOCK(d);
    838 }
    839 
    840 struct tty *
    841 cdev_tty(dev_t dev)
    842 {
    843 	const struct cdevsw *d;
    844 
    845 	if ((d = cdevsw_lookup(dev)) == NULL)
    846 		return NULL;
    847 
    848 	/* XXX Check if necessary. */
    849 	if (d->d_tty == NULL)
    850 		return NULL;
    851 
    852 	return (*d->d_tty)(dev);
    853 }
    854 
    855 int
    856 cdev_poll(dev_t dev, int flag, lwp_t *l)
    857 {
    858 	const struct cdevsw *d;
    859 	int rv, mpflag;
    860 
    861 	if ((d = cdevsw_lookup(dev)) == NULL)
    862 		return POLLERR;
    863 
    864 	DEV_LOCK(d);
    865 	rv = (*d->d_poll)(dev, flag, l);
    866 	DEV_UNLOCK(d);
    867 
    868 	return rv;
    869 }
    870 
    871 paddr_t
    872 cdev_mmap(dev_t dev, off_t off, int flag)
    873 {
    874 	const struct cdevsw *d;
    875 	paddr_t rv;
    876 	int mpflag;
    877 
    878 	if ((d = cdevsw_lookup(dev)) == NULL)
    879 		return (paddr_t)-1LL;
    880 
    881 	DEV_LOCK(d);
    882 	rv = (*d->d_mmap)(dev, off, flag);
    883 	DEV_UNLOCK(d);
    884 
    885 	return rv;
    886 }
    887 
    888 int
    889 cdev_kqfilter(dev_t dev, struct knote *kn)
    890 {
    891 	const struct cdevsw *d;
    892 	int rv, mpflag;
    893 
    894 	if ((d = cdevsw_lookup(dev)) == NULL)
    895 		return ENXIO;
    896 
    897 	DEV_LOCK(d);
    898 	rv = (*d->d_kqfilter)(dev, kn);
    899 	DEV_UNLOCK(d);
    900 
    901 	return rv;
    902 }
    903 
    904 int
    905 cdev_type(dev_t dev)
    906 {
    907 	const struct cdevsw *d;
    908 
    909 	if ((d = cdevsw_lookup(dev)) == NULL)
    910 		return D_OTHER;
    911 	return d->d_flag & D_TYPEMASK;
    912 }
    913