subr_devsw.c revision 1.7.16.6
      1 /*	$NetBSD: subr_devsw.c,v 1.7.16.6 2008/03/24 09:39:02 yamt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Overview
     41  *
     42  *	subr_devsw.c: registers device drivers by name and by major
     43  *	number, and provides wrapper methods for performing I/O and
     44  *	other tasks on device drivers, keying on the device number
     45  *	(dev_t).
     46  *
     47  *	When the system is built, the config(8) command generates
     48  *	static tables of device drivers built into the kernel image
     49  *	along with their associated methods.  These are recorded in
     50  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     51  *	and removed from the system dynamically.
     52  *
     53  * Allocation
     54  *
 *	When the system initially boots, only the statically allocated
 *	tables (bdevsw0, cdevsw0) are used.  If these overflow because
 *	additional majors are attached at run time, we allocate a fixed
 *	block of memory to hold the new, expanded tables.  This "fork" of
 *	the tables is only ever performed once, which guarantees that
 *	other threads may safely access the device tables:
     61  *
     62  *	o Once a thread has a "reference" to the table via an earlier
     63  *	  open() call, we know that the entry in the table must exist
     64  *	  and so it is safe to access it.
     65  *
     66  *	o Regardless of whether other threads see the old or new
     67  *	  pointers, they will point to a correct device switch
     68  *	  structure for the operation being performed.
     69  *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that,
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods remains valid until that reference
 *	is dropped.
     76  */
     77 
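/*
 * Example: a minimal sketch of how a driver might register a character
 * device switch with devsw_attach() and remove it again with
 * devsw_detach().  The "exampledev" names are hypothetical; the method
 * signatures mirror the cdev_*() wrappers at the end of this file.
 */
#if 0	/* illustrative sketch only */
static int
exampledev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{

	return 0;
}

static int
exampledev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{

	return 0;
}

static const struct cdevsw exampledev_cdevsw = {
	.d_open = exampledev_open,
	.d_close = exampledev_close,
	/* Remaining methods omitted for brevity. */
	.d_flag = D_OTHER,
};

static int
exampledev_register(void)
{
	int bmajor = -1, cmajor = -1;

	/*
	 * Passing -1 asks devsw_attach() to pick free majors; the block
	 * device switch is optional and omitted here (NULL).
	 */
	return devsw_attach("exampledev", NULL, &bmajor,
	    &exampledev_cdevsw, &cmajor);
}

static void
exampledev_unregister(void)
{

	devsw_detach(NULL, &exampledev_cdevsw);
}
#endif	/* illustrative sketch only */
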
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.7.16.6 2008/03/24 09:39:02 yamt Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/conf.h>
     83 #include <sys/kmem.h>
     84 #include <sys/systm.h>
     85 #include <sys/poll.h>
     86 #include <sys/tty.h>
     87 #include <sys/cpu.h>
     88 #include <sys/buf.h>
     89 
     90 #ifdef DEVSW_DEBUG
     91 #define	DPRINTF(x)	printf x
     92 #else /* DEVSW_DEBUG */
     93 #define	DPRINTF(x)
     94 #endif /* DEVSW_DEBUG */
     95 
#define	MAXDEVSW	512	/* the maximum number of device majors */
     97 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
     98 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
     99 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
    100 
    101 extern const struct bdevsw **bdevsw, *bdevsw0[];
    102 extern const struct cdevsw **cdevsw, *cdevsw0[];
    103 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    104 extern const int sys_bdevsws, sys_cdevsws;
    105 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    106 
    107 static int bdevsw_attach(const struct bdevsw *, int *);
    108 static int cdevsw_attach(const struct cdevsw *, int *);
    109 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    110 
    111 kmutex_t devsw_lock;
    112 
    113 void
    114 devsw_init(void)
    115 {
    116 
    117 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    118 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    119 
    120 	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
    121 }
    122 
    123 int
    124 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    125 	     const struct cdevsw *cdev, int *cmajor)
    126 {
    127 	struct devsw_conv *conv;
    128 	char *name;
	size_t len;
	int error, i;
    130 
    131 	if (devname == NULL || cdev == NULL)
    132 		return (EINVAL);
    133 
    134 	mutex_enter(&devsw_lock);
    135 
    136 	for (i = 0 ; i < max_devsw_convs ; i++) {
    137 		conv = &devsw_conv[i];
    138 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    139 			continue;
    140 
    141 		if (*bmajor < 0)
    142 			*bmajor = conv->d_bmajor;
    143 		if (*cmajor < 0)
    144 			*cmajor = conv->d_cmajor;
    145 
    146 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    147 			error = EINVAL;
    148 			goto fail;
    149 		}
    150 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    151 			error = EINVAL;
    152 			goto fail;
    153 		}
    154 
    155 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    156 		    cdevsw[*cmajor] != NULL) {
    157 			error = EEXIST;
    158 			goto fail;
    159 		}
    160 
    161 		if (bdev != NULL)
    162 			bdevsw[*bmajor] = bdev;
    163 		cdevsw[*cmajor] = cdev;
    164 
    165 		mutex_exit(&devsw_lock);
    166 		return (0);
    167 	}
    168 
    169 	error = bdevsw_attach(bdev, bmajor);
    170 	if (error != 0)
    171 		goto fail;
    172 	error = cdevsw_attach(cdev, cmajor);
    173 	if (error != 0) {
    174 		devsw_detach_locked(bdev, NULL);
    175 		goto fail;
    176 	}
    177 
    178 	for (i = 0 ; i < max_devsw_convs ; i++) {
    179 		if (devsw_conv[i].d_name == NULL)
    180 			break;
    181 	}
    182 	if (i == max_devsw_convs) {
    183 		struct devsw_conv *newptr;
    184 		int old, new;
    185 
    186 		old = max_devsw_convs;
    187 		new = old + 1;
    188 
    189 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
    190 		if (newptr == NULL) {
    191 			devsw_detach_locked(bdev, cdev);
    192 			error = ENOMEM;
    193 			goto fail;
    194 		}
    195 		newptr[old].d_name = NULL;
    196 		newptr[old].d_bmajor = -1;
    197 		newptr[old].d_cmajor = -1;
    198 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
    199 		if (devsw_conv != devsw_conv0)
    200 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
    201 		devsw_conv = newptr;
    202 		max_devsw_convs = new;
    203 	}
    204 
	/* Record the name so the majors can be looked up by name later. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;
    216 
    217 	mutex_exit(&devsw_lock);
    218 	return (0);
    219  fail:
    220 	mutex_exit(&devsw_lock);
    221 	return (error);
    222 }
    223 
    224 static int
    225 bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
    226 {
    227 	const struct bdevsw **newptr;
    228 	int bmajor, i;
    229 
    230 	KASSERT(mutex_owned(&devsw_lock));
    231 
    232 	if (devsw == NULL)
    233 		return (0);
    234 
    235 	if (*devmajor < 0) {
    236 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    237 			if (bdevsw[bmajor] != NULL)
    238 				continue;
    239 			for (i = 0 ; i < max_devsw_convs ; i++) {
    240 				if (devsw_conv[i].d_bmajor == bmajor)
    241 					break;
    242 			}
    243 			if (i != max_devsw_convs)
    244 				continue;
    245 			break;
    246 		}
    247 		*devmajor = bmajor;
    248 	}
    249 
    250 	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted\n");
    252 		return (ENOMEM);
    253 	}
    254 
    255 	if (*devmajor >= max_bdevsws) {
    256 		KASSERT(bdevsw == bdevsw0);
    257 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    258 		if (newptr == NULL)
    259 			return (ENOMEM);
    260 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    261 		bdevsw = newptr;
    262 		max_bdevsws = MAXDEVSW;
    263 	}
    264 
    265 	if (bdevsw[*devmajor] != NULL)
    266 		return (EEXIST);
    267 
    268 	bdevsw[*devmajor] = devsw;
    269 
    270 	return (0);
    271 }
    272 
    273 static int
    274 cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
    275 {
    276 	const struct cdevsw **newptr;
    277 	int cmajor, i;
    278 
    279 	KASSERT(mutex_owned(&devsw_lock));
    280 
    281 	if (*devmajor < 0) {
    282 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    283 			if (cdevsw[cmajor] != NULL)
    284 				continue;
    285 			for (i = 0 ; i < max_devsw_convs ; i++) {
    286 				if (devsw_conv[i].d_cmajor == cmajor)
    287 					break;
    288 			}
    289 			if (i != max_devsw_convs)
    290 				continue;
    291 			break;
    292 		}
    293 		*devmajor = cmajor;
    294 	}
    295 
    296 	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted\n");
    298 		return (ENOMEM);
    299 	}
    300 
    301 	if (*devmajor >= max_cdevsws) {
    302 		KASSERT(cdevsw == cdevsw0);
    303 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    304 		if (newptr == NULL)
    305 			return (ENOMEM);
    306 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    307 		cdevsw = newptr;
    308 		max_cdevsws = MAXDEVSW;
    309 	}
    310 
    311 	if (cdevsw[*devmajor] != NULL)
    312 		return (EEXIST);
    313 
    314 	cdevsw[*devmajor] = devsw;
    315 
    316 	return (0);
    317 }
    318 
    319 static void
    320 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
    321 {
    322 	int i;
    323 
    324 	KASSERT(mutex_owned(&devsw_lock));
    325 
    326 	if (bdev != NULL) {
    327 		for (i = 0 ; i < max_bdevsws ; i++) {
    328 			if (bdevsw[i] != bdev)
    329 				continue;
    330 			bdevsw[i] = NULL;
    331 			break;
    332 		}
    333 	}
    334 	if (cdev != NULL) {
    335 		for (i = 0 ; i < max_cdevsws ; i++) {
    336 			if (cdevsw[i] != cdev)
    337 				continue;
    338 			cdevsw[i] = NULL;
    339 			break;
    340 		}
    341 	}
    342 }
    343 
    344 void
    345 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    346 {
    347 
    348 	mutex_enter(&devsw_lock);
    349 	devsw_detach_locked(bdev, cdev);
    350 	mutex_exit(&devsw_lock);
    351 }
    352 
    353 /*
    354  * Look up a block device by number.
    355  *
    356  * => Caller must ensure that the device is attached.
    357  */
    358 const struct bdevsw *
    359 bdevsw_lookup(dev_t dev)
    360 {
    361 	int bmajor;
    362 
    363 	if (dev == NODEV)
    364 		return (NULL);
    365 	bmajor = major(dev);
    366 	if (bmajor < 0 || bmajor >= max_bdevsws)
    367 		return (NULL);
    368 
    369 	return (bdevsw[bmajor]);
    370 }
    371 
    372 /*
    373  * Look up a character device by number.
    374  *
    375  * => Caller must ensure that the device is attached.
    376  */
    377 const struct cdevsw *
    378 cdevsw_lookup(dev_t dev)
    379 {
    380 	int cmajor;
    381 
    382 	if (dev == NODEV)
    383 		return (NULL);
    384 	cmajor = major(dev);
    385 	if (cmajor < 0 || cmajor >= max_cdevsws)
    386 		return (NULL);
    387 
    388 	return (cdevsw[cmajor]);
    389 }
    390 
    391 /*
    392  * Look up a block device by reference to its operations set.
    393  *
    394  * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when used.
    396  */
    397 int
    398 bdevsw_lookup_major(const struct bdevsw *bdev)
    399 {
    400 	int bmajor;
    401 
    402 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    403 		if (bdevsw[bmajor] == bdev)
    404 			return (bmajor);
    405 	}
    406 
    407 	return (-1);
    408 }
    409 
    410 /*
    411  * Look up a character device by reference to its operations set.
    412  *
    413  * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when used.
    415  */
    416 int
    417 cdevsw_lookup_major(const struct cdevsw *cdev)
    418 {
    419 	int cmajor;
    420 
    421 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    422 		if (cdevsw[cmajor] == cdev)
    423 			return (cmajor);
    424 	}
    425 
    426 	return (-1);
    427 }
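
/*
 * Example: a minimal sketch showing how the lookup routines above are
 * used together.  The helper name example_major_of() is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int
example_major_of(dev_t dev)
{
	const struct cdevsw *cd;

	cd = cdevsw_lookup(dev);	/* NULL if no driver at this major */
	if (cd == NULL)
		return -1;

	/* While the device remains attached, this equals major(dev). */
	return cdevsw_lookup_major(cd);
}
#endif	/* illustrative sketch only */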
    428 
    429 /*
    430  * Convert from block major number to name.
    431  *
    432  * => Caller must ensure that the device is not detached, and therefore
    433  *    that the name pointer is still valid when dereferenced.
    434  */
    435 const char *
    436 devsw_blk2name(int bmajor)
    437 {
    438 	const char *name;
    439 	int cmajor, i;
    440 
    441 	name = NULL;
    442 	cmajor = -1;
    443 
    444 	mutex_enter(&devsw_lock);
    445 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    446 		mutex_exit(&devsw_lock);
    447 		return (NULL);
    448 	}
    449 	for (i = 0 ; i < max_devsw_convs; i++) {
    450 		if (devsw_conv[i].d_bmajor == bmajor) {
    451 			cmajor = devsw_conv[i].d_cmajor;
    452 			break;
    453 		}
    454 	}
    455 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    456 		name = devsw_conv[i].d_name;
    457 	mutex_exit(&devsw_lock);
    458 
    459 	return (name);
    460 }
    461 
    462 /*
    463  * Convert from device name to block major number.
    464  *
    465  * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when used.
    467  */
    468 int
    469 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    470 {
    471 	struct devsw_conv *conv;
    472 	int bmajor, i;
    473 
    474 	if (name == NULL)
    475 		return (-1);
    476 
    477 	mutex_enter(&devsw_lock);
    478 	for (i = 0 ; i < max_devsw_convs ; i++) {
    479 		size_t len;
    480 
    481 		conv = &devsw_conv[i];
    482 		if (conv->d_name == NULL)
    483 			continue;
    484 		len = strlen(conv->d_name);
    485 		if (strncmp(conv->d_name, name, len) != 0)
    486 			continue;
		if (*(name + len) && !isdigit(*(name + len)))
    488 			continue;
    489 		bmajor = conv->d_bmajor;
    490 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    491 		    bdevsw[bmajor] == NULL)
    492 			break;
    493 		if (devname != NULL) {
    494 #ifdef DEVSW_DEBUG
    495 			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too small\n");
    497 #endif /* DEVSW_DEBUG */
    498 			strncpy(devname, conv->d_name, devnamelen);
    499 			devname[devnamelen - 1] = '\0';
    500 		}
    501 		mutex_exit(&devsw_lock);
    502 		return (bmajor);
    503 	}
    504 
    505 	mutex_exit(&devsw_lock);
    506 	return (-1);
    507 }
    508 
    509 /*
    510  * Convert from device name to char major number.
    511  *
    512  * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when used.
    514  */
    515 int
    516 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    517 {
    518 	struct devsw_conv *conv;
    519 	int cmajor, i;
    520 
    521 	if (name == NULL)
    522 		return (-1);
    523 
    524 	mutex_enter(&devsw_lock);
    525 	for (i = 0 ; i < max_devsw_convs ; i++) {
    526 		size_t len;
    527 
    528 		conv = &devsw_conv[i];
    529 		if (conv->d_name == NULL)
    530 			continue;
    531 		len = strlen(conv->d_name);
    532 		if (strncmp(conv->d_name, name, len) != 0)
    533 			continue;
		if (*(name + len) && !isdigit(*(name + len)))
    535 			continue;
    536 		cmajor = conv->d_cmajor;
    537 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    538 		    cdevsw[cmajor] == NULL)
    539 			break;
    540 		if (devname != NULL) {
    541 #ifdef DEVSW_DEBUG
    542 			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: buffer too small\n");
    544 #endif /* DEVSW_DEBUG */
    545 			strncpy(devname, conv->d_name, devnamelen);
    546 			devname[devnamelen - 1] = '\0';
    547 		}
    548 		mutex_exit(&devsw_lock);
    549 		return (cmajor);
    550 	}
    551 
    552 	mutex_exit(&devsw_lock);
    553 	return (-1);
    554 }
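
/*
 * Example: devsw_name2blk() and devsw_name2chr() accept a driver name
 * optionally followed by a unit number (e.g. "wd0") and return the
 * corresponding major.  A sketch of a hypothetical helper that builds
 * a character dev_t for minor 0:
 */
#if 0	/* illustrative sketch only */
static dev_t
example_chrdev_by_name(const char *name)
{
	char canon[16];
	int cmajor;

	cmajor = devsw_name2chr(name, canon, sizeof(canon));
	if (cmajor == -1)
		return NODEV;

	/* canon now holds the bare driver name; use minor 0. */
	return makedev(cmajor, 0);
}
#endif	/* illustrative sketch only */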
    555 
    556 /*
    557  * Convert from character dev_t to block dev_t.
    558  *
    559  * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when used.
    561  */
    562 dev_t
    563 devsw_chr2blk(dev_t cdev)
    564 {
    565 	int bmajor, cmajor, i;
    566 	dev_t rv;
    567 
    568 	cmajor = major(cdev);
    569 	bmajor = -1;
    570 	rv = NODEV;
    571 
    572 	mutex_enter(&devsw_lock);
    573 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    574 		mutex_exit(&devsw_lock);
    575 		return (NODEV);
    576 	}
    577 	for (i = 0 ; i < max_devsw_convs ; i++) {
    578 		if (devsw_conv[i].d_cmajor == cmajor) {
    579 			bmajor = devsw_conv[i].d_bmajor;
    580 			break;
    581 		}
    582 	}
    583 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    584 		rv = makedev(bmajor, minor(cdev));
    585 	mutex_exit(&devsw_lock);
    586 
    587 	return (rv);
    588 }
    589 
    590 /*
    591  * Convert from block dev_t to character dev_t.
    592  *
    593  * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when used.
    595  */
    596 dev_t
    597 devsw_blk2chr(dev_t bdev)
    598 {
    599 	int bmajor, cmajor, i;
    600 	dev_t rv;
    601 
    602 	bmajor = major(bdev);
    603 	cmajor = -1;
    604 	rv = NODEV;
    605 
    606 	mutex_enter(&devsw_lock);
    607 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    608 		mutex_exit(&devsw_lock);
    609 		return (NODEV);
    610 	}
    611 	for (i = 0 ; i < max_devsw_convs ; i++) {
    612 		if (devsw_conv[i].d_bmajor == bmajor) {
    613 			cmajor = devsw_conv[i].d_cmajor;
    614 			break;
    615 		}
    616 	}
    617 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    618 		rv = makedev(cmajor, minor(bdev));
    619 	mutex_exit(&devsw_lock);
    620 
    621 	return (rv);
    622 }
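
/*
 * Example: a sketch of converting a character ("raw") dev_t into its
 * block counterpart with devsw_chr2blk(), preserving the minor number.
 * The helper name example_block_of() is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int
example_block_of(dev_t chrdev, dev_t *blkdevp)
{
	dev_t blkdev;

	blkdev = devsw_chr2blk(chrdev);
	if (blkdev == NODEV)
		return ENXIO;	/* no paired block device */

	*blkdevp = blkdev;
	return 0;
}
#endif	/* illustrative sketch only */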
    623 
    624 /*
    625  * Device access methods.
    626  */
    627 
    628 #define	DEV_LOCK(d)						\
    629 	if ((d->d_flag & D_MPSAFE) == 0) {			\
    630 		KERNEL_LOCK(1, curlwp);				\
    631 	}
    632 
    633 #define	DEV_UNLOCK(d)						\
    634 	if ((d->d_flag & D_MPSAFE) == 0) {			\
    635 		KERNEL_UNLOCK_ONE(curlwp);			\
    636 	}
    637 
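/*
 * Example: a driver that does its own locking can set D_MPSAFE in
 * d_flag, so the wrappers below skip the kernel lock; otherwise every
 * call is bracketed by KERNEL_LOCK()/KERNEL_UNLOCK_ONE() as in the
 * macros above.  A sketch, with hypothetical "examplesafe" methods:
 */
#if 0	/* illustrative sketch only */
static int
examplesafe_open(dev_t dev, int flag, int devtype, lwp_t *l)
{

	return 0;
}

static int
examplesafe_close(dev_t dev, int flag, int devtype, lwp_t *l)
{

	return 0;
}

static const struct cdevsw examplesafe_cdevsw = {
	.d_open = examplesafe_open,
	.d_close = examplesafe_close,
	.d_flag = D_OTHER | D_MPSAFE,	/* wrappers won't take KERNEL_LOCK */
};
#endif	/* illustrative sketch only */
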
    638 int
    639 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    640 {
    641 	const struct bdevsw *d;
    642 	int rv;
    643 
    644 	/*
    645 	 * For open we need to lock, in order to synchronize
    646 	 * with attach/detach.
    647 	 */
    648 	mutex_enter(&devsw_lock);
    649 	d = bdevsw_lookup(dev);
    650 	mutex_exit(&devsw_lock);
    651 	if (d == NULL)
    652 		return ENXIO;
    653 
    654 	DEV_LOCK(d);
    655 	rv = (*d->d_open)(dev, flag, devtype, l);
    656 	DEV_UNLOCK(d);
    657 
    658 	return rv;
    659 }
    660 
    661 int
    662 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    663 {
    664 	const struct bdevsw *d;
    665 	int rv;
    666 
    667 	if ((d = bdevsw_lookup(dev)) == NULL)
    668 		return ENXIO;
    669 
    670 	DEV_LOCK(d);
    671 	rv = (*d->d_close)(dev, flag, devtype, l);
    672 	DEV_UNLOCK(d);
    673 
    674 	return rv;
    675 }
    676 
    677 void
    678 bdev_strategy(struct buf *bp)
    679 {
    680 	const struct bdevsw *d;
    681 
    682 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
    683 		panic("bdev_strategy");
    684 
    685 	DEV_LOCK(d);
    686 	(*d->d_strategy)(bp);
    687 	DEV_UNLOCK(d);
    688 }
    689 
    690 int
    691 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    692 {
    693 	const struct bdevsw *d;
    694 	int rv;
    695 
    696 	if ((d = bdevsw_lookup(dev)) == NULL)
    697 		return ENXIO;
    698 
    699 	DEV_LOCK(d);
    700 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    701 	DEV_UNLOCK(d);
    702 
    703 	return rv;
    704 }
    705 
    706 int
    707 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    708 {
    709 	const struct bdevsw *d;
    710 	int rv;
    711 
    712 	/*
    713 	 * Dump can be called without the device open.  Since it can
    714 	 * currently only be called with the system paused (and in a
    715 	 * potentially unstable state), we don't perform any locking.
    716 	 */
    717 	if ((d = bdevsw_lookup(dev)) == NULL)
    718 		return ENXIO;
    719 
    720 	/* DEV_LOCK(d); */
    721 	rv = (*d->d_dump)(dev, addr, data, sz);
    722 	/* DEV_UNLOCK(d); */
    723 
    724 	return rv;
    725 }
    726 
    727 int
    728 bdev_type(dev_t dev)
    729 {
    730 	const struct bdevsw *d;
    731 
    732 	if ((d = bdevsw_lookup(dev)) == NULL)
    733 		return D_OTHER;
    734 	return d->d_flag & D_TYPEMASK;
    735 }
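
/*
 * Example: bdev_type() and cdev_type() return only the type bits of
 * d_flag (D_TYPEMASK), so callers can branch on the kind of device.
 * A sketch; D_DISK is assumed here to be one of the standard type
 * values alongside D_OTHER.
 */
#if 0	/* illustrative sketch only */
static bool
example_is_disk(dev_t dev)
{

	return bdev_type(dev) == D_DISK;
}
#endif	/* illustrative sketch only */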
    736 
    737 int
    738 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    739 {
    740 	const struct cdevsw *d;
    741 	int rv;
    742 
    743 	/*
    744 	 * For open we need to lock, in order to synchronize
    745 	 * with attach/detach.
    746 	 */
    747 	mutex_enter(&devsw_lock);
    748 	d = cdevsw_lookup(dev);
    749 	mutex_exit(&devsw_lock);
    750 	if (d == NULL)
    751 		return ENXIO;
    752 
    753 	DEV_LOCK(d);
    754 	rv = (*d->d_open)(dev, flag, devtype, l);
    755 	DEV_UNLOCK(d);
    756 
    757 	return rv;
    758 }
    759 
    760 int
    761 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    762 {
    763 	const struct cdevsw *d;
    764 	int rv;
    765 
    766 	if ((d = cdevsw_lookup(dev)) == NULL)
    767 		return ENXIO;
    768 
    769 	DEV_LOCK(d);
    770 	rv = (*d->d_close)(dev, flag, devtype, l);
    771 	DEV_UNLOCK(d);
    772 
    773 	return rv;
    774 }
    775 
    776 int
    777 cdev_read(dev_t dev, struct uio *uio, int flag)
    778 {
    779 	const struct cdevsw *d;
    780 	int rv;
    781 
    782 	if ((d = cdevsw_lookup(dev)) == NULL)
    783 		return ENXIO;
    784 
    785 	DEV_LOCK(d);
    786 	rv = (*d->d_read)(dev, uio, flag);
    787 	DEV_UNLOCK(d);
    788 
    789 	return rv;
    790 }
    791 
    792 int
    793 cdev_write(dev_t dev, struct uio *uio, int flag)
    794 {
    795 	const struct cdevsw *d;
    796 	int rv;
    797 
    798 	if ((d = cdevsw_lookup(dev)) == NULL)
    799 		return ENXIO;
    800 
    801 	DEV_LOCK(d);
    802 	rv = (*d->d_write)(dev, uio, flag);
    803 	DEV_UNLOCK(d);
    804 
    805 	return rv;
    806 }
    807 
    808 int
    809 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    810 {
    811 	const struct cdevsw *d;
    812 	int rv;
    813 
    814 	if ((d = cdevsw_lookup(dev)) == NULL)
    815 		return ENXIO;
    816 
    817 	DEV_LOCK(d);
    818 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    819 	DEV_UNLOCK(d);
    820 
    821 	return rv;
    822 }
    823 
    824 void
    825 cdev_stop(struct tty *tp, int flag)
    826 {
    827 	const struct cdevsw *d;
    828 
    829 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
    830 		return;
    831 
    832 	DEV_LOCK(d);
    833 	(*d->d_stop)(tp, flag);
    834 	DEV_UNLOCK(d);
    835 }
    836 
    837 struct tty *
    838 cdev_tty(dev_t dev)
    839 {
    840 	const struct cdevsw *d;
	struct tty *rv;
    842 
    843 	if ((d = cdevsw_lookup(dev)) == NULL)
    844 		return NULL;
    845 
    846 	/* XXX Check if necessary. */
    847 	if (d->d_tty == NULL)
    848 		return NULL;
    849 
    850 	DEV_LOCK(d);
    851 	rv = (*d->d_tty)(dev);
    852 	DEV_UNLOCK(d);
    853 
    854 	return rv;
    855 }
    856 
    857 int
    858 cdev_poll(dev_t dev, int flag, lwp_t *l)
    859 {
    860 	const struct cdevsw *d;
    861 	int rv;
    862 
    863 	if ((d = cdevsw_lookup(dev)) == NULL)
    864 		return POLLERR;
    865 
    866 	DEV_LOCK(d);
    867 	rv = (*d->d_poll)(dev, flag, l);
    868 	DEV_UNLOCK(d);
    869 
    870 	return rv;
    871 }
    872 
    873 paddr_t
    874 cdev_mmap(dev_t dev, off_t off, int flag)
    875 {
    876 	const struct cdevsw *d;
    877 	paddr_t rv;
    878 
    879 	if ((d = cdevsw_lookup(dev)) == NULL)
    880 		return (paddr_t)-1LL;
    881 
    882 	DEV_LOCK(d);
    883 	rv = (*d->d_mmap)(dev, off, flag);
    884 	DEV_UNLOCK(d);
    885 
    886 	return rv;
    887 }
    888 
    889 int
    890 cdev_kqfilter(dev_t dev, struct knote *kn)
    891 {
    892 	const struct cdevsw *d;
    893 	int rv;
    894 
    895 	if ((d = cdevsw_lookup(dev)) == NULL)
    896 		return ENXIO;
    897 
    898 	DEV_LOCK(d);
    899 	rv = (*d->d_kqfilter)(dev, kn);
    900 	DEV_UNLOCK(d);
    901 
    902 	return rv;
    903 }
    904 
    905 int
    906 cdev_type(dev_t dev)
    907 {
    908 	const struct cdevsw *d;
    909 
    910 	if ((d = cdevsw_lookup(dev)) == NULL)
    911 		return D_OTHER;
    912 	return d->d_flag & D_TYPEMASK;
    913 }
    914