/* subr_devsw.c, revision 1.10.8.4 (NetBSD kernel, sys/kern) */
      1 /*	$NetBSD: subr_devsw.c,v 1.10.8.4 2007/08/24 23:28:40 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Overview
     41  *
     42  *	subr_devsw.c: registers device drivers by name and by major
     43  *	number, and provides wrapper methods for performing I/O and
     44  *	other tasks on device drivers, keying on the device number
     45  *	(dev_t).
     46  *
     47  *	When the system is built, the config(8) command generates
     48  *	static tables of device drivers built into the kernel image
     49  *	along with their associated methods.  These are recorded in
     50  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     51  *	and removed from the system dynamically.
     52  *
     53  * Allocation
     54  *
     55  *	When the system initially boots only the statically allocated
     56  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
     57  *	allocation, we allocate a fixed block of memory to hold the new,
     58  *	expanded index.  This "fork" of the table is only ever performed
     59  *	once in order to guarantee that other threads may safely access
     60  *	the device tables:
     61  *
     62  *	o Once a thread has a "reference" to the table via an earlier
     63  *	  open() call, we know that the entry in the table must exist
     64  *	  and so it is safe to access it.
     65  *
     66  *	o Regardless of whether other threads see the old or new
     67  *	  pointers, they will point to a correct device switch
     68  *	  structure for the operation being performed.
     69  *
     70  *	XXX Currently, the wrapper methods such as cdev_read() verify
     71  *	that a device driver does in fact exist before calling the
     72  *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
     74  *	calling	the other methods should be valid until that reference
     75  *	is dropped.
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.10.8.4 2007/08/24 23:28:40 ad Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/conf.h>
     83 #include <sys/kmem.h>
     84 #include <sys/systm.h>
     85 #include <sys/poll.h>
     86 #include <sys/tty.h>
     87 #include <sys/buf.h>
     88 
     89 #ifdef DEVSW_DEBUG
     90 #define	DPRINTF(x)	printf x
     91 #else /* DEVSW_DEBUG */
     92 #define	DPRINTF(x)
     93 #endif /* DEVSW_DEBUG */
     94 
     95 #define	MAXDEVSW	512	/* the maximum of major device number */
     96 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
     97 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
     98 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
     99 
    100 extern const struct bdevsw **bdevsw, *bdevsw0[];
    101 extern const struct cdevsw **cdevsw, *cdevsw0[];
    102 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    103 extern const int sys_bdevsws, sys_cdevsws;
    104 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    105 
    106 static int bdevsw_attach(const char *, const struct bdevsw *, int *);
    107 static int cdevsw_attach(const char *, const struct cdevsw *, int *);
    108 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    109 
    110 static kmutex_t devsw_lock;
    111 
    112 void
    113 devsw_init(void)
    114 {
    115 
    116 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    117 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    118 
    119 	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
    120 }
    121 
    122 int
    123 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    124 	     const struct cdevsw *cdev, int *cmajor)
    125 {
    126 	struct devsw_conv *conv;
    127 	char *name;
    128 	int error, i;
    129 
    130 	if (devname == NULL || cdev == NULL)
    131 		return (EINVAL);
    132 
    133 	mutex_enter(&devsw_lock);
    134 
    135 	for (i = 0 ; i < max_devsw_convs ; i++) {
    136 		conv = &devsw_conv[i];
    137 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    138 			continue;
    139 
    140 		if (*bmajor < 0)
    141 			*bmajor = conv->d_bmajor;
    142 		if (*cmajor < 0)
    143 			*cmajor = conv->d_cmajor;
    144 
    145 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    146 			error = EINVAL;
    147 			goto fail;
    148 		}
    149 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    150 			error = EINVAL;
    151 			goto fail;
    152 		}
    153 
    154 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    155 		    cdevsw[*cmajor] != NULL) {
    156 			error = EEXIST;
    157 			goto fail;
    158 		}
    159 
    160 		if (bdev != NULL)
    161 			bdevsw[*bmajor] = bdev;
    162 		cdevsw[*cmajor] = cdev;
    163 
    164 		mutex_exit(&devsw_lock);
    165 		return (0);
    166 	}
    167 
    168 	error = bdevsw_attach(devname, bdev, bmajor);
    169 	if (error != 0)
    170 		goto fail;
    171 	error = cdevsw_attach(devname, cdev, cmajor);
    172 	if (error != 0) {
    173 		devsw_detach_locked(bdev, NULL);
    174 		goto fail;
    175 	}
    176 
    177 	for (i = 0 ; i < max_devsw_convs ; i++) {
    178 		if (devsw_conv[i].d_name == NULL)
    179 			break;
    180 	}
    181 	if (i == max_devsw_convs) {
    182 		struct devsw_conv *newptr;
    183 		int old, new;
    184 
    185 		old = max_devsw_convs;
    186 		new = old + 1;
    187 
    188 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
    189 		if (newptr == NULL) {
    190 			devsw_detach_locked(bdev, cdev);
    191 			error = ENOMEM;
    192 			goto fail;
    193 		}
    194 		newptr[old].d_name = NULL;
    195 		newptr[old].d_bmajor = -1;
    196 		newptr[old].d_cmajor = -1;
    197 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
    198 		if (devsw_conv != devsw_conv0)
    199 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
    200 		devsw_conv = newptr;
    201 		max_devsw_convs = new;
    202 	}
    203 
    204 	i = strlen(devname) + 1;
    205 	name = kmem_alloc(i, KM_NOSLEEP);
    206 	if (name == NULL) {
    207 		devsw_detach_locked(bdev, cdev);
    208 		goto fail;
    209 	}
    210 	strlcpy(name, devname, i);
    211 
    212 	devsw_conv[i].d_name = name;
    213 	devsw_conv[i].d_bmajor = *bmajor;
    214 	devsw_conv[i].d_cmajor = *cmajor;
    215 
    216 	mutex_exit(&devsw_lock);
    217 	return (0);
    218  fail:
    219 	mutex_exit(&devsw_lock);
    220 	return (error);
    221 }
    222 
    223 static int
    224 bdevsw_attach(const char *devname, const struct bdevsw *devsw, int *devmajor)
    225 {
    226 	const struct bdevsw **newptr;
    227 	int bmajor, i;
    228 
    229 	KASSERT(mutex_owned(&devsw_lock));
    230 
    231 	if (devsw == NULL)
    232 		return (0);
    233 
    234 	if (*devmajor < 0) {
    235 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    236 			if (bdevsw[bmajor] != NULL)
    237 				continue;
    238 			for (i = 0 ; i < max_devsw_convs ; i++) {
    239 				if (devsw_conv[i].d_bmajor == bmajor)
    240 					break;
    241 			}
    242 			if (i != max_devsw_convs)
    243 				continue;
    244 			break;
    245 		}
    246 		*devmajor = bmajor;
    247 	}
    248 
    249 	if (*devmajor >= MAXDEVSW) {
    250 		printf("bdevsw_attach: block majors exhausted");
    251 		return (ENOMEM);
    252 	}
    253 
    254 	if (*devmajor >= max_bdevsws) {
    255 		KASSERT(bdevsw == bdevsw0);
    256 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    257 		if (newptr == NULL)
    258 			return (ENOMEM);
    259 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    260 		bdevsw = newptr;
    261 		max_bdevsws = MAXDEVSW;
    262 	}
    263 
    264 	if (bdevsw[*devmajor] != NULL)
    265 		return (EEXIST);
    266 
    267 	bdevsw[*devmajor] = devsw;
    268 
    269 	return (0);
    270 }
    271 
    272 static int
    273 cdevsw_attach(const char *devname, const struct cdevsw *devsw, int *devmajor)
    274 {
    275 	const struct cdevsw **newptr;
    276 	int cmajor, i;
    277 
    278 	KASSERT(mutex_owned(&devsw_lock));
    279 
    280 	if (*devmajor < 0) {
    281 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    282 			if (cdevsw[cmajor] != NULL)
    283 				continue;
    284 			for (i = 0 ; i < max_devsw_convs ; i++) {
    285 				if (devsw_conv[i].d_cmajor == cmajor)
    286 					break;
    287 			}
    288 			if (i != max_devsw_convs)
    289 				continue;
    290 			break;
    291 		}
    292 		*devmajor = cmajor;
    293 	}
    294 
    295 	if (*devmajor >= MAXDEVSW) {
    296 		printf("cdevsw_attach: character majors exhausted");
    297 		return (ENOMEM);
    298 	}
    299 
    300 	if (*devmajor >= max_cdevsws) {
    301 		KASSERT(cdevsw == cdevsw0);
    302 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    303 		if (newptr == NULL)
    304 			return (ENOMEM);
    305 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    306 		cdevsw = newptr;
    307 		max_cdevsws = MAXDEVSW;
    308 	}
    309 
    310 	if (cdevsw[*devmajor] != NULL)
    311 		return (EEXIST);
    312 
    313 	cdevsw[*devmajor] = devsw;
    314 
    315 	return (0);
    316 }
    317 
    318 static void
    319 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
    320 {
    321 	int i;
    322 
    323 	KASSERT(mutex_owned(&devsw_lock));
    324 
    325 	if (bdev != NULL) {
    326 		for (i = 0 ; i < max_bdevsws ; i++) {
    327 			if (bdevsw[i] != bdev)
    328 				continue;
    329 			bdevsw[i] = NULL;
    330 			break;
    331 		}
    332 	}
    333 	if (cdev != NULL) {
    334 		for (i = 0 ; i < max_cdevsws ; i++) {
    335 			if (cdevsw[i] != cdev)
    336 				continue;
    337 			cdevsw[i] = NULL;
    338 			break;
    339 		}
    340 	}
    341 }
    342 
/*
 * devsw_detach:
 *
 *	Public entry point to remove a block and/or character driver
 *	from the device switch tables; takes devsw_lock around the
 *	locked worker.
 */
void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&devsw_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&devsw_lock);
}
    351 
    352 /*
    353  * Look up a block device by number.
    354  *
    355  * => Caller must ensure that the device is attached.
    356  */
    357 const struct bdevsw *
    358 bdevsw_lookup(dev_t dev)
    359 {
    360 	int bmajor;
    361 
    362 	if (dev == NODEV)
    363 		return (NULL);
    364 	bmajor = major(dev);
    365 	if (bmajor < 0 || bmajor >= max_bdevsws)
    366 		return (NULL);
    367 
    368 	return (bdevsw[bmajor]);
    369 }
    370 
    371 /*
    372  * Look up a character device by number.
    373  *
    374  * => Caller must ensure that the device is attached.
    375  */
    376 const struct cdevsw *
    377 cdevsw_lookup(dev_t dev)
    378 {
    379 	int cmajor;
    380 
    381 	if (dev == NODEV)
    382 		return (NULL);
    383 	cmajor = major(dev);
    384 	if (cmajor < 0 || cmajor >= max_cdevsws)
    385 		return (NULL);
    386 
    387 	return (cdevsw[cmajor]);
    388 }
    389 
    390 /*
    391  * Look up a block device by reference to its operations set.
    392  *
    393  * => Caller must ensure that the device is not detached, and therefore
    394  *    that the returned major is still valid when dereferenced.
    395  */
    396 int
    397 bdevsw_lookup_major(const struct bdevsw *bdev)
    398 {
    399 	int bmajor;
    400 
    401 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    402 		if (bdevsw[bmajor] == bdev)
    403 			return (bmajor);
    404 	}
    405 
    406 	return (-1);
    407 }
    408 
    409 /*
    410  * Look up a character device by reference to its operations set.
    411  *
    412  * => Caller must ensure that the device is not detached, and therefore
    413  *    that the returned major is still valid when dereferenced.
    414  */
    415 int
    416 cdevsw_lookup_major(const struct cdevsw *cdev)
    417 {
    418 	int cmajor;
    419 
    420 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    421 		if (cdevsw[cmajor] == cdev)
    422 			return (cmajor);
    423 	}
    424 
    425 	return (-1);
    426 }
    427 
    428 /*
    429  * Convert from block major number to name.
    430  *
    431  * => Caller must ensure that the device is not detached, and therefore
    432  *    that the name pointer is still valid when dereferenced.
    433  */
    434 const char *
    435 devsw_blk2name(int bmajor)
    436 {
    437 	const char *name;
    438 	int cmajor, i;
    439 
    440 	name = NULL;
    441 	cmajor = -1;
    442 
    443 	mutex_enter(&devsw_lock);
    444 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    445 		mutex_exit(&devsw_lock);
    446 		return (NULL);
    447 	}
    448 	for (i = 0 ; i < max_devsw_convs; i++) {
    449 		if (devsw_conv[i].d_bmajor == bmajor) {
    450 			cmajor = devsw_conv[i].d_cmajor;
    451 			break;
    452 		}
    453 	}
    454 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    455 		name = devsw_conv[i].d_name;
    456 	mutex_exit(&devsw_lock);
    457 
    458 	return (name);
    459 }
    460 
    461 /*
    462  * Convert from device name to block major number.
    463  *
    464  * => Caller must ensure that the device is not detached, and therefore
    465  *    that the major number is still valid when dereferenced.
    466  */
    467 int
    468 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    469 {
    470 	struct devsw_conv *conv;
    471 	int bmajor, i;
    472 
    473 	if (name == NULL)
    474 		return (-1);
    475 
    476 	mutex_enter(&devsw_lock);
    477 	for (i = 0 ; i < max_devsw_convs ; i++) {
    478 		size_t len;
    479 
    480 		conv = &devsw_conv[i];
    481 		if (conv->d_name == NULL)
    482 			continue;
    483 		len = strlen(conv->d_name);
    484 		if (strncmp(conv->d_name, name, len) != 0)
    485 			continue;
    486 		if (*(name +len) && !isdigit(*(name + len)))
    487 			continue;
    488 		bmajor = conv->d_bmajor;
    489 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    490 		    bdevsw[bmajor] == NULL)
    491 			break;
    492 		if (devname != NULL) {
    493 #ifdef DEVSW_DEBUG
    494 			if (strlen(conv->d_name) >= devnamelen)
    495 				printf("devsw_name2blk: too short buffer");
    496 #endif /* DEVSW_DEBUG */
    497 			strncpy(devname, conv->d_name, devnamelen);
    498 			devname[devnamelen - 1] = '\0';
    499 		}
    500 		mutex_exit(&devsw_lock);
    501 		return (bmajor);
    502 	}
    503 
    504 	mutex_exit(&devsw_lock);
    505 	return (-1);
    506 }
    507 
    508 /*
    509  * Convert from character dev_t to block dev_t.
    510  *
    511  * => Caller must ensure that the device is not detached, and therefore
    512  *    that the major number is still valid when dereferenced.
    513  */
    514 dev_t
    515 devsw_chr2blk(dev_t cdev)
    516 {
    517 	int bmajor, cmajor, i;
    518 	dev_t rv;
    519 
    520 	cmajor = major(cdev);
    521 	bmajor = -1;
    522 	rv = NODEV;
    523 
    524 	mutex_enter(&devsw_lock);
    525 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    526 		mutex_exit(&devsw_lock);
    527 		return (NODEV);
    528 	}
    529 	for (i = 0 ; i < max_devsw_convs ; i++) {
    530 		if (devsw_conv[i].d_cmajor == cmajor) {
    531 			bmajor = devsw_conv[i].d_bmajor;
    532 			break;
    533 		}
    534 	}
    535 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    536 		rv = makedev(bmajor, minor(cdev));
    537 	mutex_exit(&devsw_lock);
    538 
    539 	return (rv);
    540 }
    541 
    542 /*
    543  * Convert from block dev_t to character dev_t.
    544  *
    545  * => Caller must ensure that the device is not detached, and therefore
    546  *    that the major number is still valid when dereferenced.
    547  */
    548 dev_t
    549 devsw_blk2chr(dev_t bdev)
    550 {
    551 	int bmajor, cmajor, i;
    552 	dev_t rv;
    553 
    554 	bmajor = major(bdev);
    555 	cmajor = -1;
    556 	rv = NODEV;
    557 
    558 	mutex_enter(&devsw_lock);
    559 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    560 		mutex_exit(&devsw_lock);
    561 		return (NODEV);
    562 	}
    563 	for (i = 0 ; i < max_devsw_convs ; i++) {
    564 		if (devsw_conv[i].d_bmajor == bmajor) {
    565 			cmajor = devsw_conv[i].d_cmajor;
    566 			break;
    567 		}
    568 	}
    569 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    570 		rv = makedev(cmajor, minor(bdev));
    571 	mutex_exit(&devsw_lock);
    572 
    573 	return (rv);
    574 }
    575 
    576 /*
    577  * Device access methods.
    578  */
    579 
/*
 * Take/release the big kernel lock around a driver call unless the
 * driver has declared itself multiprocessor-safe (D_MPSAFE).  The
 * do/while(0) wrapper makes each macro a single statement (safe in an
 * unbraced if/else), and the argument is parenthesized against
 * operator-precedence surprises.
 */
#define	DEV_LOCK(d)						\
do {								\
	if (((d)->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_LOCK(1, curlwp);				\
	}							\
} while (/*CONSTCOND*/ 0)

#define	DEV_UNLOCK(d)						\
do {								\
	if (((d)->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_UNLOCK_ONE(curlwp);			\
	}							\
} while (/*CONSTCOND*/ 0)
    589 
    590 int
    591 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    592 {
    593 	const struct bdevsw *d;
    594 	int rv;
    595 
    596 	/*
    597 	 * For open we need to lock, in order to synchronize
    598 	 * with attach/detach.
    599 	 */
    600 	mutex_enter(&devsw_lock);
    601 	d = bdevsw_lookup(dev);
    602 	mutex_exit(&devsw_lock);
    603 	if (d == NULL)
    604 		return ENXIO;
    605 
    606 	DEV_LOCK(d);
    607 	rv = (*d->d_open)(dev, flag, devtype, l);
    608 	DEV_UNLOCK(d);
    609 
    610 	return rv;
    611 }
    612 
    613 int
    614 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    615 {
    616 	const struct bdevsw *d;
    617 	int rv;
    618 
    619 	if ((d = bdevsw_lookup(dev)) == NULL)
    620 		return ENXIO;
    621 
    622 	DEV_LOCK(d);
    623 	rv = (*d->d_close)(dev, flag, devtype, l);
    624 	DEV_UNLOCK(d);
    625 
    626 	return rv;
    627 }
    628 
    629 void
    630 bdev_strategy(struct buf *bp)
    631 {
    632 	const struct bdevsw *d;
    633 
    634 	KASSERT((bp->b_oflags & BO_DONE) == 0);
    635 
    636 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
    637 		panic("bdev_strategy");
    638 
    639 	DEV_LOCK(d);
    640 	(*d->d_strategy)(bp);
    641 	DEV_UNLOCK(d);
    642 }
    643 
    644 int
    645 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    646 {
    647 	const struct bdevsw *d;
    648 	int rv;
    649 
    650 	if ((d = bdevsw_lookup(dev)) == NULL)
    651 		return ENXIO;
    652 
    653 	DEV_LOCK(d);
    654 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    655 	DEV_UNLOCK(d);
    656 
    657 	return rv;
    658 }
    659 
    660 int
    661 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    662 {
    663 	const struct bdevsw *d;
    664 	int rv;
    665 
    666 	/*
    667 	 * Dump can be called without the device open.  Since it can
    668 	 * currently only be called with the system paused (and in a
    669 	 * potentially unstable state), we don't perform any locking.
    670 	 */
    671 	if ((d = bdevsw_lookup(dev)) == NULL)
    672 		return ENXIO;
    673 
    674 	/* DEV_LOCK(d); */
    675 	rv = (*d->d_dump)(dev, addr, data, sz);
    676 	/* DEV_UNLOCK(d); */
    677 
    678 	return rv;
    679 }
    680 
    681 int
    682 bdev_type(dev_t dev)
    683 {
    684 	const struct bdevsw *d;
    685 
    686 	if ((d = bdevsw_lookup(dev)) == NULL)
    687 		return D_OTHER;
    688 	return d->d_flag & D_TYPEMASK;
    689 }
    690 
    691 int
    692 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    693 {
    694 	const struct cdevsw *d;
    695 	int rv;
    696 
    697 	/*
    698 	 * For open we need to lock, in order to synchronize
    699 	 * with attach/detach.
    700 	 */
    701 	mutex_enter(&devsw_lock);
    702 	d = cdevsw_lookup(dev);
    703 	mutex_exit(&devsw_lock);
    704 	if (d == NULL)
    705 		return ENXIO;
    706 
    707 	DEV_LOCK(d);
    708 	rv = (*d->d_open)(dev, flag, devtype, l);
    709 	DEV_UNLOCK(d);
    710 
    711 	return rv;
    712 }
    713 
    714 int
    715 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    716 {
    717 	const struct cdevsw *d;
    718 	int rv;
    719 
    720 	if ((d = cdevsw_lookup(dev)) == NULL)
    721 		return ENXIO;
    722 
    723 	DEV_LOCK(d);
    724 	rv = (*d->d_close)(dev, flag, devtype, l);
    725 	DEV_UNLOCK(d);
    726 
    727 	return rv;
    728 }
    729 
    730 int
    731 cdev_read(dev_t dev, struct uio *uio, int flag)
    732 {
    733 	const struct cdevsw *d;
    734 	int rv;
    735 
    736 	if ((d = cdevsw_lookup(dev)) == NULL)
    737 		return ENXIO;
    738 
    739 	DEV_LOCK(d);
    740 	rv = (*d->d_read)(dev, uio, flag);
    741 	DEV_UNLOCK(d);
    742 
    743 	return rv;
    744 }
    745 
    746 int
    747 cdev_write(dev_t dev, struct uio *uio, int flag)
    748 {
    749 	const struct cdevsw *d;
    750 	int rv;
    751 
    752 	if ((d = cdevsw_lookup(dev)) == NULL)
    753 		return ENXIO;
    754 
    755 	DEV_LOCK(d);
    756 	rv = (*d->d_write)(dev, uio, flag);
    757 	DEV_UNLOCK(d);
    758 
    759 	return rv;
    760 }
    761 
    762 int
    763 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    764 {
    765 	const struct cdevsw *d;
    766 	int rv;
    767 
    768 	if ((d = cdevsw_lookup(dev)) == NULL)
    769 		return ENXIO;
    770 
    771 	DEV_LOCK(d);
    772 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    773 	DEV_UNLOCK(d);
    774 
    775 	return rv;
    776 }
    777 
    778 void
    779 cdev_stop(struct tty *tp, int flag)
    780 {
    781 	const struct cdevsw *d;
    782 
    783 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
    784 		return;
    785 
    786 	DEV_LOCK(d);
    787 	(*d->d_stop)(tp, flag);
    788 	DEV_UNLOCK(d);
    789 }
    790 
    791 struct tty *
    792 cdev_tty(dev_t dev)
    793 {
    794 	const struct cdevsw *d;
    795 	struct tty * rv;
    796 
    797 	if ((d = cdevsw_lookup(dev)) == NULL)
    798 		return NULL;
    799 
    800 	DEV_LOCK(d);
    801 	rv = (*d->d_tty)(dev);
    802 	DEV_UNLOCK(d);
    803 
    804 	return rv;
    805 }
    806 
    807 int
    808 cdev_poll(dev_t dev, int flag, lwp_t *l)
    809 {
    810 	const struct cdevsw *d;
    811 	int rv;
    812 
    813 	if ((d = cdevsw_lookup(dev)) == NULL)
    814 		return POLLERR;
    815 
    816 	DEV_LOCK(d);
    817 	rv = (*d->d_poll)(dev, flag, l);
    818 	DEV_UNLOCK(d);
    819 
    820 	return rv;
    821 }
    822 
    823 paddr_t
    824 cdev_mmap(dev_t dev, off_t off, int flag)
    825 {
    826 	const struct cdevsw *d;
    827 	paddr_t rv;
    828 
    829 	if ((d = cdevsw_lookup(dev)) == NULL)
    830 		return (paddr_t)-1LL;
    831 
    832 	DEV_LOCK(d);
    833 	rv = (*d->d_mmap)(dev, off, flag);
    834 	DEV_UNLOCK(d);
    835 
    836 	return rv;
    837 }
    838 
    839 int
    840 cdev_kqfilter(dev_t dev, struct knote *kn)
    841 {
    842 	const struct cdevsw *d;
    843 	int rv;
    844 
    845 	if ((d = cdevsw_lookup(dev)) == NULL)
    846 		return ENXIO;
    847 
    848 	DEV_LOCK(d);
    849 	rv = (*d->d_kqfilter)(dev, kn);
    850 	DEV_UNLOCK(d);
    851 
    852 	return rv;
    853 }
    854 
    855 int
    856 cdev_type(dev_t dev)
    857 {
    858 	const struct cdevsw *d;
    859 
    860 	if ((d = cdevsw_lookup(dev)) == NULL)
    861 		return D_OTHER;
    862 	return d->d_flag & D_TYPEMASK;
    863 }
    864