      1 /*	$NetBSD: subr_devsw.c,v 1.15.6.2 2008/04/03 11:37:26 mjf Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Overview
     41  *
     42  *	subr_devsw.c: registers device drivers by name and by major
     43  *	number, and provides wrapper methods for performing I/O and
     44  *	other tasks on device drivers, keying on the device number
     45  *	(dev_t).
     46  *
     47  *	When the system is built, the config(8) command generates
     48  *	static tables of device drivers built into the kernel image
     49  *	along with their associated methods.  These are recorded in
     50  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     51  *	and removed from the system dynamically.
     52  *
     53  * Allocation
     54  *
     55  *	When the system initially boots only the statically allocated
     56  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow because
     57  *	a driver is attached at a major number beyond the static range,
     58  *	we allocate a fixed block of memory to hold the new, expanded
     59  *	index.  This "fork" of the table is only ever performed once, to
     60  *	guarantee that other threads may safely access the device tables:
     61  *
     62  *	o Once a thread has a "reference" to the table via an earlier
     63  *	  open() call, we know that the entry in the table must exist
     64  *	  and so it is safe to access it.
     65  *
     66  *	o Regardless of whether other threads see the old or new
     67  *	  pointers, they will point to a correct device switch
     68  *	  structure for the operation being performed.
     69  *
     70  *	XXX Currently, the wrapper methods such as cdev_read() verify
     71  *	that a device driver does in fact exist before calling the
     72  *	associated driver method.  This should be changed so that
     73  *	once the device has been referenced by a vnode (opened),
     74  *	calls to the other methods remain valid until that
     75  *	reference is dropped.
     76  */
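
/*
 * Illustrative example (a sketch only, not part of this file): a
 * dynamically loaded driver might register and later unregister its
 * device switch entries roughly as follows.  The "mydev_*" structures
 * and the "mydev" name are hypothetical.
 *
 *	extern const struct bdevsw mydev_bdevsw;
 *	extern const struct cdevsw mydev_cdevsw;
 *	int bmajor = -1, cmajor = -1, error;
 *
 *	error = devsw_attach("mydev", &mydev_bdevsw, &bmajor,
 *	    &mydev_cdevsw, &cmajor);
 *	if (error != 0)
 *		return error;
 *	...
 *	devsw_detach(&mydev_bdevsw, &mydev_cdevsw);
 *
 * Passing -1 for the majors asks devsw_attach() either to reuse the
 * majors already recorded for "mydev" in devsw_conv[], or to pick the
 * next free ones.
 */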
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.15.6.2 2008/04/03 11:37:26 mjf Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/conf.h>
     83 #include <sys/kmem.h>
     84 #include <sys/systm.h>
     85 #include <sys/poll.h>
     86 #include <sys/tty.h>
     87 #include <sys/cpu.h>
     88 #include <sys/buf.h>
     89 #include <sys/dirent.h>
     90 #include <machine/stdarg.h>
     91 #include <sys/disklabel.h>
     92 
     93 #ifdef DEVSW_DEBUG
     94 #define	DPRINTF(x)	printf x
     95 #else /* DEVSW_DEBUG */
     96 #define	DPRINTF(x)
     97 #endif /* DEVSW_DEBUG */
     98 
     99 #define	MAXDEVSW	512	/* the maximum number of device majors */
    100 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
    101 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
    102 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
    103 
    104 extern const struct bdevsw **bdevsw, *bdevsw0[];
    105 extern const struct cdevsw **cdevsw, *cdevsw0[];
    106 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    107 extern const int sys_bdevsws, sys_cdevsws;
    108 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    109 
    110 static int bdevsw_attach(const struct bdevsw *, int *);
    111 static int cdevsw_attach(const struct cdevsw *, int *);
    112 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    113 
    114 kmutex_t devsw_lock;
    115 extern kmutex_t dname_lock;
    116 
    117 /*
    118  * A table of initialisation functions for device drivers that
    119  * don't have an attach routine.
    120  */
    121 void (*devsw_init_funcs[])(void) = {
    122 	bpf_init,
    123 	cttyinit,
    124 	mem_init,
    125 	swap_init,
    126 	NULL,
    127 };
    128 
    129 void
    130 devsw_init(void)
    131 {
    132 	int i;
    133 
    134 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    135 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    136 
    137 	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
    138 	mutex_init(&dname_lock, MUTEX_DEFAULT, IPL_NONE);
    139 	TAILQ_INIT(&device_names);
    140 
    141 	/*
    142 	 * Some device drivers are never 'attached' in the autoconf
    143 	 * sense, so this table gives them a hook through which to
    144 	 * register their device names at boot time.
    145 	 */
    146 	for (i = 0; devsw_init_funcs[i] != NULL; i++)
    147 		devsw_init_funcs[i]();
    148 }
    149 
    150 int
    151 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    152 	     const struct cdevsw *cdev, int *cmajor)
    153 {
    154 	struct devsw_conv *conv;
    155 	char *name;
    156 	int error, i;
    157 
    158 	if (devname == NULL || cdev == NULL)
    159 		return (EINVAL);
    160 
    161 	mutex_enter(&devsw_lock);
    162 
    163 	for (i = 0 ; i < max_devsw_convs ; i++) {
    164 		conv = &devsw_conv[i];
    165 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    166 			continue;
    167 
    168 		if (*bmajor < 0)
    169 			*bmajor = conv->d_bmajor;
    170 		if (*cmajor < 0)
    171 			*cmajor = conv->d_cmajor;
    172 
    173 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    174 			error = EINVAL;
    175 			goto fail;
    176 		}
    177 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    178 			error = EINVAL;
    179 			goto fail;
    180 		}
    181 
    182 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    183 		    cdevsw[*cmajor] != NULL) {
    184 			error = EEXIST;
    185 			goto fail;
    186 		}
    187 
    188 		if (bdev != NULL)
    189 			bdevsw[*bmajor] = bdev;
    190 		cdevsw[*cmajor] = cdev;
    191 
    192 		mutex_exit(&devsw_lock);
    193 		return (0);
    194 	}
    195 
    196 	error = bdevsw_attach(bdev, bmajor);
    197 	if (error != 0)
    198 		goto fail;
    199 	error = cdevsw_attach(cdev, cmajor);
    200 	if (error != 0) {
    201 		devsw_detach_locked(bdev, NULL);
    202 		goto fail;
    203 	}
    204 
    205 	for (i = 0 ; i < max_devsw_convs ; i++) {
    206 		if (devsw_conv[i].d_name == NULL)
    207 			break;
    208 	}
    209 	if (i == max_devsw_convs) {
    210 		struct devsw_conv *newptr;
    211 		int old, new;
    212 
    213 		old = max_devsw_convs;
    214 		new = old + 1;
    215 
    216 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
    217 		if (newptr == NULL) {
    218 			devsw_detach_locked(bdev, cdev);
    219 			error = ENOMEM;
    220 			goto fail;
    221 		}
    222 		newptr[old].d_name = NULL;
    223 		newptr[old].d_bmajor = -1;
    224 		newptr[old].d_cmajor = -1;
    225 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
    226 		if (devsw_conv != devsw_conv0)
    227 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
    228 		devsw_conv = newptr;
    229 		max_devsw_convs = new;
    230 	}
    231 
    232 	name = kmem_alloc(strlen(devname) + 1, KM_NOSLEEP);
    233 	if (name == NULL) {
    234 		devsw_detach_locked(bdev, cdev);
    235 		error = ENOMEM;
    236 		goto fail;
    237 	}
    238 	strlcpy(name, devname, strlen(devname) + 1);
    239 
    240 	devsw_conv[i].d_name = name;
    241 	devsw_conv[i].d_bmajor = *bmajor;
    242 	devsw_conv[i].d_cmajor = *cmajor;
    243 
    244 	mutex_exit(&devsw_lock);
    245 	return (0);
    246  fail:
    247 	mutex_exit(&devsw_lock);
    248 	return (error);
    249 }
    250 
    251 static int
    252 bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
    253 {
    254 	const struct bdevsw **newptr;
    255 	int bmajor, i;
    256 
    257 	KASSERT(mutex_owned(&devsw_lock));
    258 
    259 	if (devsw == NULL)
    260 		return (0);
    261 
    262 	if (*devmajor < 0) {
    263 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    264 			if (bdevsw[bmajor] != NULL)
    265 				continue;
    266 			for (i = 0 ; i < max_devsw_convs ; i++) {
    267 				if (devsw_conv[i].d_bmajor == bmajor)
    268 					break;
    269 			}
    270 			if (i != max_devsw_convs)
    271 				continue;
    272 			break;
    273 		}
    274 		*devmajor = bmajor;
    275 	}
    276 
    277 	if (*devmajor >= MAXDEVSW) {
    278 		printf("bdevsw_attach: block majors exhausted\n");
    279 		return (ENOMEM);
    280 	}
    281 
    282 	if (*devmajor >= max_bdevsws) {
    283 		KASSERT(bdevsw == bdevsw0);
    284 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    285 		if (newptr == NULL)
    286 			return (ENOMEM);
    287 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    288 		bdevsw = newptr;
    289 		max_bdevsws = MAXDEVSW;
    290 	}
    291 
    292 	if (bdevsw[*devmajor] != NULL)
    293 		return (EEXIST);
    294 
    295 	bdevsw[*devmajor] = devsw;
    296 
    297 	return (0);
    298 }
    299 
    300 static int
    301 cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
    302 {
    303 	const struct cdevsw **newptr;
    304 	int cmajor, i;
    305 
    306 	KASSERT(mutex_owned(&devsw_lock));
    307 
    308 	if (*devmajor < 0) {
    309 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    310 			if (cdevsw[cmajor] != NULL)
    311 				continue;
    312 			for (i = 0 ; i < max_devsw_convs ; i++) {
    313 				if (devsw_conv[i].d_cmajor == cmajor)
    314 					break;
    315 			}
    316 			if (i != max_devsw_convs)
    317 				continue;
    318 			break;
    319 		}
    320 		*devmajor = cmajor;
    321 	}
    322 
    323 	if (*devmajor >= MAXDEVSW) {
    324 		printf("cdevsw_attach: character majors exhausted\n");
    325 		return (ENOMEM);
    326 	}
    327 
    328 	if (*devmajor >= max_cdevsws) {
    329 		KASSERT(cdevsw == cdevsw0);
    330 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    331 		if (newptr == NULL)
    332 			return (ENOMEM);
    333 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    334 		cdevsw = newptr;
    335 		max_cdevsws = MAXDEVSW;
    336 	}
    337 
    338 	if (cdevsw[*devmajor] != NULL)
    339 		return (EEXIST);
    340 
    341 	cdevsw[*devmajor] = devsw;
    342 
    343 	return (0);
    344 }
    345 
    346 static void
    347 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
    348 {
    349 	int i;
    350 
    351 	KASSERT(mutex_owned(&devsw_lock));
    352 
    353 	if (bdev != NULL) {
    354 		for (i = 0 ; i < max_bdevsws ; i++) {
    355 			if (bdevsw[i] != bdev)
    356 				continue;
    357 			bdevsw[i] = NULL;
    358 			break;
    359 		}
    360 	}
    361 	if (cdev != NULL) {
    362 		for (i = 0 ; i < max_cdevsws ; i++) {
    363 			if (cdevsw[i] != cdev)
    364 				continue;
    365 			cdevsw[i] = NULL;
    366 			break;
    367 		}
    368 	}
    369 }
    370 
    371 void
    372 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    373 {
    374 
    375 	mutex_enter(&devsw_lock);
    376 	devsw_detach_locked(bdev, cdev);
    377 	mutex_exit(&devsw_lock);
    378 }
    379 
    380 /*
    381  * Look up a block device by number.
    382  *
    383  * => Caller must ensure that the device is attached.
    384  */
    385 const struct bdevsw *
    386 bdevsw_lookup(dev_t dev)
    387 {
    388 	int bmajor;
    389 
    390 	if (dev == NODEV)
    391 		return (NULL);
    392 	bmajor = major(dev);
    393 	if (bmajor < 0 || bmajor >= max_bdevsws)
    394 		return (NULL);
    395 
    396 	return (bdevsw[bmajor]);
    397 }
    398 
    399 /*
    400  * Look up a character device by number.
    401  *
    402  * => Caller must ensure that the device is attached.
    403  */
    404 const struct cdevsw *
    405 cdevsw_lookup(dev_t dev)
    406 {
    407 	int cmajor;
    408 
    409 	if (dev == NODEV)
    410 		return (NULL);
    411 	cmajor = major(dev);
    412 	if (cmajor < 0 || cmajor >= max_cdevsws)
    413 		return (NULL);
    414 
    415 	return (cdevsw[cmajor]);
    416 }
    417 
    418 /*
    419  * Look up a block device by reference to its operations set.
    420  *
    421  * => Caller must ensure that the device is not detached, and therefore
    422  *    that the returned major is still valid when dereferenced.
    423  */
    424 int
    425 bdevsw_lookup_major(const struct bdevsw *bdev)
    426 {
    427 	int bmajor;
    428 
    429 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    430 		if (bdevsw[bmajor] == bdev)
    431 			return (bmajor);
    432 	}
    433 
    434 	return (-1);
    435 }
    436 
    437 /*
    438  * Look up a character device by reference to its operations set.
    439  *
    440  * => Caller must ensure that the device is not detached, and therefore
    441  *    that the returned major is still valid when dereferenced.
    442  */
    443 int
    444 cdevsw_lookup_major(const struct cdevsw *cdev)
    445 {
    446 	int cmajor;
    447 
    448 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    449 		if (cdevsw[cmajor] == cdev)
    450 			return (cmajor);
    451 	}
    452 
    453 	return (-1);
    454 }
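
/*
 * Illustrative sketch (not part of this file): a driver that only holds
 * a pointer to its own switch structure can recover its major number
 * and build a dev_t from it.  "mydev_cdevsw" and "unit" are
 * hypothetical.
 *
 *	int cmajor;
 *	dev_t dev;
 *
 *	cmajor = cdevsw_lookup_major(&mydev_cdevsw);
 *	if (cmajor == -1)
 *		return ENXIO;
 *	dev = makedev(cmajor, unit);
 */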
    455 
    456 /*
    457  * Convert from block major number to name.
    458  *
    459  * => Caller must ensure that the device is not detached, and therefore
    460  *    that the name pointer is still valid when dereferenced.
    461  */
    462 const char *
    463 devsw_blk2name(int bmajor)
    464 {
    465 	const char *name;
    466 	int cmajor, i;
    467 
    468 	name = NULL;
    469 	cmajor = -1;
    470 
    471 	mutex_enter(&devsw_lock);
    472 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    473 		mutex_exit(&devsw_lock);
    474 		return (NULL);
    475 	}
    476 	for (i = 0 ; i < max_devsw_convs; i++) {
    477 		if (devsw_conv[i].d_bmajor == bmajor) {
    478 			cmajor = devsw_conv[i].d_cmajor;
    479 			break;
    480 		}
    481 	}
    482 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    483 		name = devsw_conv[i].d_name;
    484 	mutex_exit(&devsw_lock);
    485 
    486 	return (name);
    487 }
    488 
    489 /*
    490  * Convert from device name to block major number.
    491  *
    492  * => Caller must ensure that the device is not detached, and therefore
    493  *    that the major number is still valid when dereferenced.
    494  */
    495 int
    496 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    497 {
    498 	struct devsw_conv *conv;
    499 	int bmajor, i;
    500 
    501 	if (name == NULL)
    502 		return (-1);
    503 
    504 	mutex_enter(&devsw_lock);
    505 	for (i = 0 ; i < max_devsw_convs ; i++) {
    506 		size_t len;
    507 
    508 		conv = &devsw_conv[i];
    509 		if (conv->d_name == NULL)
    510 			continue;
    511 		len = strlen(conv->d_name);
    512 		if (strncmp(conv->d_name, name, len) != 0)
    513 			continue;
    514 		if (*(name + len) && !isdigit(*(name + len)))
    515 			continue;
    516 		bmajor = conv->d_bmajor;
    517 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    518 		    bdevsw[bmajor] == NULL)
    519 			break;
    520 		if (devname != NULL) {
    521 #ifdef DEVSW_DEBUG
    522 			if (strlen(conv->d_name) >= devnamelen)
    523 				printf("devsw_name2blk: buffer too short\n");
    524 #endif /* DEVSW_DEBUG */
    525 			strncpy(devname, conv->d_name, devnamelen);
    526 			devname[devnamelen - 1] = '\0';
    527 		}
    528 		mutex_exit(&devsw_lock);
    529 		return (bmajor);
    530 	}
    531 
    532 	mutex_exit(&devsw_lock);
    533 	return (-1);
    534 }
    535 
    536 /*
    537  * Convert from character dev_t to block dev_t.
    538  *
    539  * => Caller must ensure that the device is not detached, and therefore
    540  *    that the major number is still valid when dereferenced.
    541  */
    542 dev_t
    543 devsw_chr2blk(dev_t cdev)
    544 {
    545 	int bmajor, cmajor, i;
    546 	dev_t rv;
    547 
    548 	cmajor = major(cdev);
    549 	bmajor = -1;
    550 	rv = NODEV;
    551 
    552 	mutex_enter(&devsw_lock);
    553 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    554 		mutex_exit(&devsw_lock);
    555 		return (NODEV);
    556 	}
    557 	for (i = 0 ; i < max_devsw_convs ; i++) {
    558 		if (devsw_conv[i].d_cmajor == cmajor) {
    559 			bmajor = devsw_conv[i].d_bmajor;
    560 			break;
    561 		}
    562 	}
    563 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    564 		rv = makedev(bmajor, minor(cdev));
    565 	mutex_exit(&devsw_lock);
    566 
    567 	return (rv);
    568 }
    569 
    570 /*
    571  * Convert from block dev_t to character dev_t.
    572  *
    573  * => Caller must ensure that the device is not detached, and therefore
    574  *    that the major number is still valid when dereferenced.
    575  */
    576 dev_t
    577 devsw_blk2chr(dev_t bdev)
    578 {
    579 	int bmajor, cmajor, i;
    580 	dev_t rv;
    581 
    582 	bmajor = major(bdev);
    583 	cmajor = -1;
    584 	rv = NODEV;
    585 
    586 	mutex_enter(&devsw_lock);
    587 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    588 		mutex_exit(&devsw_lock);
    589 		return (NODEV);
    590 	}
    591 	for (i = 0 ; i < max_devsw_convs ; i++) {
    592 		if (devsw_conv[i].d_bmajor == bmajor) {
    593 			cmajor = devsw_conv[i].d_cmajor;
    594 			break;
    595 		}
    596 	}
    597 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    598 		rv = makedev(cmajor, minor(bdev));
    599 	mutex_exit(&devsw_lock);
    600 
    601 	return (rv);
    602 }
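
/*
 * Illustrative sketch (not part of this file): the conversion helpers
 * above are typically used when only one flavour of a device name or
 * number is at hand.  "wd0" is used purely as an example; any
 * registered driver name works the same way.
 *
 *	char buf[16];
 *	int bmajor;
 *	dev_t cdev;
 *
 *	bmajor = devsw_name2blk("wd0", buf, sizeof(buf));
 *	if (bmajor == -1)
 *		return ENXIO;
 *	cdev = devsw_blk2chr(makedev(bmajor, 0));
 *
 * devsw_name2blk() returns -1 for an unknown name, and devsw_blk2chr()
 * returns NODEV when there is no matching character device.
 */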
    603 
    604 /*
    605  * Device access methods.
    606  */
    607 
    608 #define	DEV_LOCK(d)						\
    609 	if ((d->d_flag & D_MPSAFE) == 0) {			\
    610 		KERNEL_LOCK(1, curlwp);				\
    611 	}
    612 
    613 #define	DEV_UNLOCK(d)						\
    614 	if ((d->d_flag & D_MPSAFE) == 0) {			\
    615 		KERNEL_UNLOCK_ONE(curlwp);			\
    616 	}
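
/*
 * Illustrative sketch (not part of this file): a driver that performs
 * its own locking can set D_MPSAFE in d_flag so that the wrappers below
 * skip the kernel lock.  The "mydev_*" functions are hypothetical; the
 * no*() stubs are the standard placeholders from sys/conf.h.
 *
 *	const struct cdevsw mydev_cdevsw = {
 *		.d_open = mydev_open,
 *		.d_close = mydev_close,
 *		.d_read = mydev_read,
 *		.d_write = mydev_write,
 *		.d_ioctl = mydev_ioctl,
 *		.d_stop = nostop,
 *		.d_tty = notty,
 *		.d_poll = nopoll,
 *		.d_mmap = nommap,
 *		.d_kqfilter = nokqfilter,
 *		.d_flag = D_OTHER | D_MPSAFE,
 *	};
 */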
    617 
    618 int
    619 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    620 {
    621 	const struct bdevsw *d;
    622 	int rv;
    623 
    624 	/*
    625 	 * For open we need to lock, in order to synchronize
    626 	 * with attach/detach.
    627 	 */
    628 	mutex_enter(&devsw_lock);
    629 	d = bdevsw_lookup(dev);
    630 	mutex_exit(&devsw_lock);
    631 	if (d == NULL)
    632 		return ENXIO;
    633 
    634 	DEV_LOCK(d);
    635 	rv = (*d->d_open)(dev, flag, devtype, l);
    636 	DEV_UNLOCK(d);
    637 
    638 	return rv;
    639 }
    640 
    641 int
    642 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    643 {
    644 	const struct bdevsw *d;
    645 	int rv;
    646 
    647 	if ((d = bdevsw_lookup(dev)) == NULL)
    648 		return ENXIO;
    649 
    650 	DEV_LOCK(d);
    651 	rv = (*d->d_close)(dev, flag, devtype, l);
    652 	DEV_UNLOCK(d);
    653 
    654 	return rv;
    655 }
    656 
    657 void
    658 bdev_strategy(struct buf *bp)
    659 {
    660 	const struct bdevsw *d;
    661 
    662 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
    663 		panic("bdev_strategy");
    664 
    665 	DEV_LOCK(d);
    666 	(*d->d_strategy)(bp);
    667 	DEV_UNLOCK(d);
    668 }
    669 
    670 int
    671 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    672 {
    673 	const struct bdevsw *d;
    674 	int rv;
    675 
    676 	if ((d = bdevsw_lookup(dev)) == NULL)
    677 		return ENXIO;
    678 
    679 	DEV_LOCK(d);
    680 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    681 	DEV_UNLOCK(d);
    682 
    683 	return rv;
    684 }
    685 
    686 int
    687 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    688 {
    689 	const struct bdevsw *d;
    690 	int rv;
    691 
    692 	/*
    693 	 * Dump can be called without the device open.  Since it can
    694 	 * currently only be called with the system paused (and in a
    695 	 * potentially unstable state), we don't perform any locking.
    696 	 */
    697 	if ((d = bdevsw_lookup(dev)) == NULL)
    698 		return ENXIO;
    699 
    700 	/* DEV_LOCK(d); */
    701 	rv = (*d->d_dump)(dev, addr, data, sz);
    702 	/* DEV_UNLOCK(d); */
    703 
    704 	return rv;
    705 }
    706 
    707 int
    708 bdev_type(dev_t dev)
    709 {
    710 	const struct bdevsw *d;
    711 
    712 	if ((d = bdevsw_lookup(dev)) == NULL)
    713 		return D_OTHER;
    714 	return d->d_flag & D_TYPEMASK;
    715 }
    716 
    717 int
    718 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    719 {
    720 	const struct cdevsw *d;
    721 	int rv;
    722 
    723 	/*
    724 	 * For open we need to lock, in order to synchronize
    725 	 * with attach/detach.
    726 	 */
    727 	mutex_enter(&devsw_lock);
    728 	d = cdevsw_lookup(dev);
    729 	mutex_exit(&devsw_lock);
    730 	if (d == NULL)
    731 		return ENXIO;
    732 
    733 	DEV_LOCK(d);
    734 	rv = (*d->d_open)(dev, flag, devtype, l);
    735 	DEV_UNLOCK(d);
    736 
    737 	return rv;
    738 }
    739 
    740 int
    741 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    742 {
    743 	const struct cdevsw *d;
    744 	int rv;
    745 
    746 	if ((d = cdevsw_lookup(dev)) == NULL)
    747 		return ENXIO;
    748 
    749 	DEV_LOCK(d);
    750 	rv = (*d->d_close)(dev, flag, devtype, l);
    751 	DEV_UNLOCK(d);
    752 
    753 	return rv;
    754 }
    755 
    756 int
    757 cdev_read(dev_t dev, struct uio *uio, int flag)
    758 {
    759 	const struct cdevsw *d;
    760 	int rv;
    761 
    762 	if ((d = cdevsw_lookup(dev)) == NULL)
    763 		return ENXIO;
    764 
    765 	DEV_LOCK(d);
    766 	rv = (*d->d_read)(dev, uio, flag);
    767 	DEV_UNLOCK(d);
    768 
    769 	return rv;
    770 }
    771 
    772 int
    773 cdev_write(dev_t dev, struct uio *uio, int flag)
    774 {
    775 	const struct cdevsw *d;
    776 	int rv;
    777 
    778 	if ((d = cdevsw_lookup(dev)) == NULL)
    779 		return ENXIO;
    780 
    781 	DEV_LOCK(d);
    782 	rv = (*d->d_write)(dev, uio, flag);
    783 	DEV_UNLOCK(d);
    784 
    785 	return rv;
    786 }
    787 
    788 int
    789 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    790 {
    791 	const struct cdevsw *d;
    792 	int rv;
    793 
    794 	if ((d = cdevsw_lookup(dev)) == NULL)
    795 		return ENXIO;
    796 
    797 	DEV_LOCK(d);
    798 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    799 	DEV_UNLOCK(d);
    800 
    801 	return rv;
    802 }
    803 
    804 void
    805 cdev_stop(struct tty *tp, int flag)
    806 {
    807 	const struct cdevsw *d;
    808 
    809 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
    810 		return;
    811 
    812 	DEV_LOCK(d);
    813 	(*d->d_stop)(tp, flag);
    814 	DEV_UNLOCK(d);
    815 }
    816 
    817 struct tty *
    818 cdev_tty(dev_t dev)
    819 {
    820 	const struct cdevsw *d;
    821 	struct tty * rv;
    822 
    823 	if ((d = cdevsw_lookup(dev)) == NULL)
    824 		return NULL;
    825 
    826 	/* XXX Check if necessary. */
    827 	if (d->d_tty == NULL)
    828 		return NULL;
    829 
    830 	DEV_LOCK(d);
    831 	rv = (*d->d_tty)(dev);
    832 	DEV_UNLOCK(d);
    833 
    834 	return rv;
    835 }
    836 
    837 int
    838 cdev_poll(dev_t dev, int flag, lwp_t *l)
    839 {
    840 	const struct cdevsw *d;
    841 	int rv;
    842 
    843 	if ((d = cdevsw_lookup(dev)) == NULL)
    844 		return POLLERR;
    845 
    846 	DEV_LOCK(d);
    847 	rv = (*d->d_poll)(dev, flag, l);
    848 	DEV_UNLOCK(d);
    849 
    850 	return rv;
    851 }
    852 
    853 paddr_t
    854 cdev_mmap(dev_t dev, off_t off, int flag)
    855 {
    856 	const struct cdevsw *d;
    857 	paddr_t rv;
    858 
    859 	if ((d = cdevsw_lookup(dev)) == NULL)
    860 		return (paddr_t)-1LL;
    861 
    862 	DEV_LOCK(d);
    863 	rv = (*d->d_mmap)(dev, off, flag);
    864 	DEV_UNLOCK(d);
    865 
    866 	return rv;
    867 }
    868 
    869 int
    870 cdev_kqfilter(dev_t dev, struct knote *kn)
    871 {
    872 	const struct cdevsw *d;
    873 	int rv;
    874 
    875 	if ((d = cdevsw_lookup(dev)) == NULL)
    876 		return ENXIO;
    877 
    878 	DEV_LOCK(d);
    879 	rv = (*d->d_kqfilter)(dev, kn);
    880 	DEV_UNLOCK(d);
    881 
    882 	return rv;
    883 }
    884 
    885 int
    886 cdev_type(dev_t dev)
    887 {
    888 	const struct cdevsw *d;
    889 
    890 	if ((d = cdevsw_lookup(dev)) == NULL)
    891 		return D_OTHER;
    892 	return d->d_flag & D_TYPEMASK;
    893 }
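
/*
 * Illustrative sketch (not part of this file): bdev_type() and
 * cdev_type() let callers test the class of an otherwise opaque device
 * number, for example to restrict an operation to disks:
 *
 *	if (bdev_type(dev) != D_DISK)
 *		return ENOTBLK;
 */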
    894 
    895 /*
    896  * Register a dev_t and name for a device driver with devfs.
    897  * We maintain a TAILQ of registered device driver names and dev_t's.
    898  *
    899  * => if devp is NULL, this device has no device_t instance.  An
    900  *    example of this is zero(4).
    901  *
    902  * => if another name already exists for this dev_t, then 'name' is
    903  *    assumed to be an alias of a previously registered device driver.
    904  *    TODO: alias detection is not implemented yet; the name is simply
    905  *    registered and 0 is returned.
    906  *
    907  * => 'cdev' indicates whether the name refers to a character device
    908  *    (true) or a block device (false).
    909  */
    910 int
    911 device_register_name(dev_t dev, device_t devp, boolean_t cdev,
    912 	enum devtype dtype, const char *fmt, ...)
    913 {
    914 	struct device_name *dn;
    915 	va_list ap;
    916 
    917 	/* TODO: Check for aliases */
    918 
    919 	dn = kmem_zalloc(sizeof(*dn), KM_NOSLEEP);
    920 	if (dn == NULL)
    921 		return ENOMEM;
    922 
    923 	dn->d_dev = dev;
    924 	dn->d_devp = devp;
    925 	dn->d_char = cdev;
    926 	dn->d_type = dtype;
    927 
    928 	dn->d_name = kmem_zalloc(MAXNAMLEN, KM_NOSLEEP);
        	if (dn->d_name == NULL) {
        		kmem_free(dn, sizeof(*dn));
        		return ENOMEM;
        	}
    929 	va_start(ap, fmt);
    930 	vsnprintf(dn->d_name, MAXNAMLEN, fmt, ap);
    931 	va_end(ap);
    932 
    933 	mutex_enter(&dname_lock);
    934 	TAILQ_INSERT_TAIL(&device_names, dn, d_next);
    935 	mutex_exit(&dname_lock);
    936 
    937 	return 0;
    938 }
    939 
    940 /*
    941  * Remove a previously registered name for 'dev'.
    942  *
    943  * => This must be called twice with different values for 'dev' if
    944  *    the caller previously registered a name for a character device
    945  *    and a name for a block device.
    946  */
    947 int
    948 device_unregister_name(dev_t dev, const char *fmt, ...)
    949 {
    950 	int error = 0;
    951 	struct device_name *dn;
    952 	va_list ap;
    953 	char name[MAXNAMLEN];
    954 
    955 	va_start(ap, fmt);
    956 	vsnprintf(name, MAXNAMLEN, fmt, ap);
    957 	va_end(ap);
    958 
    959 	mutex_enter(&dname_lock);
    960 	TAILQ_FOREACH(dn, &device_names, d_next) {
    961 		if (strcmp(dn->d_name, name) == 0)
    962 			break;
    963 	}
    964 
    965 	if (dn != NULL)
    966 		dn->d_gone = true;
    967 	else
    968 		error = EINVAL;
    969 
    970 	mutex_exit(&dname_lock);
    971 	return error;
    972 }
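
/*
 * Illustrative sketch (not part of this file): a driver registering a
 * devfs name pairs device_register_name() at attach time with a
 * matching device_unregister_name() at detach time.  The "mydev" name,
 * "unit", "self" and "dtype" below are hypothetical placeholders
 * ("dtype" stands for whatever enum devtype value suits the device).
 *
 *	device_register_name(makedev(cmajor, unit), self, true, dtype,
 *	    "mydev%d", unit);
 *	...
 *	device_unregister_name(makedev(cmajor, unit), "mydev%d", unit);
 */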
    973 
    974 struct device_name *
    975 device_lookup_info(dev_t dev, int is_char)
    976 {
    977 	struct device_name *dn;
    978 
    979 	mutex_enter(&dname_lock);
    980 	TAILQ_FOREACH(dn, &device_names, d_next) {
    981 		if ((dn->d_dev == dev) && (dn->d_char == is_char))
    982 			break;
    983 	}
    984 	mutex_exit(&dname_lock);
    985 
    986 	return dn;
    987 }
    988