      1 /*	$NetBSD: subr_devsw.c,v 1.15.6.6 2008/04/14 16:23:56 mjf Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Overview
     41  *
     42  *	subr_devsw.c: registers device drivers by name and by major
     43  *	number, and provides wrapper methods for performing I/O and
     44  *	other tasks on device drivers, keying on the device number
     45  *	(dev_t).
     46  *
     47  *	When the system is built, the config(8) command generates
     48  *	static tables of device drivers built into the kernel image
     49  *	along with their associated methods.  These are recorded in
     50  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     51  *	and removed from the system dynamically.
     52  *
     53  * Allocation
     54  *
 *	When the system initially boots, only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow while
 *	attaching new drivers, we allocate a fixed-size block of memory
 *	(MAXDEVSW entries) to hold the new, expanded index.  This "fork"
 *	of the table is only ever performed once, in order to guarantee
 *	that other threads may safely access the device tables:
     61  *
     62  *	o Once a thread has a "reference" to the table via an earlier
     63  *	  open() call, we know that the entry in the table must exist
     64  *	  and so it is safe to access it.
     65  *
     66  *	o Regardless of whether other threads see the old or new
     67  *	  pointers, they will point to a correct device switch
     68  *	  structure for the operation being performed.
     69  *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calls to the other methods remain valid until that reference
 *	is dropped.
     76  */
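
/*
 * Example (illustrative only; the "mydev" name and the mydev_* methods
 * are hypothetical): a driver typically declares its switch structures
 * and registers them with devsw_attach(), passing -1 to request
 * dynamically allocated majors:
 *
 *	static const struct bdevsw mydev_bdevsw = {
 *		.d_open = mydev_open,
 *		.d_close = mydev_close,
 *		.d_strategy = mydev_strategy,
 *		.d_ioctl = mydev_ioctl,
 *		.d_dump = nodump,
 *		.d_psize = nosize,
 *		.d_flag = D_DISK
 *	};
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		.d_open = mydev_open,
 *		.d_close = mydev_close,
 *		.d_read = mydev_read,
 *		.d_write = mydev_write,
 *		.d_ioctl = mydev_ioctl,
 *		.d_stop = nostop,
 *		.d_tty = notty,
 *		.d_poll = nopoll,
 *		.d_mmap = nommap,
 *		.d_kqfilter = nokqfilter,
 *		.d_flag = D_DISK
 *	};
 *
 *	int bmajor = -1, cmajor = -1;
 *	int error;
 *
 *	error = devsw_attach("mydev", &mydev_bdevsw, &bmajor,
 *	    &mydev_cdevsw, &cmajor);
 *
 * On success the majors chosen for the driver are returned through
 * bmajor and cmajor; devsw_detach(&mydev_bdevsw, &mydev_cdevsw) undoes
 * the registration.
 */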
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.15.6.6 2008/04/14 16:23:56 mjf Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/conf.h>
     83 #include <sys/kmem.h>
     84 #include <sys/systm.h>
     85 #include <sys/poll.h>
     86 #include <sys/tty.h>
     87 #include <sys/cpu.h>
     88 #include <sys/buf.h>
     89 #include <sys/dirent.h>
     90 #include <machine/stdarg.h>
     91 #include <sys/disklabel.h>
     92 
     93 #ifdef DEVSW_DEBUG
     94 #define	DPRINTF(x)	printf x
     95 #else /* DEVSW_DEBUG */
     96 #define	DPRINTF(x)
     97 #endif /* DEVSW_DEBUG */
     98 
#define	MAXDEVSW	512	/* the maximum number of device majors */
    100 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
    101 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
    102 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
    103 
    104 extern const struct bdevsw **bdevsw, *bdevsw0[];
    105 extern const struct cdevsw **cdevsw, *cdevsw0[];
    106 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    107 extern const int sys_bdevsws, sys_cdevsws;
    108 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    109 
    110 static int bdevsw_attach(const struct bdevsw *, int *);
    111 static int cdevsw_attach(const struct cdevsw *, int *);
    112 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    113 
    114 static struct device_name *device_name_alloc(dev_t, device_t, bool,
    115     enum devtype, const char *, va_list);
    116 
    117 kmutex_t devsw_lock;
    118 extern kmutex_t dname_lock;
    119 
    120 /*
    121  * A table of initialisation functions for device drivers that
    122  * don't have an attach routine.
    123  */
    124 void (*devsw_init_funcs[])(void) = {
    125 	bpf_init,
    126 	cttyinit,
    127 	mem_init,
    128 	swap_init,
    129 	NULL,
    130 };
    131 
    132 void
    133 devsw_init(void)
    134 {
    135 	int i;
    136 
    137 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    138 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    139 
    140 	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
    141 	mutex_init(&dname_lock, MUTEX_DEFAULT, IPL_NONE);
    142 	TAILQ_INIT(&device_names);
    143 
	/*
	 * Some device drivers never get 'attached' via autoconf(9),
	 * so we provide this table to allow such drivers to register
	 * their device names at boot time.
	 */
    149 	for (i = 0; devsw_init_funcs[i] != NULL; i++)
    150 		devsw_init_funcs[i]();
    151 }
    152 
    153 int
    154 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    155 	     const struct cdevsw *cdev, int *cmajor)
    156 {
    157 	struct devsw_conv *conv;
    158 	char *name;
	int error, i;
	size_t len;
    160 
    161 	if (devname == NULL || cdev == NULL)
    162 		return (EINVAL);
    163 
    164 	mutex_enter(&devsw_lock);
    165 
    166 	for (i = 0 ; i < max_devsw_convs ; i++) {
    167 		conv = &devsw_conv[i];
    168 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    169 			continue;
    170 
    171 		if (*bmajor < 0)
    172 			*bmajor = conv->d_bmajor;
    173 		if (*cmajor < 0)
    174 			*cmajor = conv->d_cmajor;
    175 
    176 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    177 			error = EINVAL;
    178 			goto fail;
    179 		}
    180 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    181 			error = EINVAL;
    182 			goto fail;
    183 		}
    184 
    185 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    186 		    cdevsw[*cmajor] != NULL) {
    187 			error = EEXIST;
    188 			goto fail;
    189 		}
    190 
    191 		if (bdev != NULL)
    192 			bdevsw[*bmajor] = bdev;
    193 		cdevsw[*cmajor] = cdev;
    194 
    195 		mutex_exit(&devsw_lock);
    196 		return (0);
    197 	}
    198 
    199 	error = bdevsw_attach(bdev, bmajor);
    200 	if (error != 0)
    201 		goto fail;
    202 	error = cdevsw_attach(cdev, cmajor);
    203 	if (error != 0) {
    204 		devsw_detach_locked(bdev, NULL);
    205 		goto fail;
    206 	}
    207 
    208 	for (i = 0 ; i < max_devsw_convs ; i++) {
    209 		if (devsw_conv[i].d_name == NULL)
    210 			break;
    211 	}
    212 	if (i == max_devsw_convs) {
    213 		struct devsw_conv *newptr;
    214 		int old, new;
    215 
    216 		old = max_devsw_convs;
    217 		new = old + 1;
    218 
    219 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
    220 		if (newptr == NULL) {
    221 			devsw_detach_locked(bdev, cdev);
    222 			error = ENOMEM;
    223 			goto fail;
    224 		}
    225 		newptr[old].d_name = NULL;
    226 		newptr[old].d_bmajor = -1;
    227 		newptr[old].d_cmajor = -1;
    228 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
    229 		if (devsw_conv != devsw_conv0)
    230 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
    231 		devsw_conv = newptr;
    232 		max_devsw_convs = new;
    233 	}
    234 
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;
    246 
    247 	mutex_exit(&devsw_lock);
    248 	return (0);
    249  fail:
    250 	mutex_exit(&devsw_lock);
    251 	return (error);
    252 }
    253 
    254 static int
    255 bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
    256 {
    257 	const struct bdevsw **newptr;
    258 	int bmajor, i;
    259 
    260 	KASSERT(mutex_owned(&devsw_lock));
    261 
    262 	if (devsw == NULL)
    263 		return (0);
    264 
    265 	if (*devmajor < 0) {
    266 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    267 			if (bdevsw[bmajor] != NULL)
    268 				continue;
    269 			for (i = 0 ; i < max_devsw_convs ; i++) {
    270 				if (devsw_conv[i].d_bmajor == bmajor)
    271 					break;
    272 			}
    273 			if (i != max_devsw_convs)
    274 				continue;
    275 			break;
    276 		}
    277 		*devmajor = bmajor;
    278 	}
    279 
    280 	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted\n");
    282 		return (ENOMEM);
    283 	}
    284 
    285 	if (*devmajor >= max_bdevsws) {
    286 		KASSERT(bdevsw == bdevsw0);
    287 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    288 		if (newptr == NULL)
    289 			return (ENOMEM);
    290 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    291 		bdevsw = newptr;
    292 		max_bdevsws = MAXDEVSW;
    293 	}
    294 
    295 	if (bdevsw[*devmajor] != NULL)
    296 		return (EEXIST);
    297 
    298 	bdevsw[*devmajor] = devsw;
    299 
    300 	return (0);
    301 }
    302 
    303 static int
    304 cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
    305 {
    306 	const struct cdevsw **newptr;
    307 	int cmajor, i;
    308 
    309 	KASSERT(mutex_owned(&devsw_lock));
    310 
    311 	if (*devmajor < 0) {
    312 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    313 			if (cdevsw[cmajor] != NULL)
    314 				continue;
    315 			for (i = 0 ; i < max_devsw_convs ; i++) {
    316 				if (devsw_conv[i].d_cmajor == cmajor)
    317 					break;
    318 			}
    319 			if (i != max_devsw_convs)
    320 				continue;
    321 			break;
    322 		}
    323 		*devmajor = cmajor;
    324 	}
    325 
    326 	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted\n");
    328 		return (ENOMEM);
    329 	}
    330 
    331 	if (*devmajor >= max_cdevsws) {
    332 		KASSERT(cdevsw == cdevsw0);
    333 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    334 		if (newptr == NULL)
    335 			return (ENOMEM);
    336 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    337 		cdevsw = newptr;
    338 		max_cdevsws = MAXDEVSW;
    339 	}
    340 
    341 	if (cdevsw[*devmajor] != NULL)
    342 		return (EEXIST);
    343 
    344 	cdevsw[*devmajor] = devsw;
    345 
    346 	return (0);
    347 }
    348 
    349 static void
    350 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
    351 {
    352 	int i;
    353 
    354 	KASSERT(mutex_owned(&devsw_lock));
    355 
    356 	if (bdev != NULL) {
    357 		for (i = 0 ; i < max_bdevsws ; i++) {
    358 			if (bdevsw[i] != bdev)
    359 				continue;
    360 			bdevsw[i] = NULL;
    361 			break;
    362 		}
    363 	}
    364 	if (cdev != NULL) {
    365 		for (i = 0 ; i < max_cdevsws ; i++) {
    366 			if (cdevsw[i] != cdev)
    367 				continue;
    368 			cdevsw[i] = NULL;
    369 			break;
    370 		}
    371 	}
    372 }
    373 
    374 void
    375 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    376 {
    377 
    378 	mutex_enter(&devsw_lock);
    379 	devsw_detach_locked(bdev, cdev);
    380 	mutex_exit(&devsw_lock);
    381 }
    382 
    383 /*
    384  * Look up a block device by number.
    385  *
    386  * => Caller must ensure that the device is attached.
    387  */
    388 const struct bdevsw *
    389 bdevsw_lookup(dev_t dev)
    390 {
    391 	int bmajor;
    392 
    393 	if (dev == NODEV)
    394 		return (NULL);
    395 	bmajor = major(dev);
    396 	if (bmajor < 0 || bmajor >= max_bdevsws)
    397 		return (NULL);
    398 
    399 	return (bdevsw[bmajor]);
    400 }
    401 
    402 /*
    403  * Look up a character device by number.
    404  *
    405  * => Caller must ensure that the device is attached.
    406  */
    407 const struct cdevsw *
    408 cdevsw_lookup(dev_t dev)
    409 {
    410 	int cmajor;
    411 
    412 	if (dev == NODEV)
    413 		return (NULL);
    414 	cmajor = major(dev);
    415 	if (cmajor < 0 || cmajor >= max_cdevsws)
    416 		return (NULL);
    417 
    418 	return (cdevsw[cmajor]);
    419 }
    420 
    421 /*
    422  * Look up a block device by reference to its operations set.
    423  *
    424  * => Caller must ensure that the device is not detached, and therefore
    425  *    that the returned major is still valid when dereferenced.
    426  */
    427 int
    428 bdevsw_lookup_major(const struct bdevsw *bdev)
    429 {
    430 	int bmajor;
    431 
    432 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    433 		if (bdevsw[bmajor] == bdev)
    434 			return (bmajor);
    435 	}
    436 
    437 	return (-1);
    438 }
    439 
    440 /*
    441  * Look up a character device by reference to its operations set.
    442  *
    443  * => Caller must ensure that the device is not detached, and therefore
    444  *    that the returned major is still valid when dereferenced.
    445  */
    446 int
    447 cdevsw_lookup_major(const struct cdevsw *cdev)
    448 {
    449 	int cmajor;
    450 
    451 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    452 		if (cdevsw[cmajor] == cdev)
    453 			return (cmajor);
    454 	}
    455 
    456 	return (-1);
    457 }
    458 
    459 /*
    460  * Convert from block major number to name.
    461  *
    462  * => Caller must ensure that the device is not detached, and therefore
    463  *    that the name pointer is still valid when dereferenced.
    464  */
    465 const char *
    466 devsw_blk2name(int bmajor)
    467 {
    468 	const char *name;
    469 	int cmajor, i;
    470 
    471 	name = NULL;
    472 	cmajor = -1;
    473 
    474 	mutex_enter(&devsw_lock);
    475 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    476 		mutex_exit(&devsw_lock);
    477 		return (NULL);
    478 	}
    479 	for (i = 0 ; i < max_devsw_convs; i++) {
    480 		if (devsw_conv[i].d_bmajor == bmajor) {
    481 			cmajor = devsw_conv[i].d_cmajor;
    482 			break;
    483 		}
    484 	}
    485 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    486 		name = devsw_conv[i].d_name;
    487 	mutex_exit(&devsw_lock);
    488 
    489 	return (name);
    490 }
    491 
    492 /*
    493  * Convert from device name to block major number.
    494  *
    495  * => Caller must ensure that the device is not detached, and therefore
    496  *    that the major number is still valid when dereferenced.
    497  */
    498 int
    499 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    500 {
    501 	struct devsw_conv *conv;
    502 	int bmajor, i;
    503 
    504 	if (name == NULL)
    505 		return (-1);
    506 
    507 	mutex_enter(&devsw_lock);
    508 	for (i = 0 ; i < max_devsw_convs ; i++) {
    509 		size_t len;
    510 
    511 		conv = &devsw_conv[i];
    512 		if (conv->d_name == NULL)
    513 			continue;
    514 		len = strlen(conv->d_name);
    515 		if (strncmp(conv->d_name, name, len) != 0)
    516 			continue;
		if (*(name + len) && !isdigit(*(name + len)))
    518 			continue;
    519 		bmajor = conv->d_bmajor;
    520 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    521 		    bdevsw[bmajor] == NULL)
    522 			break;
    523 		if (devname != NULL) {
    524 #ifdef DEVSW_DEBUG
    525 			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too short\n");
    527 #endif /* DEVSW_DEBUG */
    528 			strncpy(devname, conv->d_name, devnamelen);
    529 			devname[devnamelen - 1] = '\0';
    530 		}
    531 		mutex_exit(&devsw_lock);
    532 		return (bmajor);
    533 	}
    534 
    535 	mutex_exit(&devsw_lock);
    536 	return (-1);
    537 }
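
/*
 * Illustrative use (the "wd" driver name is only an example): given a
 * device node name with a trailing unit/partition suffix, the prefix
 * match above recovers the registered driver name and its block major:
 *
 *	char drvname[16];
 *	int bmajor;
 *
 *	bmajor = devsw_name2blk("wd0a", drvname, sizeof(drvname));
 *
 * If a "wd" block driver is registered, bmajor is its block major and
 * drvname contains "wd"; otherwise -1 is returned.
 */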
    538 
    539 /*
    540  * Convert from device name to char major number.
    541  *
    542  * => Caller must ensure that the device is not detached, and therefore
    543  *    that the major number is still valid when dereferenced.
    544  */
    545 int
    546 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    547 {
    548 	struct devsw_conv *conv;
    549 	int cmajor, i;
    550 
    551 	if (name == NULL)
    552 		return (-1);
    553 
    554 	mutex_enter(&devsw_lock);
    555 	for (i = 0 ; i < max_devsw_convs ; i++) {
    556 		size_t len;
    557 
    558 		conv = &devsw_conv[i];
    559 		if (conv->d_name == NULL)
    560 			continue;
    561 		len = strlen(conv->d_name);
    562 		if (strncmp(conv->d_name, name, len) != 0)
    563 			continue;
		if (*(name + len) && !isdigit(*(name + len)))
    565 			continue;
    566 		cmajor = conv->d_cmajor;
    567 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    568 		    cdevsw[cmajor] == NULL)
    569 			break;
    570 		if (devname != NULL) {
    571 #ifdef DEVSW_DEBUG
    572 			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: buffer too short\n");
    574 #endif /* DEVSW_DEBUG */
    575 			strncpy(devname, conv->d_name, devnamelen);
    576 			devname[devnamelen - 1] = '\0';
    577 		}
    578 		mutex_exit(&devsw_lock);
    579 		return (cmajor);
    580 	}
    581 
    582 	mutex_exit(&devsw_lock);
    583 	return (-1);
    584 }
    585 
    586 /*
    587  * Convert from character dev_t to block dev_t.
    588  *
    589  * => Caller must ensure that the device is not detached, and therefore
    590  *    that the major number is still valid when dereferenced.
    591  */
    592 dev_t
    593 devsw_chr2blk(dev_t cdev)
    594 {
    595 	int bmajor, cmajor, i;
    596 	dev_t rv;
    597 
    598 	cmajor = major(cdev);
    599 	bmajor = -1;
    600 	rv = NODEV;
    601 
    602 	mutex_enter(&devsw_lock);
    603 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    604 		mutex_exit(&devsw_lock);
    605 		return (NODEV);
    606 	}
    607 	for (i = 0 ; i < max_devsw_convs ; i++) {
    608 		if (devsw_conv[i].d_cmajor == cmajor) {
    609 			bmajor = devsw_conv[i].d_bmajor;
    610 			break;
    611 		}
    612 	}
    613 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    614 		rv = makedev(bmajor, minor(cdev));
    615 	mutex_exit(&devsw_lock);
    616 
    617 	return (rv);
    618 }
    619 
    620 /*
    621  * Convert from block dev_t to character dev_t.
    622  *
    623  * => Caller must ensure that the device is not detached, and therefore
    624  *    that the major number is still valid when dereferenced.
    625  */
    626 dev_t
    627 devsw_blk2chr(dev_t bdev)
    628 {
    629 	int bmajor, cmajor, i;
    630 	dev_t rv;
    631 
    632 	bmajor = major(bdev);
    633 	cmajor = -1;
    634 	rv = NODEV;
    635 
    636 	mutex_enter(&devsw_lock);
    637 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    638 		mutex_exit(&devsw_lock);
    639 		return (NODEV);
    640 	}
    641 	for (i = 0 ; i < max_devsw_convs ; i++) {
    642 		if (devsw_conv[i].d_bmajor == bmajor) {
    643 			cmajor = devsw_conv[i].d_cmajor;
    644 			break;
    645 		}
    646 	}
    647 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    648 		rv = makedev(cmajor, minor(bdev));
    649 	mutex_exit(&devsw_lock);
    650 
    651 	return (rv);
    652 }
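
/*
 * Illustrative sketch of the dev_t conversions (the variables here are
 * assumptions of the example): given the block dev_t of a partition,
 * the corresponding character (raw) dev_t keeps the same minor:
 *
 *	dev_t bdev, rawdev;
 *
 *	bdev = makedev(bmajor, minor_no);
 *	rawdev = devsw_blk2chr(bdev);
 *	if (rawdev != NODEV) {
 *		... use rawdev; devsw_chr2blk(rawdev) maps back to bdev ...
 *	}
 */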
    653 
    654 /*
    655  * Device access methods.
    656  */
    657 
    658 #define	DEV_LOCK(d)						\
    659 	if ((d->d_flag & D_MPSAFE) == 0) {			\
    660 		KERNEL_LOCK(1, curlwp);				\
    661 	}
    662 
    663 #define	DEV_UNLOCK(d)						\
    664 	if ((d->d_flag & D_MPSAFE) == 0) {			\
    665 		KERNEL_UNLOCK_ONE(curlwp);			\
    666 	}
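
/*
 * The wrappers below only take the kernel lock around a driver call
 * when the driver has not declared itself MP-safe.  As an illustration
 * (hypothetical driver), a driver opts out of the kernel lock by
 * setting D_MPSAFE in the d_flag member of its switch structure:
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		...
 *		.d_flag = D_OTHER | D_MPSAFE
 *	};
 *
 * in which case DEV_LOCK()/DEV_UNLOCK() become no-ops for that driver
 * and it must provide its own locking.
 */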
    667 
    668 int
    669 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    670 {
    671 	const struct bdevsw *d;
    672 	int rv;
    673 
    674 	/*
    675 	 * For open we need to lock, in order to synchronize
    676 	 * with attach/detach.
    677 	 */
    678 	mutex_enter(&devsw_lock);
    679 	d = bdevsw_lookup(dev);
    680 	mutex_exit(&devsw_lock);
    681 	if (d == NULL)
    682 		return ENXIO;
    683 
    684 	DEV_LOCK(d);
    685 	rv = (*d->d_open)(dev, flag, devtype, l);
    686 	DEV_UNLOCK(d);
    687 
    688 	return rv;
    689 }
    690 
    691 int
    692 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    693 {
    694 	const struct bdevsw *d;
    695 	int rv;
    696 
    697 	if ((d = bdevsw_lookup(dev)) == NULL)
    698 		return ENXIO;
    699 
    700 	DEV_LOCK(d);
    701 	rv = (*d->d_close)(dev, flag, devtype, l);
    702 	DEV_UNLOCK(d);
    703 
    704 	return rv;
    705 }
    706 
    707 void
    708 bdev_strategy(struct buf *bp)
    709 {
    710 	const struct bdevsw *d;
    711 
    712 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
    713 		panic("bdev_strategy");
    714 
    715 	DEV_LOCK(d);
    716 	(*d->d_strategy)(bp);
    717 	DEV_UNLOCK(d);
    718 }
    719 
    720 int
    721 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    722 {
    723 	const struct bdevsw *d;
    724 	int rv;
    725 
    726 	if ((d = bdevsw_lookup(dev)) == NULL)
    727 		return ENXIO;
    728 
    729 	DEV_LOCK(d);
    730 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    731 	DEV_UNLOCK(d);
    732 
    733 	return rv;
    734 }
    735 
    736 int
    737 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    738 {
    739 	const struct bdevsw *d;
    740 	int rv;
    741 
    742 	/*
    743 	 * Dump can be called without the device open.  Since it can
    744 	 * currently only be called with the system paused (and in a
    745 	 * potentially unstable state), we don't perform any locking.
    746 	 */
    747 	if ((d = bdevsw_lookup(dev)) == NULL)
    748 		return ENXIO;
    749 
    750 	/* DEV_LOCK(d); */
    751 	rv = (*d->d_dump)(dev, addr, data, sz);
    752 	/* DEV_UNLOCK(d); */
    753 
    754 	return rv;
    755 }
    756 
    757 int
    758 bdev_type(dev_t dev)
    759 {
    760 	const struct bdevsw *d;
    761 
    762 	if ((d = bdevsw_lookup(dev)) == NULL)
    763 		return D_OTHER;
    764 	return d->d_flag & D_TYPEMASK;
    765 }
    766 
    767 int
    768 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    769 {
    770 	const struct cdevsw *d;
    771 	int rv;
    772 
    773 	/*
    774 	 * For open we need to lock, in order to synchronize
    775 	 * with attach/detach.
    776 	 */
    777 	mutex_enter(&devsw_lock);
    778 	d = cdevsw_lookup(dev);
    779 	mutex_exit(&devsw_lock);
    780 	if (d == NULL)
    781 		return ENXIO;
    782 
    783 	DEV_LOCK(d);
    784 	rv = (*d->d_open)(dev, flag, devtype, l);
    785 	DEV_UNLOCK(d);
    786 
    787 	return rv;
    788 }
    789 
    790 int
    791 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    792 {
    793 	const struct cdevsw *d;
    794 	int rv;
    795 
    796 	if ((d = cdevsw_lookup(dev)) == NULL)
    797 		return ENXIO;
    798 
    799 	DEV_LOCK(d);
    800 	rv = (*d->d_close)(dev, flag, devtype, l);
    801 	DEV_UNLOCK(d);
    802 
    803 	return rv;
    804 }
    805 
    806 int
    807 cdev_read(dev_t dev, struct uio *uio, int flag)
    808 {
    809 	const struct cdevsw *d;
    810 	int rv;
    811 
    812 	if ((d = cdevsw_lookup(dev)) == NULL)
    813 		return ENXIO;
    814 
    815 	DEV_LOCK(d);
    816 	rv = (*d->d_read)(dev, uio, flag);
    817 	DEV_UNLOCK(d);
    818 
    819 	return rv;
    820 }
    821 
    822 int
    823 cdev_write(dev_t dev, struct uio *uio, int flag)
    824 {
    825 	const struct cdevsw *d;
    826 	int rv;
    827 
    828 	if ((d = cdevsw_lookup(dev)) == NULL)
    829 		return ENXIO;
    830 
    831 	DEV_LOCK(d);
    832 	rv = (*d->d_write)(dev, uio, flag);
    833 	DEV_UNLOCK(d);
    834 
    835 	return rv;
    836 }
    837 
    838 int
    839 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    840 {
    841 	const struct cdevsw *d;
    842 	int rv;
    843 
    844 	if ((d = cdevsw_lookup(dev)) == NULL)
    845 		return ENXIO;
    846 
    847 	DEV_LOCK(d);
    848 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    849 	DEV_UNLOCK(d);
    850 
    851 	return rv;
    852 }
    853 
    854 void
    855 cdev_stop(struct tty *tp, int flag)
    856 {
    857 	const struct cdevsw *d;
    858 
    859 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
    860 		return;
    861 
    862 	DEV_LOCK(d);
    863 	(*d->d_stop)(tp, flag);
    864 	DEV_UNLOCK(d);
    865 }
    866 
    867 struct tty *
    868 cdev_tty(dev_t dev)
    869 {
    870 	const struct cdevsw *d;
    871 	struct tty * rv;
    872 
    873 	if ((d = cdevsw_lookup(dev)) == NULL)
    874 		return NULL;
    875 
    876 	/* XXX Check if necessary. */
    877 	if (d->d_tty == NULL)
    878 		return NULL;
    879 
    880 	DEV_LOCK(d);
    881 	rv = (*d->d_tty)(dev);
    882 	DEV_UNLOCK(d);
    883 
    884 	return rv;
    885 }
    886 
    887 int
    888 cdev_poll(dev_t dev, int flag, lwp_t *l)
    889 {
    890 	const struct cdevsw *d;
    891 	int rv;
    892 
    893 	if ((d = cdevsw_lookup(dev)) == NULL)
    894 		return POLLERR;
    895 
    896 	DEV_LOCK(d);
    897 	rv = (*d->d_poll)(dev, flag, l);
    898 	DEV_UNLOCK(d);
    899 
    900 	return rv;
    901 }
    902 
    903 paddr_t
    904 cdev_mmap(dev_t dev, off_t off, int flag)
    905 {
    906 	const struct cdevsw *d;
    907 	paddr_t rv;
    908 
    909 	if ((d = cdevsw_lookup(dev)) == NULL)
    910 		return (paddr_t)-1LL;
    911 
    912 	DEV_LOCK(d);
    913 	rv = (*d->d_mmap)(dev, off, flag);
    914 	DEV_UNLOCK(d);
    915 
    916 	return rv;
    917 }
    918 
    919 int
    920 cdev_kqfilter(dev_t dev, struct knote *kn)
    921 {
    922 	const struct cdevsw *d;
    923 	int rv;
    924 
    925 	if ((d = cdevsw_lookup(dev)) == NULL)
    926 		return ENXIO;
    927 
    928 	DEV_LOCK(d);
    929 	rv = (*d->d_kqfilter)(dev, kn);
    930 	DEV_UNLOCK(d);
    931 
    932 	return rv;
    933 }
    934 
    935 int
    936 cdev_type(dev_t dev)
    937 {
    938 	const struct cdevsw *d;
    939 
    940 	if ((d = cdevsw_lookup(dev)) == NULL)
    941 		return D_OTHER;
    942 	return d->d_flag & D_TYPEMASK;
    943 }
    944 
    945 static struct device_name *
    946 device_name_alloc(dev_t dev, device_t devp, bool cdev,
    947 	enum devtype dtype, const char *fmt, va_list src)
    948 {
    949 	struct device_name *dn;
    950 	va_list dst;
    951 
    952 	/* TODO: Check for aliases */
    953 
    954 	dn = kmem_zalloc(sizeof(*dn), KM_NOSLEEP);
    955 	if (dn == NULL)
    956 		return NULL;
    957 
    958 	dn->d_dev = dev;
    959 	dn->d_devp = devp;
    960 	dn->d_char = cdev;
    961 	dn->d_type = dtype;
    962 
	dn->d_name = kmem_zalloc(MAXNAMLEN, KM_NOSLEEP);
	if (dn->d_name == NULL) {
		kmem_free(dn, sizeof(*dn));
		return NULL;
	}
	va_copy(dst, src);
	vsnprintf(dn->d_name, MAXNAMLEN, fmt, dst);
	va_end(dst);
    967 
    968 	return dn;
    969 }
    970 
/*
 * Register a dev_t and name for a device driver with devfs.
 * We maintain a TAILQ of registered device driver names and dev_t's.
 *
 * => if devp is NULL this device has no device_t instance.  An example
 *    of this is zero(4).
 *
 * => if there already exists another name for this dev_t, then 'name'
 *    is assumed to be an alias of a previously registered device driver.
 *    TODO: The above isn't actually true at the moment; we just return 0.
 *
 * => 'cdev' indicates whether this is a character or block device:
 *    true for a character device, false for a block device.
 */
    986 int
    987 device_register_name(dev_t dev, device_t devp, bool cdev,
    988 	enum devtype dtype, const char *fmt, ...)
    989 {
    990 	struct device_name *dn;
    991 	va_list ap;
    992 
	va_start(ap, fmt);
	dn = device_name_alloc(dev, devp, cdev, dtype, fmt, ap);
	va_end(ap);

	if (dn == NULL)
		return ENOMEM;
    999 
   1000 	mutex_enter(&dname_lock);
   1001 	TAILQ_INSERT_TAIL(&device_names, dn, d_next);
   1002 	mutex_exit(&dname_lock);
   1003 
   1004 	return 0;
   1005 }
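
/*
 * Illustrative call (hypothetical pseudo-device with no device_t
 * backing it, as with zero(4) above; "mydev" and dtype are assumptions
 * of the sketch, with dtype being whatever enum devtype value fits the
 * driver):
 *
 *	error = device_register_name(makedev(cmajor, 0), NULL, true,
 *	    dtype, "mydev%d", 0);
 *
 * This queues the name "mydev0" for creation in any mounted devfs.
 */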
   1006 
   1007 /*
   1008  * Remove a previously registered name for 'dev'.
   1009  *
   1010  * => This must be called twice with different values for 'dev' if
   1011  *    the caller previously registered a name for a character device
   1012  *    and a name for a block device.
   1013  */
   1014 int
   1015 device_deregister_name(dev_t dev, const char *fmt, ...)
   1016 {
   1017 	int error = 0;
   1018 	struct device_name *dn;
   1019 	va_list ap;
   1020 	char name[MAXNAMLEN];
   1021 
   1022 	va_start(ap, fmt);
   1023 	vsnprintf(name, MAXNAMLEN, fmt, ap);
   1024 	va_end(ap);
   1025 
   1026 	mutex_enter(&dname_lock);
   1027 	TAILQ_FOREACH(dn, &device_names, d_next) {
   1028 		if ((strcmp(dn->d_name, name) == 0) && (dn->d_gone == false))
   1029 			break;
   1030 	}
   1031 
   1032 	if (dn != NULL)
   1033 		dn->d_gone = true;
   1034 	else
   1035 		error = EINVAL;
   1036 
   1037 	mutex_exit(&dname_lock);
   1038 	return error;
   1039 }
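
/*
 * As noted above, a driver that registered both a block and a character
 * name must deregister each name separately, e.g. (hypothetical names
 * and dev_t values):
 *
 *	(void)device_deregister_name(blkdev, "mydev%d", unit);
 *	(void)device_deregister_name(chrdev, "rmydev%d", unit);
 */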
   1040 
   1041 /*
   1042  * Remove all device names for this device_t.
   1043  */
   1044 int
   1045 device_deregister_all(device_t dev)
   1046 {
   1047 	struct device_name *dn;
   1048 
   1049 	mutex_enter(&dname_lock);
   1050 	TAILQ_FOREACH(dn, &device_names, d_next) {
   1051 		if ((dn->d_devp == dev) && (dn->d_gone == false))
   1052 			dn->d_gone = true;
   1053 	}
   1054 	mutex_exit(&dname_lock);
   1055 	return 0;
   1056 }
   1057 
   1058 struct device_name *
   1059 device_lookup_info(dev_t dev, int is_char)
   1060 {
   1061 	struct device_name *dn;
   1062 
   1063 	mutex_enter(&dname_lock);
   1064 	TAILQ_FOREACH(dn, &device_names, d_next) {
   1065 		if ((dn->d_dev == dev) && (dn->d_char == is_char))
   1066 			break;
   1067 	}
   1068 	mutex_exit(&dname_lock);
   1069 
   1070 	return dn;
   1071 }
   1072 
   1073 /*
   1074  * Register a name for a device_t and wait for the device file to be
   1075  * created in devfs mounts. Normally this operation is asynchronous in
   1076  * the sense that a device name is registered and at some later time
   1077  * a device file will appear in a devfs mount.
   1078  *
 * cond  - a kernel condition variable used to wait for the creation
 * ticks - timeout in units of hz; a value <= 0 waits indefinitely
   1081  *
   1082  * NOTE: There is no guarantee that a device file will be created,
   1083  *	 however, the caller will be notified in a synchronous manner
   1084  *	 whether the creation failed or not.
   1085  */
   1086 int
   1087 device_register_sync(dev_t dev, device_t devp, bool cdev,
   1088 	enum devtype dtype, kcondvar_t cond, int ticks, const char *fmt, ...)
   1089 {
   1090 	int error = 0;
   1091 	struct device_name *dn;
   1092 	va_list ap;
   1093 
	va_start(ap, fmt);
	dn = device_name_alloc(dev, devp, cdev, dtype, fmt, ap);
	va_end(ap);

	if (dn == NULL)
		return ENOMEM;
	dn->d_busy = true;
	dn->d_cv = cond;
   1102 
   1103 	mutex_enter(&dname_lock);
   1104 	TAILQ_INSERT_TAIL(&device_names, dn, d_next);
   1105 	mutex_exit(&dname_lock);
   1106 
   1107 	mutex_init(&dn->d_cvmutex, MUTEX_DEFAULT, IPL_NONE);
   1108 
   1109 	mutex_enter(&dn->d_cvmutex);
   1110 
	while (dn->d_busy == true) {
		if (ticks <= 0)
			error = cv_wait_sig(&dn->d_cv, &dn->d_cvmutex);
		else
			error = cv_timedwait_sig(&dn->d_cv,
			    &dn->d_cvmutex, ticks);
		/* Stop waiting if interrupted or timed out. */
		if (error != 0)
			break;
	}
	if (error == 0)
		error = dn->d_retval;
   1120 	mutex_exit(&dn->d_cvmutex);
   1121 
   1122 	return error;
   1123 }
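
/*
 * Illustrative use of device_register_sync() (the names, dtype, self
 * and the condition variable are assumptions of this sketch): the
 * caller supplies a condition variable and blocks until the devfs node
 * has been created, the timeout expires, or a signal is received:
 *
 *	kcondvar_t cv;
 *	int error;
 *
 *	cv_init(&cv, "mydevcv");
 *	error = device_register_sync(makedev(cmajor, 0), self, true,
 *	    dtype, cv, 5 * hz, "mydev%d", 0);
 *	if (error != 0)
 *		... the node was not created (or the wait failed) ...
 */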
   1124