Home | History | Annotate | Line # | Download | only in kern
subr_devsw.c revision 1.34.2.10
      1 /*	$NetBSD: subr_devsw.c,v 1.34.2.10 2016/07/22 11:59:51 pgoyette Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
     48  *	When the system initially boots only the statically allocated
     49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
     50  *	allocation, we allocate a fixed block of memory to hold the new,
     51  *	expanded index.  This "fork" of the table is only ever performed
     52  *	once in order to guarantee that other threads may safely access
     53  *	the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
     63  *	XXX Currently, the wrapper methods such as cdev_read() verify
     64  *	that a device driver does in fact exist before calling the
     65  *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
     67  *	calling	the other methods should be valid until that reference
     68  *	is dropped.
     69  */
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.10 2016/07/22 11:59:51 pgoyette Exp $");
     73 
     74 #ifdef _KERNEL_OPT
     75 #include "opt_dtrace.h"
     76 #endif
     77 
     78 #include <sys/param.h>
     79 #include <sys/conf.h>
     80 #include <sys/kmem.h>
     81 #include <sys/systm.h>
     82 #include <sys/poll.h>
     83 #include <sys/tty.h>
     84 #include <sys/cpu.h>
     85 #include <sys/buf.h>
     86 #include <sys/reboot.h>
     87 #include <sys/sdt.h>
     88 #include <sys/atomic.h>
     89 #include <sys/condvar.h>
     90 #include <sys/localcount.h>
     91 #include <sys/pserialize.h>
     92 
     93 #ifdef DEVSW_DEBUG
     94 #define	DPRINTF(x)	printf x
     95 #else /* DEVSW_DEBUG */
     96 #define	DPRINTF(x)
     97 #endif /* DEVSW_DEBUG */
     98 
     99 #define	MAXDEVSW	512	/* the maximum of major device number */
    100 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
    101 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
    102 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
    103 
    104 extern const struct bdevsw **bdevsw, *bdevsw0[];
    105 extern const struct cdevsw **cdevsw, *cdevsw0[];
    106 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    107 extern const int sys_bdevsws, sys_cdevsws;
    108 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    109 
    110 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
    111 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
    112 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    113 
    114 kmutex_t	device_lock;
    115 kcondvar_t	device_cv;
    116 pserialize_t	device_psz = NULL;
    117 
    118 void (*biodone_vfs)(buf_t *) = (void *)nullop;
    119 
/*
 * Initialize the device-switch infrastructure: the lock and condvar
 * that guard the bdevsw/cdevsw tables.  Called once early at boot,
 * before any dynamic devsw attach/detach can occur.
 */
void
devsw_init(void)
{

	/* The statically-configured tables must fit within MAXDEVSW. */
	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&device_cv, "devsw");
}
    129 
/*
 * Create the pserialize instance used by devsw_detach_locked() to wait
 * out lookup_acquire readers.  Runs separately from (and later than)
 * devsw_init().
 * NOTE(review): until this runs, device_psz is NULL and a detach would
 * hand NULL to pserialize_perform() -- confirm no detach can happen
 * before this initialization.
 */
void
devsw_detach_init(void)
{

	device_psz = pserialize_create();
}
    136 
/*
 * Attach a driver's device switches and record its name<->major mapping.
 *
 * => devname and cdev are mandatory; bdev/bmajor may be NULL/-1 for
 *    character-only drivers.
 * => *bmajor / *cmajor may be -1 to request dynamic major allocation;
 *    on success they hold the majors actually used.
 * => Returns 0 on success, or EINVAL/EEXIST/ENOMEM.
 */
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/* Dynamic devsws must carry a (distinct) localcount each. */
	if (bdev != NULL) {
		KASSERTMSG(bdev->d_localcount != NULL,
		    "%s: bdev %s has no d_localcount", __func__, devname);
		KASSERTMSG(bdev->d_localcount != cdev->d_localcount,
		    "%s: bdev and cdev for %s have same d_localcount",
		    __func__, devname);
	}
	/* NOTE(review): cdev was already checked non-NULL above. */
	if (cdev != NULL)
		KASSERTMSG(cdev->d_localcount != NULL,
		    "%s: cdev %s has no d_localcount", __func__, devname);

	/*
	 * If the name was registered before (e.g. the driver was
	 * previously detached), reuse its recorded majors rather than
	 * allocating new ones.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		/* Default the requested majors from the recorded ones. */
		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* Explicitly-requested majors must match the record. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		/* A block major requires a bdevsw; a char major is required. */
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		/* The recorded slots must currently be vacant. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/* use membar_producer() to ensure visibility of the xdevsw */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* Not previously known: install into the switch tables. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free conversion-table slot, growing the table if full. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Mark the new (last) entry free, then copy the old table. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		/* The initial static table is never freed. */
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	/* Record the name -> (bmajor, cmajor) mapping. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
    257 
    258 static int
    259 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
    260 {
    261 	const struct bdevsw **newptr;
    262 	devmajor_t bmajor;
    263 	int i;
    264 
    265 	KASSERT(mutex_owned(&device_lock));
    266 
    267 	if (devsw == NULL)
    268 		return (0);
    269 
    270 	if (*devmajor < 0) {
    271 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    272 			if (bdevsw[bmajor] != NULL)
    273 				continue;
    274 			for (i = 0 ; i < max_devsw_convs ; i++) {
    275 				if (devsw_conv[i].d_bmajor == bmajor)
    276 					break;
    277 			}
    278 			if (i != max_devsw_convs)
    279 				continue;
    280 			break;
    281 		}
    282 		*devmajor = bmajor;
    283 	}
    284 
    285 	if (*devmajor >= MAXDEVSW) {
    286 		printf("%s: block majors exhausted", __func__);
    287 		return (ENOMEM);
    288 	}
    289 
    290 	if (*devmajor >= max_bdevsws) {
    291 		KASSERT(bdevsw == bdevsw0);
    292 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    293 		if (newptr == NULL)
    294 			return (ENOMEM);
    295 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    296 		bdevsw = newptr;
    297 		max_bdevsws = MAXDEVSW;
    298 	}
    299 
    300 	if (bdevsw[*devmajor] != NULL)
    301 		return (EEXIST);
    302 
    303 	/* ensure visibility of the bdevsw */
    304 	membar_producer();
    305 
    306 	bdevsw[*devmajor] = devsw;
    307 	KASSERTMSG(devsw->d_localcount != NULL, "%s: bdev for major %d has "
    308 	    "no localcount", __func__, *devmajor);
    309 	localcount_init(devsw->d_localcount);
    310 
    311 	return (0);
    312 }
    313 
    314 static int
    315 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
    316 {
    317 	const struct cdevsw **newptr;
    318 	devmajor_t cmajor;
    319 	int i;
    320 
    321 	KASSERT(mutex_owned(&device_lock));
    322 
    323 	if (*devmajor < 0) {
    324 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    325 			if (cdevsw[cmajor] != NULL)
    326 				continue;
    327 			for (i = 0 ; i < max_devsw_convs ; i++) {
    328 				if (devsw_conv[i].d_cmajor == cmajor)
    329 					break;
    330 			}
    331 			if (i != max_devsw_convs)
    332 				continue;
    333 			break;
    334 		}
    335 		*devmajor = cmajor;
    336 	}
    337 
    338 	if (*devmajor >= MAXDEVSW) {
    339 		printf("%s: character majors exhausted", __func__);
    340 		return (ENOMEM);
    341 	}
    342 
    343 	if (*devmajor >= max_cdevsws) {
    344 		KASSERT(cdevsw == cdevsw0);
    345 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    346 		if (newptr == NULL)
    347 			return (ENOMEM);
    348 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    349 		cdevsw = newptr;
    350 		max_cdevsws = MAXDEVSW;
    351 	}
    352 
    353 	if (cdevsw[*devmajor] != NULL)
    354 		return (EEXIST);
    355 
    356 	/* ensure visibility of the bdevsw */
    357 	membar_producer();
    358 
    359 	cdevsw[*devmajor] = devsw;
    360 	KASSERTMSG(devsw->d_localcount != NULL, "%s: cdev for major %d has "
    361 	    "no localcount", __func__, *devmajor);
    362 	localcount_init(devsw->d_localcount);
    363 
    364 	return (0);
    365 }
    366 
/*
 * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
 * drain any existing references.
 */
    372 
/*
 * Remove a driver's entries from the switch tables and wait for all
 * outstanding references to drain.
 *
 * => Must be called with device_lock held (localcount_drain() drops
 *    and re-takes it while waiting on device_cv).
 * => Either argument may be NULL, or may not be present in its table;
 *    only entries actually found are cleared and drained.
 */
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	/* Locate bdev's slot; i == max_bdevsws means "not present". */
	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount for major %d", __func__, i);
			break;
		}
	}
	/* Likewise for cdev. */
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount for major %d", __func__, j);
			break;
		}
	}
	/* Clear the slots first so no new references can be taken. */
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws )
		cdevsw[j] = NULL;

	/*
	 * Wait for all current readers to finish with the devsw's.
	 * NOTE(review): device_psz is NULL until devsw_detach_init()
	 * has run -- confirm no detach can occur before that point.
	 */
	pserialize_perform(device_psz);

	/*
	 * No new accessors can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays, so no new references can be
	 * acquired.  Wait for all existing references to drain,
	 * and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL ) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}
    426 
/*
 * Detach a driver's bdevsw and/or cdevsw, waiting for all references
 * to drain.  Public wrapper that takes device_lock around the locked
 * worker.  Always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
    436 
    437 /*
    438  * Look up a block device by number.
    439  *
    440  * => Caller must ensure that the device is attached.
    441  */
    442 const struct bdevsw *
    443 bdevsw_lookup(dev_t dev)
    444 {
    445 	devmajor_t bmajor;
    446 
    447 	if (dev == NODEV)
    448 		return (NULL);
    449 	bmajor = major(dev);
    450 	if (bmajor < 0 || bmajor >= max_bdevsws)
    451 		return (NULL);
    452 
    453 	return (bdevsw[bmajor]);
    454 }
    455 
    456 const struct bdevsw *
    457 bdevsw_lookup_acquire(dev_t dev)
    458 {
    459 	devmajor_t bmajor;
    460 	const struct bdevsw *bdev = NULL;
    461 	int s;
    462 
    463 	if (dev == NODEV)
    464 		return (NULL);
    465 	bmajor = major(dev);
    466 	if (bmajor < 0 || bmajor >= max_bdevsws)
    467 		return (NULL);
    468 
    469 	/* Start a read transaction to block localcount_drain() */
    470 	s = pserialize_read_enter();
    471 
    472 	/* Get the struct bdevsw pointer */
    473 	bdev = bdevsw[bmajor];
    474 	if (bdev == NULL)
    475 		goto out;
    476 
    477 	/* Wait for the content of the struct bdevsw to become visible */
    478 	membar_datadep_consumer();
    479 
    480 	/* If the devsw is not statically linked, acquire a reference */
    481 	if (bdevsw[bmajor]->d_localcount != NULL)
    482 		localcount_acquire(bdevsw[bmajor]->d_localcount);
    483 
    484  out:	pserialize_read_exit(s);
    485 
    486 	return bdev;
    487 }
    488 
/*
 * Release a reference taken by bdevsw_lookup_acquire().
 */
void
bdevsw_release(const struct bdevsw *bd)
{

	KASSERT(bd != NULL);
	/* Statically-linked devsws have no localcount; nothing to drop. */
	if (bd->d_localcount != NULL)
		localcount_release(bd->d_localcount, &device_cv, &device_lock);
}
    497 
    498 /*
    499  * Look up a character device by number.
    500  *
    501  * => Caller must ensure that the device is attached.
    502  */
    503 const struct cdevsw *
    504 cdevsw_lookup(dev_t dev)
    505 {
    506 	devmajor_t cmajor;
    507 
    508 	if (dev == NODEV)
    509 		return (NULL);
    510 	cmajor = major(dev);
    511 	if (cmajor < 0 || cmajor >= max_cdevsws)
    512 		return (NULL);
    513 
    514 	return (cdevsw[cmajor]);
    515 }
    516 
    517 const struct cdevsw *
    518 cdevsw_lookup_acquire(dev_t dev)
    519 {
    520 	devmajor_t cmajor;
    521 	const struct cdevsw *cdev = NULL;
    522 	int s;
    523 
    524 	if (dev == NODEV)
    525 		return (NULL);
    526 	cmajor = major(dev);
    527 	if (cmajor < 0 || cmajor >= max_cdevsws)
    528 		return (NULL);
    529 
    530 	/* Start a read transaction to block localcount_drain() */
    531 	s = pserialize_read_enter();
    532 
    533 	/* Get the struct bdevsw pointer */
    534 	cdev = cdevsw[cmajor];
    535 	if (cdev == NULL)
    536 		goto out;
    537 
    538 	/* Wait for the content of the struct cdevsw to become visible */
    539 	membar_datadep_consumer();
    540 
    541 	/* If the devsw is not statically linked, acquire a reference */
    542 	if (cdevsw[cmajor]->d_localcount != NULL)
    543 		localcount_acquire(cdevsw[cmajor]->d_localcount);
    544 
    545  out:	pserialize_read_exit(s);
    546 
    547 	return cdev;
    548 }
    549 
/*
 * Release a reference taken by cdevsw_lookup_acquire().
 */
void
cdevsw_release(const struct cdevsw *cd)
{

	KASSERT(cd != NULL);
	/* Statically-linked devsws have no localcount; nothing to drop. */
	if (cd->d_localcount != NULL)
		localcount_release(cd->d_localcount, &device_cv, &device_lock);
}
    558 
    559 /*
    560  * Look up a block device by reference to its operations set.
    561  *
    562  * => Caller must ensure that the device is not detached, and therefore
    563  *    that the returned major is still valid when dereferenced.
    564  */
    565 devmajor_t
    566 bdevsw_lookup_major(const struct bdevsw *bdev)
    567 {
    568 	devmajor_t bmajor;
    569 
    570 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    571 		if (bdevsw[bmajor] == bdev)
    572 			return (bmajor);
    573 	}
    574 
    575 	return (NODEVMAJOR);
    576 }
    577 
    578 /*
    579  * Look up a character device by reference to its operations set.
    580  *
    581  * => Caller must ensure that the device is not detached, and therefore
    582  *    that the returned major is still valid when dereferenced.
    583  */
    584 devmajor_t
    585 cdevsw_lookup_major(const struct cdevsw *cdev)
    586 {
    587 	devmajor_t cmajor;
    588 
    589 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    590 		if (cdevsw[cmajor] == cdev)
    591 			return (cmajor);
    592 	}
    593 
    594 	return (NODEVMAJOR);
    595 }
    596 
    597 /*
    598  * Convert from block major number to name.
    599  *
    600  * => Caller must ensure that the device is not detached, and therefore
    601  *    that the name pointer is still valid when dereferenced.
    602  */
    603 const char *
    604 devsw_blk2name(devmajor_t bmajor)
    605 {
    606 	const char *name;
    607 	devmajor_t cmajor;
    608 	int i;
    609 
    610 	name = NULL;
    611 	cmajor = -1;
    612 
    613 	mutex_enter(&device_lock);
    614 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    615 		mutex_exit(&device_lock);
    616 		return (NULL);
    617 	}
    618 	for (i = 0 ; i < max_devsw_convs; i++) {
    619 		if (devsw_conv[i].d_bmajor == bmajor) {
    620 			cmajor = devsw_conv[i].d_cmajor;
    621 			break;
    622 		}
    623 	}
    624 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    625 		name = devsw_conv[i].d_name;
    626 	mutex_exit(&device_lock);
    627 
    628 	return (name);
    629 }
    630 
    631 /*
    632  * Convert char major number to device driver name.
    633  */
    634 const char *
    635 cdevsw_getname(devmajor_t major)
    636 {
    637 	const char *name;
    638 	int i;
    639 
    640 	name = NULL;
    641 
    642 	if (major < 0)
    643 		return (NULL);
    644 
    645 	mutex_enter(&device_lock);
    646 	for (i = 0 ; i < max_devsw_convs; i++) {
    647 		if (devsw_conv[i].d_cmajor == major) {
    648 			name = devsw_conv[i].d_name;
    649 			break;
    650 		}
    651 	}
    652 	mutex_exit(&device_lock);
    653 	return (name);
    654 }
    655 
    656 /*
    657  * Convert block major number to device driver name.
    658  */
    659 const char *
    660 bdevsw_getname(devmajor_t major)
    661 {
    662 	const char *name;
    663 	int i;
    664 
    665 	name = NULL;
    666 
    667 	if (major < 0)
    668 		return (NULL);
    669 
    670 	mutex_enter(&device_lock);
    671 	for (i = 0 ; i < max_devsw_convs; i++) {
    672 		if (devsw_conv[i].d_bmajor == major) {
    673 			name = devsw_conv[i].d_name;
    674 			break;
    675 		}
    676 	}
    677 	mutex_exit(&device_lock);
    678 	return (name);
    679 }
    680 
    681 /*
    682  * Convert from device name to block major number.
    683  *
    684  * => Caller must ensure that the device is not detached, and therefore
    685  *    that the major number is still valid when dereferenced.
    686  */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		/*
		 * Accept "name" or "name<digits>" (e.g. "wd" or "wd0"):
		 * the driver name must be a prefix of the argument and
		 * any remainder must begin with a digit (the unit number).
		 * NOTE(review): the char is passed to isdigit() without an
		 * unsigned char cast -- fine for ASCII digits; confirm
		 * against the kernel's ctype implementation.
		 */
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name +len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		/* Name matched: stop even if the bdev is not attached. */
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		/* Optionally copy the canonical driver name back. */
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (bmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}
    728 
    729 /*
    730  * Convert from device name to char major number.
    731  *
    732  * => Caller must ensure that the device is not detached, and therefore
    733  *    that the major number is still valid when dereferenced.
    734  */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		/*
		 * Accept "name" or "name<digits>" (e.g. "tty" or "tty0"):
		 * the driver name must be a prefix of the argument and
		 * any remainder must begin with a digit (the unit number).
		 * NOTE(review): the char is passed to isdigit() without an
		 * unsigned char cast -- fine for ASCII digits; confirm
		 * against the kernel's ctype implementation.
		 */
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name +len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		/* Name matched: stop even if the cdev is not attached. */
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		/* Optionally copy the canonical driver name back. */
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: too short buffer");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (cmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}
    776 
    777 /*
    778  * Convert from character dev_t to block dev_t.
    779  *
    780  * => Caller must ensure that the device is not detached, and therefore
    781  *    that the major number is still valid when dereferenced.
    782  */
    783 dev_t
    784 devsw_chr2blk(dev_t cdev)
    785 {
    786 	devmajor_t bmajor, cmajor;
    787 	int i;
    788 	dev_t rv;
    789 
    790 	cmajor = major(cdev);
    791 	bmajor = NODEVMAJOR;
    792 	rv = NODEV;
    793 
    794 	mutex_enter(&device_lock);
    795 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    796 		mutex_exit(&device_lock);
    797 		return (NODEV);
    798 	}
    799 	for (i = 0 ; i < max_devsw_convs ; i++) {
    800 		if (devsw_conv[i].d_cmajor == cmajor) {
    801 			bmajor = devsw_conv[i].d_bmajor;
    802 			break;
    803 		}
    804 	}
    805 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    806 		rv = makedev(bmajor, minor(cdev));
    807 	mutex_exit(&device_lock);
    808 
    809 	return (rv);
    810 }
    811 
    812 /*
    813  * Convert from block dev_t to character dev_t.
    814  *
    815  * => Caller must ensure that the device is not detached, and therefore
    816  *    that the major number is still valid when dereferenced.
    817  */
    818 dev_t
    819 devsw_blk2chr(dev_t bdev)
    820 {
    821 	devmajor_t bmajor, cmajor;
    822 	int i;
    823 	dev_t rv;
    824 
    825 	bmajor = major(bdev);
    826 	cmajor = NODEVMAJOR;
    827 	rv = NODEV;
    828 
    829 	mutex_enter(&device_lock);
    830 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    831 		mutex_exit(&device_lock);
    832 		return (NODEV);
    833 	}
    834 	for (i = 0 ; i < max_devsw_convs ; i++) {
    835 		if (devsw_conv[i].d_bmajor == bmajor) {
    836 			cmajor = devsw_conv[i].d_cmajor;
    837 			break;
    838 		}
    839 	}
    840 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    841 		rv = makedev(cmajor, minor(bdev));
    842 	mutex_exit(&device_lock);
    843 
    844 	return (rv);
    845 }
    846 
    847 /*
    848  * Device access methods.
    849  */
    850 
/*
 * Take the kernel lock around a non-MPSAFE driver method.  `mpflag'
 * (a local in every caller) records whether the driver is D_MPSAFE so
 * DEV_UNLOCK() can undo exactly what DEV_LOCK() did.  Wrapped in
 * do/while(0) so the macros expand safely as the body of an unbraced
 * if/else (bdev_size() invokes them that way).
 */
#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {	\
			KERNEL_LOCK(1, NULL);			\
		}						\
	} while (0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0) {				\
			KERNEL_UNLOCK_ONE(NULL);		\
		}						\
	} while (0)
    860 
    861 int
    862 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    863 {
    864 	const struct bdevsw *d;
    865 	int rv, mpflag;
    866 
    867 	/*
    868 	 * For open we need to lock, in order to synchronize
    869 	 * with attach/detach.
    870 	 */
    871 	mutex_enter(&device_lock);
    872 	d = bdevsw_lookup(dev);
    873 	mutex_exit(&device_lock);
    874 	if (d == NULL)
    875 		return ENXIO;
    876 
    877 	DEV_LOCK(d);
    878 	rv = (*d->d_open)(dev, flag, devtype, l);
    879 	DEV_UNLOCK(d);
    880 
    881 	return rv;
    882 }
    883 
    884 int
    885 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    886 {
    887 	const struct bdevsw *d;
    888 	int rv, mpflag;
    889 
    890 	if ((d = bdevsw_lookup(dev)) == NULL)
    891 		return ENXIO;
    892 
    893 	DEV_LOCK(d);
    894 	rv = (*d->d_close)(dev, flag, devtype, l);
    895 	DEV_UNLOCK(d);
    896 
    897 	return rv;
    898 }
    899 
    900 SDT_PROVIDER_DECLARE(io);
    901 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
    902 
    903 void
    904 bdev_strategy(struct buf *bp)
    905 {
    906 	const struct bdevsw *d;
    907 	int mpflag;
    908 
    909 	SDT_PROBE1(io, kernel, , start, bp);
    910 
    911 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
    912 		bp->b_error = ENXIO;
    913 		bp->b_resid = bp->b_bcount;
    914 		biodone_vfs(bp); /* biodone() iff vfs present */
    915 		return;
    916 	}
    917 
    918 	DEV_LOCK(d);
    919 	(*d->d_strategy)(bp);
    920 	DEV_UNLOCK(d);
    921 }
    922 
    923 int
    924 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    925 {
    926 	const struct bdevsw *d;
    927 	int rv, mpflag;
    928 
    929 	if ((d = bdevsw_lookup(dev)) == NULL)
    930 		return ENXIO;
    931 
    932 	DEV_LOCK(d);
    933 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    934 	DEV_UNLOCK(d);
    935 
    936 	return rv;
    937 }
    938 
    939 int
    940 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    941 {
    942 	const struct bdevsw *d;
    943 	int rv;
    944 
    945 	/*
    946 	 * Dump can be called without the device open.  Since it can
    947 	 * currently only be called with the system paused (and in a
    948 	 * potentially unstable state), we don't perform any locking.
    949 	 */
    950 	if ((d = bdevsw_lookup(dev)) == NULL)
    951 		return ENXIO;
    952 
    953 	/* DEV_LOCK(d); */
    954 	rv = (*d->d_dump)(dev, addr, data, sz);
    955 	/* DEV_UNLOCK(d); */
    956 
    957 	return rv;
    958 }
    959 
    960 int
    961 bdev_type(dev_t dev)
    962 {
    963 	const struct bdevsw *d;
    964 
    965 	if ((d = bdevsw_lookup(dev)) == NULL)
    966 		return D_OTHER;
    967 	return d->d_flag & D_TYPEMASK;
    968 }
    969 
    970 int
    971 bdev_size(dev_t dev)
    972 {
    973 	const struct bdevsw *d;
    974 	int rv, mpflag = 0;
    975 
    976 	if ((d = bdevsw_lookup(dev)) == NULL ||
    977 	    d->d_psize == NULL)
    978 		return -1;
    979 
    980 	/*
    981 	 * Don't to try lock the device if we're dumping.
    982 	 * XXX: is there a better way to test this?
    983 	 */
    984 	if ((boothowto & RB_DUMP) == 0)
    985 		DEV_LOCK(d);
    986 	rv = (*d->d_psize)(dev);
    987 	if ((boothowto & RB_DUMP) == 0)
    988 		DEV_UNLOCK(d);
    989 
    990 	return rv;
    991 }
    992 
    993 int
    994 bdev_discard(dev_t dev, off_t pos, off_t len)
    995 {
    996 	const struct bdevsw *d;
    997 	int rv, mpflag;
    998 
    999 	if ((d = bdevsw_lookup(dev)) == NULL)
   1000 		return ENXIO;
   1001 
   1002 	DEV_LOCK(d);
   1003 	rv = (*d->d_discard)(dev, pos, len);
   1004 	DEV_UNLOCK(d);
   1005 
   1006 	return rv;
   1007 }
   1008 
   1009 int
   1010 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
   1011 {
   1012 	const struct cdevsw *d;
   1013 	int rv, mpflag;
   1014 
   1015 	/*
   1016 	 * For open we need to lock, in order to synchronize
   1017 	 * with attach/detach.
   1018 	 */
   1019 	mutex_enter(&device_lock);
   1020 	d = cdevsw_lookup(dev);
   1021 	mutex_exit(&device_lock);
   1022 	if (d == NULL)
   1023 		return ENXIO;
   1024 
   1025 	DEV_LOCK(d);
   1026 	rv = (*d->d_open)(dev, flag, devtype, l);
   1027 	DEV_UNLOCK(d);
   1028 
   1029 	return rv;
   1030 }
   1031 
   1032 int
   1033 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
   1034 {
   1035 	const struct cdevsw *d;
   1036 	int rv, mpflag;
   1037 
   1038 	if ((d = cdevsw_lookup(dev)) == NULL)
   1039 		return ENXIO;
   1040 
   1041 	DEV_LOCK(d);
   1042 	rv = (*d->d_close)(dev, flag, devtype, l);
   1043 	DEV_UNLOCK(d);
   1044 
   1045 	return rv;
   1046 }
   1047 
   1048 int
   1049 cdev_read(dev_t dev, struct uio *uio, int flag)
   1050 {
   1051 	const struct cdevsw *d;
   1052 	int rv, mpflag;
   1053 
   1054 	if ((d = cdevsw_lookup(dev)) == NULL)
   1055 		return ENXIO;
   1056 
   1057 	DEV_LOCK(d);
   1058 	rv = (*d->d_read)(dev, uio, flag);
   1059 	DEV_UNLOCK(d);
   1060 
   1061 	return rv;
   1062 }
   1063 
   1064 int
   1065 cdev_write(dev_t dev, struct uio *uio, int flag)
   1066 {
   1067 	const struct cdevsw *d;
   1068 	int rv, mpflag;
   1069 
   1070 	if ((d = cdevsw_lookup(dev)) == NULL)
   1071 		return ENXIO;
   1072 
   1073 	DEV_LOCK(d);
   1074 	rv = (*d->d_write)(dev, uio, flag);
   1075 	DEV_UNLOCK(d);
   1076 
   1077 	return rv;
   1078 }
   1079 
   1080 int
   1081 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
   1082 {
   1083 	const struct cdevsw *d;
   1084 	int rv, mpflag;
   1085 
   1086 	if ((d = cdevsw_lookup(dev)) == NULL)
   1087 		return ENXIO;
   1088 
   1089 	DEV_LOCK(d);
   1090 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
   1091 	DEV_UNLOCK(d);
   1092 
   1093 	return rv;
   1094 }
   1095 
   1096 void
   1097 cdev_stop(struct tty *tp, int flag)
   1098 {
   1099 	const struct cdevsw *d;
   1100 	int mpflag;
   1101 
   1102 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
   1103 		return;
   1104 
   1105 	DEV_LOCK(d);
   1106 	(*d->d_stop)(tp, flag);
   1107 	DEV_UNLOCK(d);
   1108 }
   1109 
   1110 struct tty *
   1111 cdev_tty(dev_t dev)
   1112 {
   1113 	const struct cdevsw *d;
   1114 
   1115 	if ((d = cdevsw_lookup(dev)) == NULL)
   1116 		return NULL;
   1117 
   1118 	/* XXX Check if necessary. */
   1119 	if (d->d_tty == NULL)
   1120 		return NULL;
   1121 
   1122 	return (*d->d_tty)(dev);
   1123 }
   1124 
   1125 int
   1126 cdev_poll(dev_t dev, int flag, lwp_t *l)
   1127 {
   1128 	const struct cdevsw *d;
   1129 	int rv, mpflag;
   1130 
   1131 	if ((d = cdevsw_lookup(dev)) == NULL)
   1132 		return POLLERR;
   1133 
   1134 	DEV_LOCK(d);
   1135 	rv = (*d->d_poll)(dev, flag, l);
   1136 	DEV_UNLOCK(d);
   1137 
   1138 	return rv;
   1139 }
   1140 
   1141 paddr_t
   1142 cdev_mmap(dev_t dev, off_t off, int flag)
   1143 {
   1144 	const struct cdevsw *d;
   1145 	paddr_t rv;
   1146 	int mpflag;
   1147 
   1148 	if ((d = cdevsw_lookup(dev)) == NULL)
   1149 		return (paddr_t)-1LL;
   1150 
   1151 	DEV_LOCK(d);
   1152 	rv = (*d->d_mmap)(dev, off, flag);
   1153 	DEV_UNLOCK(d);
   1154 
   1155 	return rv;
   1156 }
   1157 
   1158 int
   1159 cdev_kqfilter(dev_t dev, struct knote *kn)
   1160 {
   1161 	const struct cdevsw *d;
   1162 	int rv, mpflag;
   1163 
   1164 	if ((d = cdevsw_lookup(dev)) == NULL)
   1165 		return ENXIO;
   1166 
   1167 	DEV_LOCK(d);
   1168 	rv = (*d->d_kqfilter)(dev, kn);
   1169 	DEV_UNLOCK(d);
   1170 
   1171 	return rv;
   1172 }
   1173 
   1174 int
   1175 cdev_discard(dev_t dev, off_t pos, off_t len)
   1176 {
   1177 	const struct cdevsw *d;
   1178 	int rv, mpflag;
   1179 
   1180 	if ((d = cdevsw_lookup(dev)) == NULL)
   1181 		return ENXIO;
   1182 
   1183 	DEV_LOCK(d);
   1184 	rv = (*d->d_discard)(dev, pos, len);
   1185 	DEV_UNLOCK(d);
   1186 
   1187 	return rv;
   1188 }
   1189 
   1190 int
   1191 cdev_type(dev_t dev)
   1192 {
   1193 	const struct cdevsw *d;
   1194 
   1195 	if ((d = cdevsw_lookup(dev)) == NULL)
   1196 		return D_OTHER;
   1197 	return d->d_flag & D_TYPEMASK;
   1198 }
   1199