Home | History | Annotate | Line # | Download | only in kern
subr_devsw.c revision 1.37.2.2
      1 /*	$NetBSD: subr_devsw.c,v 1.37.2.2 2017/04/28 02:36:10 pgoyette Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
     48  *	When the system initially boots only the statically allocated
     49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
     50  *	allocation, we allocate a fixed block of memory to hold the new,
     51  *	expanded index.  This "fork" of the table is only ever performed
     52  *	once in order to guarantee that other threads may safely access
     53  *	the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
 */
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.37.2.2 2017/04/28 02:36:10 pgoyette Exp $");
     73 
     74 #ifdef _KERNEL_OPT
     75 #include "opt_dtrace.h"
     76 #endif
     77 
     78 #include <sys/param.h>
     79 #include <sys/conf.h>
     80 #include <sys/kmem.h>
     81 #include <sys/systm.h>
     82 #include <sys/poll.h>
     83 #include <sys/tty.h>
     84 #include <sys/cpu.h>
     85 #include <sys/buf.h>
     86 #include <sys/reboot.h>
     87 #include <sys/sdt.h>
     88 #include <sys/atomic.h>
     89 #include <sys/condvar.h>
     90 #include <sys/localcount.h>
     91 #include <sys/pserialize.h>
     92 
/* Debug tracing, enabled by building with DEVSW_DEBUG defined. */
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum of major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

/*
 * The active tables (bdevsw/cdevsw/devsw_conv) and the static,
 * config(8)-generated initial tables (bdevsw0/cdevsw0/devsw_conv0).
 * The active pointers start out aimed at the static tables and are
 * switched to kmem-allocated copies if they must grow.
 */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

/* Serializes attach/detach and protects the conversion table. */
kmutex_t	device_lock;
/* Signalled by localcount as references drain during detach. */
kcondvar_t	device_cv;
/* Created later by devsw_detach_init(); used to fence lockless readers. */
pserialize_t	device_psz = NULL;

/* Overridden to biodone() when the VFS layer is present. */
void (*biodone_vfs)(buf_t *) = (void *)nullop;
    119 
    120 void
    121 devsw_init(void)
    122 {
    123 
    124 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    125 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    126 	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
    127 	cv_init(&device_cv, "devsw");
    128 }
    129 
    130 void
    131 devsw_detach_init(void)
    132 {
    133 
    134 	device_psz = pserialize_create();
    135 }
    136 
    137 int
    138 devsw_attach(const char *devname,
    139 	     const struct bdevsw *bdev, devmajor_t *bmajor,
    140 	     const struct cdevsw *cdev, devmajor_t *cmajor)
    141 {
    142 	struct devsw_conv *conv;
    143 	char *name;
    144 	int error, i;
    145 	size_t len;
    146 
    147 	if (devname == NULL || cdev == NULL)
    148 		return (EINVAL);
    149 
    150 	mutex_enter(&device_lock);
    151 
    152 	if (bdev != NULL) {
    153 		if (bdev->d_localcount == NULL) {
    154 			aprint_normal("%s: %s's bdevsw has no localcount",
    155 			    __func__, devname);
    156 			return EINVAL;
    157 		}
    158 		if (bdev->d_localcount == cdev->d_localcount) {
    159 			aprint_normal("%s: %s uses same localcount for both "
    160 			    cdevsw and bdevsw", __func__, devname);
    161 			return EINVAL;
    162 		}
    163 	}
    164 	if (cdev != NULL) {
    165 		aprint_normal("%s: %s's cdevsw has no localcount",
    166 		    __func__, devname);
    167 		return EINVAL;
    168 	}
    169 
    170 	for (i = 0 ; i < max_devsw_convs ; i++) {
    171 		conv = &devsw_conv[i];
    172 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    173 			continue;
    174 
    175 		if (*bmajor < 0)
    176 			*bmajor = conv->d_bmajor;
    177 		if (*cmajor < 0)
    178 			*cmajor = conv->d_cmajor;
    179 
    180 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    181 			error = EINVAL;
    182 			goto fail;
    183 		}
    184 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    185 			error = EINVAL;
    186 			goto fail;
    187 		}
    188 
    189 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    190 		    cdevsw[*cmajor] != NULL) {
    191 			error = EEXIST;
    192 			goto fail;
    193 		}
    194 
    195 		/* use membar_producer() to ensure visibility of the xdevsw */
    196 		if (bdev != NULL) {
    197 			localcount_init(bdev->d_localcount);
    198 			membar_producer();
    199 			bdevsw[*bmajor] = bdev;
    200 		}
    201 		localcount_init(cdev->d_localcount);
    202 		membar_producer();
    203 		cdevsw[*cmajor] = cdev;
    204 
    205 		mutex_exit(&device_lock);
    206 		return (0);
    207 	}
    208 
    209 	error = bdevsw_attach(bdev, bmajor);
    210 	if (error != 0)
    211 		goto fail;
    212 	error = cdevsw_attach(cdev, cmajor);
    213 	if (error != 0) {
    214 		devsw_detach_locked(bdev, NULL);
    215 		goto fail;
    216 	}
    217 
    218 	for (i = 0 ; i < max_devsw_convs ; i++) {
    219 		if (devsw_conv[i].d_name == NULL)
    220 			break;
    221 	}
    222 	if (i == max_devsw_convs) {
    223 		struct devsw_conv *newptr;
    224 		int old_convs, new_convs;
    225 
    226 		old_convs = max_devsw_convs;
    227 		new_convs = old_convs + 1;
    228 
    229 		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
    230 		if (newptr == NULL) {
    231 			devsw_detach_locked(bdev, cdev);
    232 			error = ENOMEM;
    233 			goto fail;
    234 		}
    235 		newptr[old_convs].d_name = NULL;
    236 		newptr[old_convs].d_bmajor = -1;
    237 		newptr[old_convs].d_cmajor = -1;
    238 		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
    239 		if (devsw_conv != devsw_conv0)
    240 			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
    241 		devsw_conv = newptr;
    242 		max_devsw_convs = new_convs;
    243 	}
    244 
    245 	len = strlen(devname) + 1;
    246 	name = kmem_alloc(len, KM_NOSLEEP);
    247 	if (name == NULL) {
    248 		devsw_detach_locked(bdev, cdev);
    249 		error = ENOMEM;
    250 		goto fail;
    251 	}
    252 	strlcpy(name, devname, len);
    253 
    254 	devsw_conv[i].d_name = name;
    255 	devsw_conv[i].d_bmajor = *bmajor;
    256 	devsw_conv[i].d_cmajor = *cmajor;
    257 
    258 	mutex_exit(&device_lock);
    259 	return (0);
    260  fail:
    261 	mutex_exit(&device_lock);
    262 	return (error);
    263 }
    264 
    265 static int
    266 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
    267 {
    268 	const struct bdevsw **newptr;
    269 	devmajor_t bmajor;
    270 	int i;
    271 
    272 	KASSERT(mutex_owned(&device_lock));
    273 
    274 	if (devsw == NULL)
    275 		return (0);
    276 
    277 	if (*devmajor < 0) {
    278 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    279 			if (bdevsw[bmajor] != NULL)
    280 				continue;
    281 			for (i = 0 ; i < max_devsw_convs ; i++) {
    282 				if (devsw_conv[i].d_bmajor == bmajor)
    283 					break;
    284 			}
    285 			if (i != max_devsw_convs)
    286 				continue;
    287 			break;
    288 		}
    289 		*devmajor = bmajor;
    290 	}
    291 
    292 	if (*devmajor >= MAXDEVSW) {
    293 		printf("%s: block majors exhausted", __func__);
    294 		return (ENOMEM);
    295 	}
    296 
    297 	if (*devmajor >= max_bdevsws) {
    298 		KASSERT(bdevsw == bdevsw0);
    299 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    300 		if (newptr == NULL)
    301 			return (ENOMEM);
    302 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    303 		bdevsw = newptr;
    304 		max_bdevsws = MAXDEVSW;
    305 	}
    306 
    307 	if (bdevsw[*devmajor] != NULL)
    308 		return (EEXIST);
    309 
    310 	if (devsw->d_localcount == NULL) {
    311 		aprint_normal("%s: %s's bdevsw has no localcount",
    312 		    __func__, devname);
    313 		return EINVAL;
    314 	}
    315 	localcount_init(devsw->d_localcount);
    316 
    317 	/* ensure visibility of the bdevsw */
    318 	membar_producer();
    319 
    320 	bdevsw[*devmajor] = devsw;
    321 
    322 	return (0);
    323 }
    324 
    325 static int
    326 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
    327 {
    328 	const struct cdevsw **newptr;
    329 	devmajor_t cmajor;
    330 	int i;
    331 
    332 	KASSERT(mutex_owned(&device_lock));
    333 
    334 	if (*devmajor < 0) {
    335 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    336 			if (cdevsw[cmajor] != NULL)
    337 				continue;
    338 			for (i = 0 ; i < max_devsw_convs ; i++) {
    339 				if (devsw_conv[i].d_cmajor == cmajor)
    340 					break;
    341 			}
    342 			if (i != max_devsw_convs)
    343 				continue;
    344 			break;
    345 		}
    346 		*devmajor = cmajor;
    347 	}
    348 
    349 	if (*devmajor >= MAXDEVSW) {
    350 		printf("%s: character majors exhausted", __func__);
    351 		return (ENOMEM);
    352 	}
    353 
    354 	if (*devmajor >= max_cdevsws) {
    355 		KASSERT(cdevsw == cdevsw0);
    356 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    357 		if (newptr == NULL)
    358 			return (ENOMEM);
    359 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    360 		cdevsw = newptr;
    361 		max_cdevsws = MAXDEVSW;
    362 	}
    363 
    364 	if (cdevsw[*devmajor] != NULL)
    365 		return (EEXIST);
    366 
    367 	if (devsw->d_localcount == NULL) {
    368 		aprint_normal("%s: %s's cdevsw has no localcount",
    369 		    __func__, devname);
    370 		return EINVAL;
    371 	}
    372 	localcount_init(devsw->d_localcount);
    373 
    374 	/* ensure visibility of the cdevsw */
    375 	membar_producer();
    376 
    377 	cdevsw[*devmajor] = devsw;
    378 
    379 	return (0);
    380 }
    381 
/*
 * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
 * drain any existing references.
 */

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	/* Locate bdev's slot; i == max_bdevsws means "not found". */
	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount for major %d", __func__, i);
			break;
		}
	}
	/* Likewise for cdev; j == max_cdevsws means "not found". */
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount for major %d", __func__, j);
			break;
		}
	}
	/* Unhook the entries so no new lookup can reach them. */
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws )
		cdevsw[j] = NULL;

	/* Wait for all current readers to finish with the devsw's */
	/*
	 * NOTE(review): device_psz is only created by
	 * devsw_detach_init(); if a detach could happen before that,
	 * this dereferences NULL — confirm initialization order.
	 */
	pserialize_perform(device_psz);

	/*
	 * No new accessors can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays, so no new references can be
	 * acquired.  Wait for all existing references to drain,
	 * and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL ) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}
    441 
/*
 * Remove a driver's bdevsw/cdevsw entries and drain outstanding
 * references.  Public wrapper: takes device_lock for the duration
 * (localcount_drain() may sleep on device_cv, releasing the lock).
 * Always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
    451 
    452 /*
    453  * Look up a block device by number.
    454  *
    455  * => Caller must ensure that the device is attached.
    456  */
    457 const struct bdevsw *
    458 bdevsw_lookup(dev_t dev)
    459 {
    460 	devmajor_t bmajor;
    461 
    462 	if (dev == NODEV)
    463 		return (NULL);
    464 	bmajor = major(dev);
    465 	if (bmajor < 0 || bmajor >= max_bdevsws)
    466 		return (NULL);
    467 
    468 	/* Wait for the content of the struct bdevsw to become visible */
    469 	membar_datadep_consumer();
    470 
    471 	return (bdevsw[bmajor]);
    472 }
    473 
/*
 * Look up a block device by number and acquire a reference on it so
 * it cannot be destroyed while in use.  Returns NULL if no driver is
 * attached at dev's major.  On success the caller must eventually
 * call bdevsw_release().
 */
const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdev->d_localcount != NULL)
		localcount_acquire(bdev->d_localcount);

 out:	pserialize_read_exit(s);

	return bdev;
}
    506 
    507 void
    508 bdevsw_release(const struct bdevsw *bd)
    509 {
    510 
    511 	KASSERT(bd != NULL);
    512 	if (bd->d_localcount != NULL)
    513 		localcount_release(bd->d_localcount, &device_cv, &device_lock);
    514 }
    515 
/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	return (cdevsw[cmajor]);
}
    537 
/*
 * Look up a character device by number and acquire a reference on it
 * so it cannot be destroyed while in use.  Returns NULL if no driver
 * is attached at dev's major.  On success the caller must eventually
 * call cdevsw_release().
 */
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdev->d_localcount != NULL)
		localcount_acquire(cdev->d_localcount);

 out:	pserialize_read_exit(s);

	return cdev;
}
    570 
    571 void
    572 cdevsw_release(const struct cdevsw *cd)
    573 {
    574 
    575 	KASSERT(cd != NULL);
    576 	if (cd->d_localcount != NULL)
    577 		localcount_release(cd->d_localcount, &device_cv, &device_lock);
    578 }
    579 
    580 /*
    581  * Look up a block device by reference to its operations set.
    582  *
    583  * => Caller must ensure that the device is not detached, and therefore
    584  *    that the returned major is still valid when dereferenced.
    585  */
    586 devmajor_t
    587 bdevsw_lookup_major(const struct bdevsw *bdev)
    588 {
    589 	devmajor_t bmajor;
    590 
    591 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    592 		if (bdevsw[bmajor] == bdev)
    593 			return (bmajor);
    594 	}
    595 
    596 	return (NODEVMAJOR);
    597 }
    598 
    599 /*
    600  * Look up a character device by reference to its operations set.
    601  *
    602  * => Caller must ensure that the device is not detached, and therefore
    603  *    that the returned major is still valid when dereferenced.
    604  */
    605 devmajor_t
    606 cdevsw_lookup_major(const struct cdevsw *cdev)
    607 {
    608 	devmajor_t cmajor;
    609 
    610 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    611 		if (cdevsw[cmajor] == cdev)
    612 			return (cmajor);
    613 	}
    614 
    615 	return (NODEVMAJOR);
    616 }
    617 
    618 /*
    619  * Convert from block major number to name.
    620  *
    621  * => Caller must ensure that the device is not detached, and therefore
    622  *    that the name pointer is still valid when dereferenced.
    623  */
    624 const char *
    625 devsw_blk2name(devmajor_t bmajor)
    626 {
    627 	const char *name;
    628 	devmajor_t cmajor;
    629 	int i;
    630 
    631 	name = NULL;
    632 	cmajor = -1;
    633 
    634 	mutex_enter(&device_lock);
    635 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    636 		mutex_exit(&device_lock);
    637 		return (NULL);
    638 	}
    639 	for (i = 0 ; i < max_devsw_convs; i++) {
    640 		if (devsw_conv[i].d_bmajor == bmajor) {
    641 			cmajor = devsw_conv[i].d_cmajor;
    642 			break;
    643 		}
    644 	}
    645 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    646 		name = devsw_conv[i].d_name;
    647 	mutex_exit(&device_lock);
    648 
    649 	return (name);
    650 }
    651 
    652 /*
    653  * Convert char major number to device driver name.
    654  */
    655 const char *
    656 cdevsw_getname(devmajor_t major)
    657 {
    658 	const char *name;
    659 	int i;
    660 
    661 	name = NULL;
    662 
    663 	if (major < 0)
    664 		return (NULL);
    665 
    666 	mutex_enter(&device_lock);
    667 	for (i = 0 ; i < max_devsw_convs; i++) {
    668 		if (devsw_conv[i].d_cmajor == major) {
    669 			name = devsw_conv[i].d_name;
    670 			break;
    671 		}
    672 	}
    673 	mutex_exit(&device_lock);
    674 	return (name);
    675 }
    676 
    677 /*
    678  * Convert block major number to device driver name.
    679  */
    680 const char *
    681 bdevsw_getname(devmajor_t major)
    682 {
    683 	const char *name;
    684 	int i;
    685 
    686 	name = NULL;
    687 
    688 	if (major < 0)
    689 		return (NULL);
    690 
    691 	mutex_enter(&device_lock);
    692 	for (i = 0 ; i < max_devsw_convs; i++) {
    693 		if (devsw_conv[i].d_bmajor == major) {
    694 			name = devsw_conv[i].d_name;
    695 			break;
    696 		}
    697 	}
    698 	mutex_exit(&device_lock);
    699 	return (name);
    700 }
    701 
    702 /*
    703  * Convert from device name to block major number.
    704  *
    705  * => Caller must ensure that the device is not detached, and therefore
    706  *    that the major number is still valid when dereferenced.
    707  */
    708 devmajor_t
    709 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    710 {
    711 	struct devsw_conv *conv;
    712 	devmajor_t bmajor;
    713 	int i;
    714 
    715 	if (name == NULL)
    716 		return (NODEVMAJOR);
    717 
    718 	mutex_enter(&device_lock);
    719 	for (i = 0 ; i < max_devsw_convs ; i++) {
    720 		size_t len;
    721 
    722 		conv = &devsw_conv[i];
    723 		if (conv->d_name == NULL)
    724 			continue;
    725 		len = strlen(conv->d_name);
    726 		if (strncmp(conv->d_name, name, len) != 0)
    727 			continue;
    728 		if (*(name +len) && !isdigit(*(name + len)))
    729 			continue;
    730 		bmajor = conv->d_bmajor;
    731 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    732 		    bdevsw[bmajor] == NULL)
    733 			break;
    734 		if (devname != NULL) {
    735 #ifdef DEVSW_DEBUG
    736 			if (strlen(conv->d_name) >= devnamelen)
    737 				printf("%s: too short buffer", __func__);
    738 #endif /* DEVSW_DEBUG */
    739 			strncpy(devname, conv->d_name, devnamelen);
    740 			devname[devnamelen - 1] = '\0';
    741 		}
    742 		mutex_exit(&device_lock);
    743 		return (bmajor);
    744 	}
    745 
    746 	mutex_exit(&device_lock);
    747 	return (NODEVMAJOR);
    748 }
    749 
    750 /*
    751  * Convert from device name to char major number.
    752  *
    753  * => Caller must ensure that the device is not detached, and therefore
    754  *    that the major number is still valid when dereferenced.
    755  */
    756 devmajor_t
    757 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    758 {
    759 	struct devsw_conv *conv;
    760 	devmajor_t cmajor;
    761 	int i;
    762 
    763 	if (name == NULL)
    764 		return (NODEVMAJOR);
    765 
    766 	mutex_enter(&device_lock);
    767 	for (i = 0 ; i < max_devsw_convs ; i++) {
    768 		size_t len;
    769 
    770 		conv = &devsw_conv[i];
    771 		if (conv->d_name == NULL)
    772 			continue;
    773 		len = strlen(conv->d_name);
    774 		if (strncmp(conv->d_name, name, len) != 0)
    775 			continue;
    776 		if (*(name +len) && !isdigit(*(name + len)))
    777 			continue;
    778 		cmajor = conv->d_cmajor;
    779 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    780 		    cdevsw[cmajor] == NULL)
    781 			break;
    782 		if (devname != NULL) {
    783 #ifdef DEVSW_DEBUG
    784 			if (strlen(conv->d_name) >= devnamelen)
    785 				printf("%s: too short buffer", __func__);
    786 #endif /* DEVSW_DEBUG */
    787 			strncpy(devname, conv->d_name, devnamelen);
    788 			devname[devnamelen - 1] = '\0';
    789 		}
    790 		mutex_exit(&device_lock);
    791 		return (cmajor);
    792 	}
    793 
    794 	mutex_exit(&device_lock);
    795 	return (NODEVMAJOR);
    796 }
    797 
    798 /*
    799  * Convert from character dev_t to block dev_t.
    800  *
    801  * => Caller must ensure that the device is not detached, and therefore
    802  *    that the major number is still valid when dereferenced.
    803  */
    804 dev_t
    805 devsw_chr2blk(dev_t cdev)
    806 {
    807 	devmajor_t bmajor, cmajor;
    808 	int i;
    809 	dev_t rv;
    810 
    811 	cmajor = major(cdev);
    812 	bmajor = NODEVMAJOR;
    813 	rv = NODEV;
    814 
    815 	mutex_enter(&device_lock);
    816 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    817 		mutex_exit(&device_lock);
    818 		return (NODEV);
    819 	}
    820 	for (i = 0 ; i < max_devsw_convs ; i++) {
    821 		if (devsw_conv[i].d_cmajor == cmajor) {
    822 			bmajor = devsw_conv[i].d_bmajor;
    823 			break;
    824 		}
    825 	}
    826 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    827 		rv = makedev(bmajor, minor(cdev));
    828 	mutex_exit(&device_lock);
    829 
    830 	return (rv);
    831 }
    832 
    833 /*
    834  * Convert from block dev_t to character dev_t.
    835  *
    836  * => Caller must ensure that the device is not detached, and therefore
    837  *    that the major number is still valid when dereferenced.
    838  */
    839 dev_t
    840 devsw_blk2chr(dev_t bdev)
    841 {
    842 	devmajor_t bmajor, cmajor;
    843 	int i;
    844 	dev_t rv;
    845 
    846 	bmajor = major(bdev);
    847 	cmajor = NODEVMAJOR;
    848 	rv = NODEV;
    849 
    850 	mutex_enter(&device_lock);
    851 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    852 		mutex_exit(&device_lock);
    853 		return (NODEV);
    854 	}
    855 	for (i = 0 ; i < max_devsw_convs ; i++) {
    856 		if (devsw_conv[i].d_bmajor == bmajor) {
    857 			cmajor = devsw_conv[i].d_cmajor;
    858 			break;
    859 		}
    860 	}
    861 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    862 		rv = makedev(cmajor, minor(bdev));
    863 	mutex_exit(&device_lock);
    864 
    865 	return (rv);
    866 }
    867 
/*
 * Device access methods.
 */

/*
 * Take the big kernel lock around a driver call unless the driver is
 * marked D_MPSAFE.  Whether the lock was taken is recorded in the
 * caller's `mpflag' local, which DEV_UNLOCK() consults.  Note these
 * expand to bare `if' statements: keep usage braced or on its own
 * statement line to avoid dangling-else surprises.
 */
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

/* Release the kernel lock iff DEV_LOCK() took it. */
#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
    881 
/*
 * Invoke a block driver's d_open() method, keyed on dev_t.
 * Returns ENXIO if no driver is attached at dev's major.
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup_acquire(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);
	bdevsw_release(d);	/* drop the lookup_acquire reference */

	return rv;
}
    905 
    906 int
    907 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    908 {
    909 	const struct bdevsw *d;
    910 	int rv, mpflag;
    911 
    912 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
    913 		return ENXIO;
    914 
    915 	DEV_LOCK(d);
    916 	rv = (*d->d_close)(dev, flag, devtype, l);
    917 	DEV_UNLOCK(d);
    918 	bdevsw_release(d);
    919 
    920 	return rv;
    921 }
    922 
    923 SDT_PROVIDER_DECLARE(io);
    924 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
    925 
    926 void
    927 bdev_strategy(struct buf *bp)
    928 {
    929 	const struct bdevsw *d;
    930 	int mpflag;
    931 
    932 	SDT_PROBE1(io, kernel, , start, bp);
    933 
    934 	if ((d = bdevsw_lookup_acquire(bp->b_dev)) == NULL) {
    935 		bp->b_error = ENXIO;
    936 		bp->b_resid = bp->b_bcount;
    937 		biodone_vfs(bp); /* biodone() iff vfs present */
    938 		return;
    939 	}
    940 
    941 	DEV_LOCK(d);
    942 	(*d->d_strategy)(bp);
    943 	DEV_UNLOCK(d);
    944 	bdevsw_release(d);
    945 }
    946 
    947 int
    948 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    949 {
    950 	const struct bdevsw *d;
    951 	int rv, mpflag;
    952 
    953 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
    954 		return ENXIO;
    955 
    956 	DEV_LOCK(d);
    957 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    958 	DEV_UNLOCK(d);
    959 	bdevsw_release(d);
    960 
    961 	return rv;
    962 }
    963 
    964 int
    965 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    966 {
    967 	const struct bdevsw *d;
    968 	int rv;
    969 
    970 	/*
    971 	 * Dump can be called without the device open.  Since it can
    972 	 * currently only be called with the system paused (and in a
    973 	 * potentially unstable state), we don't perform any locking.
    974 	 */
    975 	if ((d = bdevsw_lookup(dev)) == NULL)
    976 		return ENXIO;
    977 
    978 	/* DEV_LOCK(d); */
    979 	rv = (*d->d_dump)(dev, addr, data, sz);
    980 	/* DEV_UNLOCK(d); */
    981 
    982 	return rv;
    983 }
    984 
    985 int
    986 bdev_flags(dev_t dev)
    987 {
    988 	const struct bdevsw *d;
    989 	int rv;
    990 
    991 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
    992 		return 0;
    993 
    994 	rv = d->d_flag & ~D_TYPEMASK;
    995 	bdevsw_release(d);
    996 
    997 	return rv;
    998 }
    999 
   1000 int
   1001 bdev_type(dev_t dev)
   1002 {
   1003 	const struct bdevsw *d;
   1004 	int rv;
   1005 
   1006 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
   1007 		return D_OTHER;
   1008 
   1009 	rv = d->d_flag & D_TYPEMASK;
   1010 	bdevsw_release(d);
   1011 
   1012 	return rv;
   1013 }
   1014 
/*
 * Return the size of a block device via its d_psize() method, or -1
 * if the device does not exist or does not implement d_psize().
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;	/* pre-zeroed: DEV_LOCK may be skipped below */

	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
		return -1;

	if (d->d_psize == NULL) {
		bdevsw_release(d);
		return -1;
	}

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);
	bdevsw_release(d);
	return rv;
}
   1041 
/*
 * Call a block device's d_discard (trim) routine for the byte range
 * [pos, pos + len).  Returns ENXIO if no driver is attached.
 */
int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by the DEV_LOCK()/
				 * DEV_UNLOCK() macros; otherwise unused */

	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);
	bdevsw_release(d);

	return rv;
}
   1058 
/*
 * Call the d_open routine for a character device.  Returns ENXIO if
 * no driver occupies the major number, otherwise the driver's result.
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup_acquire(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1082 
/*
 * Call the d_close routine for a character device.  Returns ENXIO if
 * no driver occupies the major number, otherwise the driver's result.
 */
int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1099 
   1100 int
   1101 cdev_read(dev_t dev, struct uio *uio, int flag)
   1102 {
   1103 	const struct cdevsw *d;
   1104 	int rv, mpflag;
   1105 
   1106 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1107 		return ENXIO;
   1108 
   1109 	DEV_LOCK(d);
   1110 	rv = (*d->d_read)(dev, uio, flag);
   1111 	DEV_UNLOCK(d);
   1112 	cdevsw_release(d);
   1113 
   1114 	return rv;
   1115 }
   1116 
/*
 * Call the d_write routine for a character device.  Returns ENXIO if
 * no driver occupies the major number, otherwise the driver's result.
 */
int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1133 
/*
 * Call the d_ioctl routine for a character device.  Returns ENXIO if
 * no driver occupies the major number, otherwise the driver's result.
 */
int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1150 
/*
 * Call the d_stop routine for the character device underlying a tty.
 * Silently does nothing if no driver is attached to tp->t_dev.
 */
void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);
}
   1165 
   1166 struct tty *
   1167 cdev_tty(dev_t dev)
   1168 {
   1169 	const struct cdevsw *d;
   1170 	struct tty *rv;
   1171 
   1172 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1173 		return NULL;
   1174 
   1175 	/* XXX Check if necessary. */
   1176 	if (d->d_tty == NULL)
   1177 		rv = NULL;
   1178 	else
   1179 		rv= (*d->d_tty)(dev);
   1180 	cdevsw_release(d);
   1181 
   1182 	return rv;
   1183 }
   1184 
/*
 * Call the d_poll routine for a character device.  Returns POLLERR
 * if no driver occupies the major number, otherwise the driver's
 * revents mask.
 */
int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1201 
/*
 * Call the d_mmap routine for a character device.  Returns (paddr_t)-1
 * (the conventional mmap failure value) if no driver is attached,
 * otherwise the driver's result.
 */
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1219 
/*
 * Call the d_kqfilter routine for a character device.  Returns ENXIO
 * if no driver occupies the major number, otherwise the driver's
 * result.
 */
int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1236 
/*
 * Call a character device's d_discard (trim) routine for the byte
 * range [pos, pos + len).  Returns ENXIO if no driver is attached.
 */
int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;		/* mpflag is set/read by DEV_LOCK()/DEV_UNLOCK() */

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1253 
   1254 int
   1255 cdev_flags(dev_t dev)
   1256 {
   1257 	const struct cdevsw *d;
   1258 	int rv;
   1259 
   1260 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1261 		return 0;
   1262 
   1263 	rv = d->d_flag & ~D_TYPEMASK;
   1264 	cdevsw_release(d);
   1265 
   1266 	return rv;
   1267 }
   1268 
   1269 int
   1270 cdev_type(dev_t dev)
   1271 {
   1272 	const struct cdevsw *d;
   1273 	int rv;
   1274 
   1275 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1276 		return D_OTHER;
   1277 
   1278 	rv = d->d_flag & D_TYPEMASK;
   1279 	cdevsw_release(d);
   1280 
   1281 	return rv;
   1282 }
   1283 
   1284 /*
   1285  * nommap(dev, off, prot)
   1286  *
   1287  *	mmap routine that always fails, for non-mmappable devices.
   1288  */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	/* All arguments are ignored; -1 is the conventional mmap failure. */
	return (paddr_t)-1;
}
   1295