/* subr_devsw.c, revision 1.37.2.4 (NetBSD source-browser export) */
      1 /*	$NetBSD: subr_devsw.c,v 1.37.2.4 2017/05/17 04:33:03 pgoyette Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
     48  *	When the system initially boots only the statically allocated
     49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
     50  *	allocation, we allocate a fixed block of memory to hold the new,
     51  *	expanded index.  This "fork" of the table is only ever performed
     52  *	once in order to guarantee that other threads may safely access
     53  *	the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
     63  *	XXX Currently, the wrapper methods such as cdev_read() verify
     64  *	that a device driver does in fact exist before calling the
     65  *	associated driver method.  This should be changed so that
     66  *	once the device is has been referenced by a vnode (opened),
     67  *	calling	the other methods should be valid until that reference
     68  *	is dropped.
     69  */
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.37.2.4 2017/05/17 04:33:03 pgoyette Exp $");
     73 
     74 #ifdef _KERNEL_OPT
     75 #include "opt_dtrace.h"
     76 #endif
     77 
     78 #include <sys/param.h>
     79 #include <sys/conf.h>
     80 #include <sys/kmem.h>
     81 #include <sys/systm.h>
     82 #include <sys/poll.h>
     83 #include <sys/tty.h>
     84 #include <sys/cpu.h>
     85 #include <sys/buf.h>
     86 #include <sys/reboot.h>
     87 #include <sys/sdt.h>
     88 #include <sys/atomic.h>
     89 #include <sys/condvar.h>
     90 #include <sys/localcount.h>
     91 #include <sys/pserialize.h>
     92 
     93 #ifdef DEVSW_DEBUG
     94 #define	DPRINTF(x)	printf x
     95 #else /* DEVSW_DEBUG */
     96 #define	DPRINTF(x)
     97 #endif /* DEVSW_DEBUG */
     98 
     99 #define	MAXDEVSW	512	/* the maximum of major device number */
    100 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
    101 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
    102 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
    103 
    104 extern const struct bdevsw **bdevsw, *bdevsw0[];
    105 extern const struct cdevsw **cdevsw, *cdevsw0[];
    106 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    107 extern const int sys_bdevsws, sys_cdevsws;
    108 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    109 
    110 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
    111 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
    112 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    113 
    114 kmutex_t	device_lock;
    115 kcondvar_t	device_cv;
    116 pserialize_t	device_psz = NULL;
    117 
    118 void (*biodone_vfs)(buf_t *) = (void *)nullop;
    119 
    120 void
    121 devsw_init(void)
    122 {
    123 
    124 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    125 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    126 	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
    127 	cv_init(&device_cv, "devsw");
    128 }
    129 
/*
 * Second-stage initialization: create the pserialize instance used to
 * synchronize devsw detach against the lockless readers in
 * {b,c}devsw_lookup_acquire().  Kept separate from devsw_init() —
 * presumably because pserialize_create() needs infrastructure that is
 * not yet available when devsw_init() runs; TODO confirm against the
 * boot-sequence caller.
 */
void
devsw_detach_init(void)
{

	device_psz = pserialize_create();
}
    136 
    137 int
    138 devsw_attach(const char *devname,
    139 	     const struct bdevsw *bdev, devmajor_t *bmajor,
    140 	     const struct cdevsw *cdev, devmajor_t *cmajor)
    141 {
    142 	struct devsw_conv *conv;
    143 	char *name;
    144 	int error, i;
    145 	size_t len;
    146 
    147 	if (devname == NULL || cdev == NULL)
    148 		return (EINVAL);
    149 
    150 	mutex_enter(&device_lock);
    151 
    152 	if (bdev != NULL) {
    153 		if (bdev->d_localcount == NULL) {
    154 			printf("%s: %s's bdevsw has no localcount",
    155 			    __func__, devname);
    156 			return EINVAL;
    157 		}
    158 		if (bdev->d_localcount == cdev->d_localcount) {
    159 			printf("%s: %s uses same localcount for both "
    160 			    "cdevsw and bdevsw", __func__, devname);
    161 			return EINVAL;
    162 		}
    163 	}
    164 	if (cdev == NULL) {
    165 		printf("%s: %s's cdevsw has no localcount", __func__, devname);
    166 		return EINVAL;
    167 	}
    168 
    169 	for (i = 0 ; i < max_devsw_convs ; i++) {
    170 		conv = &devsw_conv[i];
    171 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    172 			continue;
    173 
    174 		if (*bmajor < 0)
    175 			*bmajor = conv->d_bmajor;
    176 		if (*cmajor < 0)
    177 			*cmajor = conv->d_cmajor;
    178 
    179 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    180 			error = EINVAL;
    181 			goto fail;
    182 		}
    183 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    184 			error = EINVAL;
    185 			goto fail;
    186 		}
    187 
    188 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    189 		    cdevsw[*cmajor] != NULL) {
    190 			error = EEXIST;
    191 			goto fail;
    192 		}
    193 
    194 		/* use membar_producer() to ensure visibility of the xdevsw */
    195 		if (bdev != NULL) {
    196 			localcount_init(bdev->d_localcount);
    197 			membar_producer();
    198 			bdevsw[*bmajor] = bdev;
    199 		}
    200 		localcount_init(cdev->d_localcount);
    201 		membar_producer();
    202 		cdevsw[*cmajor] = cdev;
    203 
    204 		mutex_exit(&device_lock);
    205 		return (0);
    206 	}
    207 
    208 	error = bdevsw_attach(bdev, bmajor);
    209 	if (error != 0)
    210 		goto fail;
    211 	error = cdevsw_attach(cdev, cmajor);
    212 	if (error != 0) {
    213 		devsw_detach_locked(bdev, NULL);
    214 		goto fail;
    215 	}
    216 
    217 	for (i = 0 ; i < max_devsw_convs ; i++) {
    218 		if (devsw_conv[i].d_name == NULL)
    219 			break;
    220 	}
    221 	if (i == max_devsw_convs) {
    222 		struct devsw_conv *newptr;
    223 		int old_convs, new_convs;
    224 
    225 		old_convs = max_devsw_convs;
    226 		new_convs = old_convs + 1;
    227 
    228 		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
    229 		if (newptr == NULL) {
    230 			devsw_detach_locked(bdev, cdev);
    231 			error = ENOMEM;
    232 			goto fail;
    233 		}
    234 		newptr[old_convs].d_name = NULL;
    235 		newptr[old_convs].d_bmajor = -1;
    236 		newptr[old_convs].d_cmajor = -1;
    237 		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
    238 		if (devsw_conv != devsw_conv0)
    239 			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
    240 		devsw_conv = newptr;
    241 		max_devsw_convs = new_convs;
    242 	}
    243 
    244 	len = strlen(devname) + 1;
    245 	name = kmem_alloc(len, KM_NOSLEEP);
    246 	if (name == NULL) {
    247 		devsw_detach_locked(bdev, cdev);
    248 		error = ENOMEM;
    249 		goto fail;
    250 	}
    251 	strlcpy(name, devname, len);
    252 
    253 	devsw_conv[i].d_name = name;
    254 	devsw_conv[i].d_bmajor = *bmajor;
    255 	devsw_conv[i].d_cmajor = *cmajor;
    256 
    257 	mutex_exit(&device_lock);
    258 	return (0);
    259  fail:
    260 	mutex_exit(&device_lock);
    261 	return (error);
    262 }
    263 
/*
 * Install a bdevsw at a given (or dynamically chosen) block major.
 *
 * => Called with device_lock held.
 * => If *devmajor < 0, pick the first major that is neither occupied
 *    in bdevsw[] nor reserved by any devsw_conv entry (so that a
 *    detached driver's recorded major is not handed to someone else).
 * => On first overflow of the static bdevsw0[] table, "fork" the table
 *    into a full-size MAXDEVSW array; the old static table is never
 *    freed, so concurrent readers holding the old pointer stay valid.
 * => Returns 0, ENOMEM (majors or memory exhausted), EEXIST, or
 *    EINVAL (missing d_localcount).
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	/* Block side is optional; nothing to do for a char-only driver. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			/* Skip majors reserved in the name/major map. */
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted", __func__);
		return (ENOMEM);
	}

	/* Grow from the static table to the full-size table (once). */
	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	if (devsw->d_localcount == NULL) {
		printf("%s: bdevsw has no localcount", __func__);
		return EINVAL;
	}
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the bdevsw */
	membar_producer();

	bdevsw[*devmajor] = devsw;

	return (0);
}
    322 
/*
 * Install a cdevsw at a given (or dynamically chosen) character major.
 *
 * => Called with device_lock held.
 * => Mirrors bdevsw_attach(): dynamic major selection skips majors
 *    occupied in cdevsw[] or reserved by a devsw_conv entry, and the
 *    static cdevsw0[] table is forked once into a full MAXDEVSW array.
 * => Returns 0, ENOMEM, EEXIST, or EINVAL (missing d_localcount).
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			/* Skip majors reserved in the name/major map. */
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted", __func__);
		return (ENOMEM);
	}

	/* Grow from the static table to the full-size table (once). */
	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	if (devsw->d_localcount == NULL) {
		printf("%s: cdevsw has no localcount", __func__);
		return EINVAL;
	}
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the cdevsw */
	membar_producer();

	cdevsw[*devmajor] = devsw;

	return (0);
}
    378 
    379 /*
    380  * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
    382  * drain any existing references.
    383  */
    384 
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	/* i/j index the majors of bdev/cdev; the "not found" sentinel is
	 * the table size, so the later "< max" tests also cover the case
	 * where a devsw was passed but never attached. */
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	i = max_bdevsws;
	if (bdev != NULL) {
		/* Find bdev's major by scanning the table. */
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount for major %d", __func__, i);
			break;
		}
	}
	j = max_cdevsws;
	if (cdev != NULL) {
		/* Find cdev's major by scanning the table. */
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount for major %d", __func__, j);
			break;
		}
	}
	/* Unhook the entries so no new lookups can find them. */
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws )
		cdevsw[j] = NULL;

	/* Wait for all current readers to finish with the devsw's */
	/* NOTE(review): pserialize_perform() is invoked while holding
	 * device_lock; confirm this is permitted here (pserialize(9)
	 * forbids holding spin locks, and device_psz must already have
	 * been created by devsw_detach_init() before any detach). */
	pserialize_perform(device_psz);

	/*
	 * No new accessors can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays, so no new references can be
	 * acquired.  Wait for all existing references to drain,
	 * and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL ) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}
    438 
    439 int
    440 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    441 {
    442 
    443 	mutex_enter(&device_lock);
    444 	devsw_detach_locked(bdev, cdev);
    445 	mutex_exit(&device_lock);
    446 	return 0;
    447 }
    448 
/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 * => No reference is taken; use bdevsw_lookup_acquire() if the devsw
 *    may be detached concurrently.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Wait for the content of the struct bdevsw to become visible */
	/* NOTE(review): the barrier precedes the bdevsw[bmajor] load here,
	 * whereas a data-dependency barrier normally goes between loading
	 * the pointer and dereferencing it (cf. *_lookup_acquire) —
	 * confirm intended placement. */
	membar_datadep_consumer();

	return (bdevsw[bmajor]);
}
    470 
/*
 * Look up a block device by number and acquire a reference on its
 * localcount (if it has one), preventing the devsw from being
 * destroyed until bdevsw_release() is called.
 *
 * => Returns NULL for NODEV, out-of-range majors, or empty slots.
 * => The pserialize read section excludes devsw_detach_locked()'s
 *    pserialize_perform(), so the entry cannot be drained between the
 *    table load and the localcount_acquire().
 */
const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdev->d_localcount != NULL)
		localcount_acquire(bdev->d_localcount);

 out:	pserialize_read_exit(s);

	return bdev;
}
    503 
    504 void
    505 bdevsw_release(const struct bdevsw *bd)
    506 {
    507 
    508 	if (bd != NULL && bd->d_localcount != NULL)
    509 		localcount_release(bd->d_localcount, &device_cv, &device_lock);
    510 }
    511 
/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 * => No reference is taken; use cdevsw_lookup_acquire() if the devsw
 *    may be detached concurrently.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	return (cdevsw[cmajor]);
}
    533 
/*
 * Look up a character device by number and acquire a reference on its
 * localcount (if it has one); the counterpart of bdevsw_lookup_acquire().
 * Release with cdevsw_release().
 */
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdev->d_localcount != NULL)
		localcount_acquire(cdev->d_localcount);

 out:	pserialize_read_exit(s);

	return cdev;
}
    566 
    567 void
    568 cdevsw_release(const struct cdevsw *cd)
    569 {
    570 
    571 	if (cd != NULL && cd->d_localcount != NULL)
    572 		localcount_release(cd->d_localcount, &device_cv, &device_lock);
    573 }
    574 
    575 /*
    576  * Look up a block device by reference to its operations set.
    577  *
    578  * => Caller must ensure that the device is not detached, and therefore
    579  *    that the returned major is still valid when dereferenced.
    580  */
    581 devmajor_t
    582 bdevsw_lookup_major(const struct bdevsw *bdev)
    583 {
    584 	devmajor_t bmajor;
    585 
    586 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    587 		if (bdevsw[bmajor] == bdev)
    588 			return (bmajor);
    589 	}
    590 
    591 	return (NODEVMAJOR);
    592 }
    593 
    594 /*
    595  * Look up a character device by reference to its operations set.
    596  *
    597  * => Caller must ensure that the device is not detached, and therefore
    598  *    that the returned major is still valid when dereferenced.
    599  */
    600 devmajor_t
    601 cdevsw_lookup_major(const struct cdevsw *cdev)
    602 {
    603 	devmajor_t cmajor;
    604 
    605 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    606 		if (cdevsw[cmajor] == cdev)
    607 			return (cmajor);
    608 	}
    609 
    610 	return (NODEVMAJOR);
    611 }
    612 
/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 * => Returns NULL if the block major is invalid/unattached, has no
 *    conv entry, or its paired character major is not attached.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	/* Map the block major to its character major via the conv table. */
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/* cmajor >= 0 implies the loop matched, so devsw_conv[i] is valid. */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}
    646 
    647 /*
    648  * Convert char major number to device driver name.
    649  */
    650 const char *
    651 cdevsw_getname(devmajor_t major)
    652 {
    653 	const char *name;
    654 	int i;
    655 
    656 	name = NULL;
    657 
    658 	if (major < 0)
    659 		return (NULL);
    660 
    661 	mutex_enter(&device_lock);
    662 	for (i = 0 ; i < max_devsw_convs; i++) {
    663 		if (devsw_conv[i].d_cmajor == major) {
    664 			name = devsw_conv[i].d_name;
    665 			break;
    666 		}
    667 	}
    668 	mutex_exit(&device_lock);
    669 	return (name);
    670 }
    671 
    672 /*
    673  * Convert block major number to device driver name.
    674  */
    675 const char *
    676 bdevsw_getname(devmajor_t major)
    677 {
    678 	const char *name;
    679 	int i;
    680 
    681 	name = NULL;
    682 
    683 	if (major < 0)
    684 		return (NULL);
    685 
    686 	mutex_enter(&device_lock);
    687 	for (i = 0 ; i < max_devsw_convs; i++) {
    688 		if (devsw_conv[i].d_bmajor == major) {
    689 			name = devsw_conv[i].d_name;
    690 			break;
    691 		}
    692 	}
    693 	mutex_exit(&device_lock);
    694 	return (name);
    695 }
    696 
    697 /*
    698  * Convert from device name to block major number.
    699  *
    700  * => Caller must ensure that the device is not detached, and therefore
    701  *    that the major number is still valid when dereferenced.
    702  */
    703 devmajor_t
    704 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    705 {
    706 	struct devsw_conv *conv;
    707 	devmajor_t bmajor;
    708 	int i;
    709 
    710 	if (name == NULL)
    711 		return (NODEVMAJOR);
    712 
    713 	mutex_enter(&device_lock);
    714 	for (i = 0 ; i < max_devsw_convs ; i++) {
    715 		size_t len;
    716 
    717 		conv = &devsw_conv[i];
    718 		if (conv->d_name == NULL)
    719 			continue;
    720 		len = strlen(conv->d_name);
    721 		if (strncmp(conv->d_name, name, len) != 0)
    722 			continue;
    723 		if (*(name +len) && !isdigit(*(name + len)))
    724 			continue;
    725 		bmajor = conv->d_bmajor;
    726 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    727 		    bdevsw[bmajor] == NULL)
    728 			break;
    729 		if (devname != NULL) {
    730 #ifdef DEVSW_DEBUG
    731 			if (strlen(conv->d_name) >= devnamelen)
    732 				printf("%s: too short buffer", __func__);
    733 #endif /* DEVSW_DEBUG */
    734 			strncpy(devname, conv->d_name, devnamelen);
    735 			devname[devnamelen - 1] = '\0';
    736 		}
    737 		mutex_exit(&device_lock);
    738 		return (bmajor);
    739 	}
    740 
    741 	mutex_exit(&device_lock);
    742 	return (NODEVMAJOR);
    743 }
    744 
    745 /*
    746  * Convert from device name to char major number.
    747  *
    748  * => Caller must ensure that the device is not detached, and therefore
    749  *    that the major number is still valid when dereferenced.
    750  */
    751 devmajor_t
    752 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    753 {
    754 	struct devsw_conv *conv;
    755 	devmajor_t cmajor;
    756 	int i;
    757 
    758 	if (name == NULL)
    759 		return (NODEVMAJOR);
    760 
    761 	mutex_enter(&device_lock);
    762 	for (i = 0 ; i < max_devsw_convs ; i++) {
    763 		size_t len;
    764 
    765 		conv = &devsw_conv[i];
    766 		if (conv->d_name == NULL)
    767 			continue;
    768 		len = strlen(conv->d_name);
    769 		if (strncmp(conv->d_name, name, len) != 0)
    770 			continue;
    771 		if (*(name +len) && !isdigit(*(name + len)))
    772 			continue;
    773 		cmajor = conv->d_cmajor;
    774 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    775 		    cdevsw[cmajor] == NULL)
    776 			break;
    777 		if (devname != NULL) {
    778 #ifdef DEVSW_DEBUG
    779 			if (strlen(conv->d_name) >= devnamelen)
    780 				printf("%s: too short buffer", __func__);
    781 #endif /* DEVSW_DEBUG */
    782 			strncpy(devname, conv->d_name, devnamelen);
    783 			devname[devnamelen - 1] = '\0';
    784 		}
    785 		mutex_exit(&device_lock);
    786 		return (cmajor);
    787 	}
    788 
    789 	mutex_exit(&device_lock);
    790 	return (NODEVMAJOR);
    791 }
    792 
    793 /*
    794  * Convert from character dev_t to block dev_t.
    795  *
    796  * => Caller must ensure that the device is not detached, and therefore
    797  *    that the major number is still valid when dereferenced.
    798  */
    799 dev_t
    800 devsw_chr2blk(dev_t cdev)
    801 {
    802 	devmajor_t bmajor, cmajor;
    803 	int i;
    804 	dev_t rv;
    805 
    806 	cmajor = major(cdev);
    807 	bmajor = NODEVMAJOR;
    808 	rv = NODEV;
    809 
    810 	mutex_enter(&device_lock);
    811 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    812 		mutex_exit(&device_lock);
    813 		return (NODEV);
    814 	}
    815 	for (i = 0 ; i < max_devsw_convs ; i++) {
    816 		if (devsw_conv[i].d_cmajor == cmajor) {
    817 			bmajor = devsw_conv[i].d_bmajor;
    818 			break;
    819 		}
    820 	}
    821 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    822 		rv = makedev(bmajor, minor(cdev));
    823 	mutex_exit(&device_lock);
    824 
    825 	return (rv);
    826 }
    827 
    828 /*
    829  * Convert from block dev_t to character dev_t.
    830  *
    831  * => Caller must ensure that the device is not detached, and therefore
    832  *    that the major number is still valid when dereferenced.
    833  */
    834 dev_t
    835 devsw_blk2chr(dev_t bdev)
    836 {
    837 	devmajor_t bmajor, cmajor;
    838 	int i;
    839 	dev_t rv;
    840 
    841 	bmajor = major(bdev);
    842 	cmajor = NODEVMAJOR;
    843 	rv = NODEV;
    844 
    845 	mutex_enter(&device_lock);
    846 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    847 		mutex_exit(&device_lock);
    848 		return (NODEV);
    849 	}
    850 	for (i = 0 ; i < max_devsw_convs ; i++) {
    851 		if (devsw_conv[i].d_bmajor == bmajor) {
    852 			cmajor = devsw_conv[i].d_cmajor;
    853 			break;
    854 		}
    855 	}
    856 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    857 		rv = makedev(cmajor, minor(bdev));
    858 	mutex_exit(&device_lock);
    859 
    860 	return (rv);
    861 }
    862 
/*
 * Device access methods.
 */

/*
 * Take the big kernel lock around a driver entry point unless the
 * driver is marked D_MPSAFE.  DEV_LOCK/DEV_UNLOCK must be used as a
 * pair in one function, which must declare a local "int mpflag" that
 * carries the decision from lock to unlock.  Deliberately not wrapped
 * in do { } while (0): callers use them as plain statements.
 */
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
    876 
/*
 * Open a block device: look up and reference the devsw, call the
 * driver's d_open, then drop the reference.  Returns ENXIO if no
 * driver is attached at the device's major.
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup_acquire(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);
	bdevsw_release(d);

	return rv;
}
    900 
    901 int
    902 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    903 {
    904 	const struct bdevsw *d;
    905 	int rv, mpflag;
    906 
    907 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
    908 		return ENXIO;
    909 
    910 	DEV_LOCK(d);
    911 	rv = (*d->d_close)(dev, flag, devtype, l);
    912 	DEV_UNLOCK(d);
    913 	bdevsw_release(d);
    914 
    915 	return rv;
    916 }
    917 
/* DTrace io provider: fires at the start of every block I/O. */
SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

/*
 * Issue a transfer on a block device.  If no driver is attached at
 * the buffer's major, the buffer is completed with ENXIO and full
 * residual; biodone_vfs points at the real biodone() only when the
 * VFS layer is linked in (nullop otherwise).
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup_acquire(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp); /* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
	bdevsw_release(d);
}
    941 
    942 int
    943 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    944 {
    945 	const struct bdevsw *d;
    946 	int rv, mpflag;
    947 
    948 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
    949 		return ENXIO;
    950 
    951 	DEV_LOCK(d);
    952 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    953 	DEV_UNLOCK(d);
    954 	bdevsw_release(d);
    955 
    956 	return rv;
    957 }
    958 
/*
 * Write a crash dump through a block device's d_dump method.
 * Uses the unreferenced bdevsw_lookup(): at dump time the system is
 * single-threaded/paused, so no locking or localcount reference is
 * taken (locking could deadlock in this unstable state).
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
    979 
    980 int
    981 bdev_flags(dev_t dev)
    982 {
    983 	const struct bdevsw *d;
    984 	int rv;
    985 
    986 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
    987 		return 0;
    988 
    989 	rv = d->d_flag & ~D_TYPEMASK;
    990 	bdevsw_release(d);
    991 
    992 	return rv;
    993 }
    994 
    995 int
    996 bdev_type(dev_t dev)
    997 {
    998 	const struct bdevsw *d;
    999 	int rv;
   1000 
   1001 	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
   1002 		return D_OTHER;
   1003 
   1004 	rv = d->d_flag & D_TYPEMASK;
   1005 	bdevsw_release(d);
   1006 
   1007 	return rv;
   1008 }
   1009 
/*
 * Return the size of a block device via the driver's d_psize method,
 * or -1 if no driver is attached or the driver has no d_psize.
 * Skips DEV_LOCK/DEV_UNLOCK while dumping (RB_DUMP), when taking the
 * kernel lock would be unsafe; mpflag is pre-initialized to 0 so the
 * (skipped) DEV_UNLOCK is harmless in that case.
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
		return -1;

	if (d->d_psize == NULL) {
		bdevsw_release(d);
		return -1;
	}

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);
	bdevsw_release(d);
	return rv;
}
   1036 
/*
 * Discard a byte range on a block device via the driver's d_discard
 * routine.  Returns ENXIO if no driver is attached, otherwise the
 * driver's return value.
 */
int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = bdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);
	bdevsw_release(d);

	return rv;
}
   1053 
/*
 * Call the character device's d_open routine.  The devsw lookup is
 * performed under device_lock to synchronize with attach/detach.
 * Returns ENXIO if no driver is attached, otherwise the driver's
 * return value.
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup_acquire(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1077 
/*
 * Call the character device's d_close routine.  Returns ENXIO if no
 * driver is attached, otherwise the driver's return value.
 */
int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1094 
/*
 * Call the character device's d_read routine with the given uio.
 * Returns ENXIO if no driver is attached, otherwise the driver's
 * return value.
 */
int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1111 
/*
 * Call the character device's d_write routine with the given uio.
 * Returns ENXIO if no driver is attached, otherwise the driver's
 * return value.
 */
int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1128 
/*
 * Call the character device's d_ioctl routine.  Returns ENXIO if no
 * driver is attached, otherwise the driver's return value.
 */
int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1145 
/*
 * Call the d_stop routine of the driver attached to tp->t_dev.
 * Silently does nothing if no driver is attached.
 */
void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int mpflag;

	if ((d = cdevsw_lookup_acquire(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);
}
   1160 
   1161 struct tty *
   1162 cdev_tty(dev_t dev)
   1163 {
   1164 	const struct cdevsw *d;
   1165 	struct tty *rv;
   1166 
   1167 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1168 		return NULL;
   1169 
   1170 	/* XXX Check if necessary. */
   1171 	if (d->d_tty == NULL)
   1172 		rv = NULL;
   1173 	else
   1174 		rv= (*d->d_tty)(dev);
   1175 	cdevsw_release(d);
   1176 
   1177 	return rv;
   1178 }
   1179 
/*
 * Call the character device's d_poll routine.  Returns POLLERR if
 * no driver is attached, otherwise the driver's revents mask.
 */
int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1196 
/*
 * Call the character device's d_mmap routine.  Returns (paddr_t)-1
 * (the d_mmap failure value) if no driver is attached, otherwise the
 * driver's result.
 */
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1214 
/*
 * Call the character device's d_kqfilter routine to attach a knote.
 * Returns ENXIO if no driver is attached, otherwise the driver's
 * return value.
 */
int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1231 
/*
 * Discard a byte range on a character device via the driver's
 * d_discard routine.  Returns ENXIO if no driver is attached,
 * otherwise the driver's return value.
 */
int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	/* mpflag: presumably consumed by DEV_LOCK/DEV_UNLOCK -- do not remove */
	int rv, mpflag;

	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);
	cdevsw_release(d);

	return rv;
}
   1248 
   1249 int
   1250 cdev_flags(dev_t dev)
   1251 {
   1252 	const struct cdevsw *d;
   1253 	int rv;
   1254 
   1255 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1256 		return 0;
   1257 
   1258 	rv = d->d_flag & ~D_TYPEMASK;
   1259 	cdevsw_release(d);
   1260 
   1261 	return rv;
   1262 }
   1263 
   1264 int
   1265 cdev_type(dev_t dev)
   1266 {
   1267 	const struct cdevsw *d;
   1268 	int rv;
   1269 
   1270 	if ((d = cdevsw_lookup_acquire(dev)) == NULL)
   1271 		return D_OTHER;
   1272 
   1273 	rv = d->d_flag & D_TYPEMASK;
   1274 	cdevsw_release(d);
   1275 
   1276 	return rv;
   1277 }
   1278 
   1279 /*
   1280  * nommap(dev, off, prot)
   1281  *
   1282  *	mmap routine that always fails, for non-mmappable devices.
   1283  */
   1284 paddr_t
   1285 nommap(dev_t dev, off_t off, int prot)
   1286 {
   1287 
   1288 	return (paddr_t)-1;
   1289 }
   1290