Home | History | Annotate | Line # | Download | only in kern
subr_devsw.c revision 1.34.2.16
      1 /*	$NetBSD: subr_devsw.c,v 1.34.2.16 2017/04/25 21:36:41 pgoyette Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
     48  *	When the system initially boots only the statically allocated
     49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
     50  *	allocation, we allocate a fixed block of memory to hold the new,
     51  *	expanded index.  This "fork" of the table is only ever performed
     52  *	once in order to guarantee that other threads may safely access
     53  *	the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
     63  *	XXX Currently, the wrapper methods such as cdev_read() verify
     64  *	that a device driver does in fact exist before calling the
     65  *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
     68  *	is dropped.
     69  */
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34.2.16 2017/04/25 21:36:41 pgoyette Exp $");
     73 
     74 #ifdef _KERNEL_OPT
     75 #include "opt_dtrace.h"
     76 #endif
     77 
     78 #include <sys/param.h>
     79 #include <sys/conf.h>
     80 #include <sys/kmem.h>
     81 #include <sys/systm.h>
     82 #include <sys/poll.h>
     83 #include <sys/tty.h>
     84 #include <sys/cpu.h>
     85 #include <sys/buf.h>
     86 #include <sys/reboot.h>
     87 #include <sys/sdt.h>
     88 #include <sys/atomic.h>
     89 #include <sys/condvar.h>
     90 #include <sys/localcount.h>
     91 #include <sys/pserialize.h>
     92 
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* upper bound on major device numbers */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

/*
 * Device switch tables: the current table pointers and the static,
 * config(8)-generated initial tables they start out pointing at.
 */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;	/* statically configured counts */
extern int max_bdevsws, max_cdevsws, max_devsw_convs;	/* current table sizes */

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

/* Guard all devsw state; device_cv is used by localcount drain/release. */
kmutex_t	device_lock;
kcondvar_t	device_cv;
pserialize_t	device_psz = NULL;	/* created by devsw_detach_init() */

/* Calls biodone() iff vfs is present (see bdev_strategy()); else nullop. */
void (*biodone_vfs)(buf_t *) = (void *)nullop;
    119 
/*
 * Initialize the devsw subsystem: check that the statically configured
 * tables fit below MAXDEVSW, and set up the lock and condvar that guard
 * all devsw state.  Called once, early in boot.
 */
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&device_cv, "devsw");
}
    129 
/*
 * Create the pserialize instance used by devsw_detach_locked() to wait
 * out lock-free readers in {b,c}devsw_lookup_acquire().  Deferred from
 * devsw_init() -- presumably because pserialize_create() is not usable
 * that early in boot; confirm against init_main.c ordering.
 */
void
devsw_detach_init(void)
{

	device_psz = pserialize_create();
}
    136 
/*
 * Register a driver's block and/or character device switch under
 * "devname".  A character devsw is required; the block devsw may be
 * NULL.  On entry *bmajor/*cmajor are either specific major numbers
 * or -1 to request allocation; the chosen majors are written back on
 * success.
 *
 * Returns 0 on success; EINVAL for bad arguments or a major-number
 * mismatch against an existing name entry; EEXIST if a requested slot
 * is occupied; ENOMEM if allocation or table expansion fails.
 */
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/* Dynamically attached devsw's must carry distinct localcounts. */
	if (bdev != NULL) {
		KASSERTMSG(bdev->d_localcount != NULL,
		    "%s: bdev %s has no d_localcount", __func__, devname);
		KASSERTMSG(bdev->d_localcount != cdev->d_localcount,
		    "%s: bdev and cdev for %s have same d_localcount",
		    __func__, devname);
	}
	if (cdev != NULL)	/* always true here; checked above */
		KASSERTMSG(cdev->d_localcount != NULL,
		    "%s: cdev %s has no d_localcount", __func__, devname);

	/*
	 * If the name is already known (e.g. a re-attaching driver),
	 * re-use its recorded majors instead of allocating new ones.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* Explicitly requested majors must match the record. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		/* use membar_producer() to ensure visibility of the xdevsw */
		if (bdev != NULL) {
			localcount_init(bdev->d_localcount);
			membar_producer();
			bdevsw[*bmajor] = bdev;
		}
		localcount_init(cdev->d_localcount);
		membar_producer();
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* New name: allocate majors and install the devsw entries. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		/* Roll back the bdev installed just above. */
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/*
	 * Find a free name<->major conversion slot, growing the table
	 * by one entry if it is full.
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Initialize the new tail entry, then copy the old table. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	/* Record the name -> (bmajor, cmajor) mapping. */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
    257 
/*
 * Install a block devsw at *devmajor, allocating a major dynamically
 * if *devmajor is -1.  On first overflow of the static bdevsw0 table,
 * the index is copied once into a full-size MAXDEVSW table ("fork",
 * see the Allocation comment above) so concurrent readers stay valid.
 *
 * Called with device_lock held.  Returns 0, ENOMEM, or EEXIST.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Pick the first major that is neither in use in the
		 * table nor reserved by a name-conversion entry.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted", __func__);
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/* The table is only ever expanded once. */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	KASSERTMSG(devsw->d_localcount != NULL, "%s: bdev for major %d has "
	    "no localcount", __func__, *devmajor);
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the bdevsw */
	membar_producer();

	bdevsw[*devmajor] = devsw;

	return (0);
}
    314 
/*
 * Install a character devsw at *devmajor, allocating a major
 * dynamically if *devmajor is -1.  Mirrors bdevsw_attach(); the
 * table is forked to full size at most once.
 *
 * Called with device_lock held.  Returns 0, ENOMEM, or EEXIST.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/*
		 * Pick the first major that is neither in use in the
		 * table nor reserved by a name-conversion entry.
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted", __func__);
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/* The table is only ever expanded once. */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	KASSERTMSG(devsw->d_localcount != NULL, "%s: cdev for major %d has "
	    "no localcount", __func__, *devmajor);
	localcount_init(devsw->d_localcount);

	/* ensure visibility of the cdevsw */
	membar_producer();

	cdevsw[*devmajor] = devsw;

	return (0);
}
    368 
/*
 * First, look up both bdev and cdev indices, and remove the
 * {b,c}devsw[] entries so no new references can be taken.  Then
 * drain any existing references.
 */
    374 
/*
 * Remove bdev and cdev from the {b,c}devsw[] tables so no new
 * references can be taken, wait for current pserialize readers to
 * leave their read sections, then drain and destroy the localcounts.
 *
 * Called with device_lock held; localcount_drain() may drop and
 * re-take the lock while sleeping on device_cv.
 * NOTE(review): relies on device_psz having been created by
 * devsw_detach_init() -- confirm no detach can happen before that.
 */
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i, j;

	KASSERT(mutex_owned(&device_lock));

	/* i/j == max_* is the "not found" sentinel used below. */
	i = max_bdevsws;
	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;

			KASSERTMSG(bdev->d_localcount != NULL,
			    "%s: no bdev localcount for major %d", __func__, i);
			break;
		}
	}
	j = max_cdevsws;
	if (cdev != NULL) {
		for (j = 0 ; j < max_cdevsws ; j++) {
			if (cdevsw[j] != cdev)
				continue;

			KASSERTMSG(cdev->d_localcount != NULL,
			    "%s: no cdev localcount for major %d", __func__, j);
			break;
		}
	}
	/* Unhook the entries before waiting for readers. */
	if (i < max_bdevsws)
		bdevsw[i] = NULL;
	if (j < max_cdevsws )
		cdevsw[j] = NULL;

	/* Wait for all current readers to finish with the devsw's */
	pserialize_perform(device_psz);

	/*
	 * No new accessors can reach the bdev and cdev via the
	 * {b,c}devsw[] arrays, so no new references can be
	 * acquired.  Wait for all existing references to drain,
	 * and then destroy.
	 */

	if (i < max_bdevsws && bdev->d_localcount != NULL) {
		localcount_drain(bdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(bdev->d_localcount);
	}
	if (j < max_cdevsws && cdev->d_localcount != NULL ) {
		localcount_drain(cdev->d_localcount, &device_cv, &device_lock);
		localcount_fini(cdev->d_localcount);
	}
}
    428 
/*
 * Detach a driver's block and/or character devsw entries (either may
 * be NULL) and drain all outstanding references.  May sleep.
 * Always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
    438 
    439 /*
    440  * Look up a block device by number.
    441  *
    442  * => Caller must ensure that the device is attached.
    443  */
    444 const struct bdevsw *
    445 bdevsw_lookup(dev_t dev)
    446 {
    447 	devmajor_t bmajor;
    448 
    449 	if (dev == NODEV)
    450 		return (NULL);
    451 	bmajor = major(dev);
    452 	if (bmajor < 0 || bmajor >= max_bdevsws)
    453 		return (NULL);
    454 
    455 	/* Wait for the content of the struct bdevsw to become visible */
    456 	membar_datadep_consumer();
    457 
    458 	return (bdevsw[bmajor]);
    459 }
    460 
/*
 * Look up a block device by number and, for a dynamically attached
 * driver, acquire a localcount reference that blocks detach until
 * bdevsw_release() is called.  Returns NULL if nothing is attached
 * at that major.
 */
const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct bdevsw pointer */
	bdev = bdevsw[bmajor];
	if (bdev == NULL)
		goto out;

	/* Wait for the content of the struct bdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (bdev->d_localcount != NULL)
		localcount_acquire(bdev->d_localcount);

 out:	pserialize_read_exit(s);

	return bdev;
}
    493 
/*
 * Release a reference taken by bdevsw_lookup_acquire().  Statically
 * linked devsw's (d_localcount == NULL) are never reference-counted.
 */
void
bdevsw_release(const struct bdevsw *bd)
{

	KASSERT(bd != NULL);
	if (bd->d_localcount != NULL)
		localcount_release(bd->d_localcount, &device_cv, &device_lock);
}
    502 
/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	return (cdevsw[cmajor]);
}
    524 
/*
 * Look up a character device by number and, for a dynamically attached
 * driver, acquire a localcount reference that blocks detach until
 * cdevsw_release() is called.  Returns NULL if nothing is attached
 * at that major.
 */
const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL;
	int s;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	/* Start a read transaction to block localcount_drain() */
	s = pserialize_read_enter();

	/* Get the struct cdevsw pointer */
	cdev = cdevsw[cmajor];
	if (cdev == NULL)
		goto out;

	/* Wait for the content of the struct cdevsw to become visible */
	membar_datadep_consumer();

	/* If the devsw is not statically linked, acquire a reference */
	if (cdev->d_localcount != NULL)
		localcount_acquire(cdev->d_localcount);

 out:	pserialize_read_exit(s);

	return cdev;
}
    557 
/*
 * Release a reference taken by cdevsw_lookup_acquire().  Statically
 * linked devsw's (d_localcount == NULL) are never reference-counted.
 */
void
cdevsw_release(const struct cdevsw *cd)
{

	KASSERT(cd != NULL);
	if (cd->d_localcount != NULL)
		localcount_release(cd->d_localcount, &device_cv, &device_lock);
}
    566 
    567 /*
    568  * Look up a block device by reference to its operations set.
    569  *
    570  * => Caller must ensure that the device is not detached, and therefore
    571  *    that the returned major is still valid when dereferenced.
    572  */
    573 devmajor_t
    574 bdevsw_lookup_major(const struct bdevsw *bdev)
    575 {
    576 	devmajor_t bmajor;
    577 
    578 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    579 		if (bdevsw[bmajor] == bdev)
    580 			return (bmajor);
    581 	}
    582 
    583 	return (NODEVMAJOR);
    584 }
    585 
    586 /*
    587  * Look up a character device by reference to its operations set.
    588  *
    589  * => Caller must ensure that the device is not detached, and therefore
    590  *    that the returned major is still valid when dereferenced.
    591  */
    592 devmajor_t
    593 cdevsw_lookup_major(const struct cdevsw *cdev)
    594 {
    595 	devmajor_t cmajor;
    596 
    597 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    598 		if (cdevsw[cmajor] == cdev)
    599 			return (cmajor);
    600 	}
    601 
    602 	return (NODEVMAJOR);
    603 }
    604 
    605 /*
    606  * Convert from block major number to name.
    607  *
    608  * => Caller must ensure that the device is not detached, and therefore
    609  *    that the name pointer is still valid when dereferenced.
    610  */
    611 const char *
    612 devsw_blk2name(devmajor_t bmajor)
    613 {
    614 	const char *name;
    615 	devmajor_t cmajor;
    616 	int i;
    617 
    618 	name = NULL;
    619 	cmajor = -1;
    620 
    621 	mutex_enter(&device_lock);
    622 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    623 		mutex_exit(&device_lock);
    624 		return (NULL);
    625 	}
    626 	for (i = 0 ; i < max_devsw_convs; i++) {
    627 		if (devsw_conv[i].d_bmajor == bmajor) {
    628 			cmajor = devsw_conv[i].d_cmajor;
    629 			break;
    630 		}
    631 	}
    632 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    633 		name = devsw_conv[i].d_name;
    634 	mutex_exit(&device_lock);
    635 
    636 	return (name);
    637 }
    638 
    639 /*
    640  * Convert char major number to device driver name.
    641  */
    642 const char *
    643 cdevsw_getname(devmajor_t major)
    644 {
    645 	const char *name;
    646 	int i;
    647 
    648 	name = NULL;
    649 
    650 	if (major < 0)
    651 		return (NULL);
    652 
    653 	mutex_enter(&device_lock);
    654 	for (i = 0 ; i < max_devsw_convs; i++) {
    655 		if (devsw_conv[i].d_cmajor == major) {
    656 			name = devsw_conv[i].d_name;
    657 			break;
    658 		}
    659 	}
    660 	mutex_exit(&device_lock);
    661 	return (name);
    662 }
    663 
    664 /*
    665  * Convert block major number to device driver name.
    666  */
    667 const char *
    668 bdevsw_getname(devmajor_t major)
    669 {
    670 	const char *name;
    671 	int i;
    672 
    673 	name = NULL;
    674 
    675 	if (major < 0)
    676 		return (NULL);
    677 
    678 	mutex_enter(&device_lock);
    679 	for (i = 0 ; i < max_devsw_convs; i++) {
    680 		if (devsw_conv[i].d_bmajor == major) {
    681 			name = devsw_conv[i].d_name;
    682 			break;
    683 		}
    684 	}
    685 	mutex_exit(&device_lock);
    686 	return (name);
    687 }
    688 
    689 /*
    690  * Convert from device name to block major number.
    691  *
    692  * => Caller must ensure that the device is not detached, and therefore
    693  *    that the major number is still valid when dereferenced.
    694  */
    695 devmajor_t
    696 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    697 {
    698 	struct devsw_conv *conv;
    699 	devmajor_t bmajor;
    700 	int i;
    701 
    702 	if (name == NULL)
    703 		return (NODEVMAJOR);
    704 
    705 	mutex_enter(&device_lock);
    706 	for (i = 0 ; i < max_devsw_convs ; i++) {
    707 		size_t len;
    708 
    709 		conv = &devsw_conv[i];
    710 		if (conv->d_name == NULL)
    711 			continue;
    712 		len = strlen(conv->d_name);
    713 		if (strncmp(conv->d_name, name, len) != 0)
    714 			continue;
    715 		if (*(name +len) && !isdigit(*(name + len)))
    716 			continue;
    717 		bmajor = conv->d_bmajor;
    718 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    719 		    bdevsw[bmajor] == NULL)
    720 			break;
    721 		if (devname != NULL) {
    722 #ifdef DEVSW_DEBUG
    723 			if (strlen(conv->d_name) >= devnamelen)
    724 				printf("devsw_name2blk: too short buffer");
    725 #endif /* DEVSW_DEBUG */
    726 			strncpy(devname, conv->d_name, devnamelen);
    727 			devname[devnamelen - 1] = '\0';
    728 		}
    729 		mutex_exit(&device_lock);
    730 		return (bmajor);
    731 	}
    732 
    733 	mutex_exit(&device_lock);
    734 	return (NODEVMAJOR);
    735 }
    736 
    737 /*
    738  * Convert from device name to char major number.
    739  *
    740  * => Caller must ensure that the device is not detached, and therefore
    741  *    that the major number is still valid when dereferenced.
    742  */
    743 devmajor_t
    744 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    745 {
    746 	struct devsw_conv *conv;
    747 	devmajor_t cmajor;
    748 	int i;
    749 
    750 	if (name == NULL)
    751 		return (NODEVMAJOR);
    752 
    753 	mutex_enter(&device_lock);
    754 	for (i = 0 ; i < max_devsw_convs ; i++) {
    755 		size_t len;
    756 
    757 		conv = &devsw_conv[i];
    758 		if (conv->d_name == NULL)
    759 			continue;
    760 		len = strlen(conv->d_name);
    761 		if (strncmp(conv->d_name, name, len) != 0)
    762 			continue;
    763 		if (*(name +len) && !isdigit(*(name + len)))
    764 			continue;
    765 		cmajor = conv->d_cmajor;
    766 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    767 		    cdevsw[cmajor] == NULL)
    768 			break;
    769 		if (devname != NULL) {
    770 #ifdef DEVSW_DEBUG
    771 			if (strlen(conv->d_name) >= devnamelen)
    772 				printf("devsw_name2chr: too short buffer");
    773 #endif /* DEVSW_DEBUG */
    774 			strncpy(devname, conv->d_name, devnamelen);
    775 			devname[devnamelen - 1] = '\0';
    776 		}
    777 		mutex_exit(&device_lock);
    778 		return (cmajor);
    779 	}
    780 
    781 	mutex_exit(&device_lock);
    782 	return (NODEVMAJOR);
    783 }
    784 
    785 /*
    786  * Convert from character dev_t to block dev_t.
    787  *
    788  * => Caller must ensure that the device is not detached, and therefore
    789  *    that the major number is still valid when dereferenced.
    790  */
    791 dev_t
    792 devsw_chr2blk(dev_t cdev)
    793 {
    794 	devmajor_t bmajor, cmajor;
    795 	int i;
    796 	dev_t rv;
    797 
    798 	cmajor = major(cdev);
    799 	bmajor = NODEVMAJOR;
    800 	rv = NODEV;
    801 
    802 	mutex_enter(&device_lock);
    803 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    804 		mutex_exit(&device_lock);
    805 		return (NODEV);
    806 	}
    807 	for (i = 0 ; i < max_devsw_convs ; i++) {
    808 		if (devsw_conv[i].d_cmajor == cmajor) {
    809 			bmajor = devsw_conv[i].d_bmajor;
    810 			break;
    811 		}
    812 	}
    813 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    814 		rv = makedev(bmajor, minor(cdev));
    815 	mutex_exit(&device_lock);
    816 
    817 	return (rv);
    818 }
    819 
    820 /*
    821  * Convert from block dev_t to character dev_t.
    822  *
    823  * => Caller must ensure that the device is not detached, and therefore
    824  *    that the major number is still valid when dereferenced.
    825  */
    826 dev_t
    827 devsw_blk2chr(dev_t bdev)
    828 {
    829 	devmajor_t bmajor, cmajor;
    830 	int i;
    831 	dev_t rv;
    832 
    833 	bmajor = major(bdev);
    834 	cmajor = NODEVMAJOR;
    835 	rv = NODEV;
    836 
    837 	mutex_enter(&device_lock);
    838 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    839 		mutex_exit(&device_lock);
    840 		return (NODEV);
    841 	}
    842 	for (i = 0 ; i < max_devsw_convs ; i++) {
    843 		if (devsw_conv[i].d_bmajor == bmajor) {
    844 			cmajor = devsw_conv[i].d_cmajor;
    845 			break;
    846 		}
    847 	}
    848 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    849 		rv = makedev(cmajor, minor(bdev));
    850 	mutex_exit(&device_lock);
    851 
    852 	return (rv);
    853 }
    854 
    855 /*
    856  * Device access methods.
    857  */
    858 
/*
 * Take/release the big kernel lock around a driver call unless the
 * driver is marked D_MPSAFE.  DEV_LOCK() records the D_MPSAFE bit in
 * the caller's local "mpflag" so DEV_UNLOCK() knows whether to unlock.
 *
 * Wrapped in do { } while (0) so each use expands to exactly one
 * statement and cannot mis-bind in an unbraced if/else (e.g. the
 * conditional uses in bdev_size()).
 */
#define	DEV_LOCK(d)						\
do {								\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}							\
} while (0)

#define	DEV_UNLOCK(d)						\
do {								\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}							\
} while (0)
    868 
    869 int
    870 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    871 {
    872 	const struct bdevsw *d;
    873 	int rv, mpflag;
    874 
    875 	/*
    876 	 * For open we need to lock, in order to synchronize
    877 	 * with attach/detach.
    878 	 */
    879 	mutex_enter(&device_lock);
    880 	d = bdevsw_lookup(dev);
    881 	mutex_exit(&device_lock);
    882 	if (d == NULL)
    883 		return ENXIO;
    884 
    885 	DEV_LOCK(d);
    886 	rv = (*d->d_open)(dev, flag, devtype, l);
    887 	DEV_UNLOCK(d);
    888 
    889 	return rv;
    890 }
    891 
    892 int
    893 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    894 {
    895 	const struct bdevsw *d;
    896 	int rv, mpflag;
    897 
    898 	if ((d = bdevsw_lookup(dev)) == NULL)
    899 		return ENXIO;
    900 
    901 	DEV_LOCK(d);
    902 	rv = (*d->d_close)(dev, flag, devtype, l);
    903 	DEV_UNLOCK(d);
    904 
    905 	return rv;
    906 }
    907 
/* DTrace "io" provider: probe fired at the start of each block I/O. */
SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
    910 
    911 void
    912 bdev_strategy(struct buf *bp)
    913 {
    914 	const struct bdevsw *d;
    915 	int mpflag;
    916 
    917 	SDT_PROBE1(io, kernel, , start, bp);
    918 
    919 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
    920 		bp->b_error = ENXIO;
    921 		bp->b_resid = bp->b_bcount;
    922 		biodone_vfs(bp); /* biodone() iff vfs present */
    923 		return;
    924 	}
    925 
    926 	DEV_LOCK(d);
    927 	(*d->d_strategy)(bp);
    928 	DEV_UNLOCK(d);
    929 }
    930 
    931 int
    932 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    933 {
    934 	const struct bdevsw *d;
    935 	int rv, mpflag;
    936 
    937 	if ((d = bdevsw_lookup(dev)) == NULL)
    938 		return ENXIO;
    939 
    940 	DEV_LOCK(d);
    941 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    942 	DEV_UNLOCK(d);
    943 
    944 	return rv;
    945 }
    946 
/*
 * Call a block driver's d_dump method to write a crash dump.
 * Returns ENXIO if no driver is attached at dev's major.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
    967 
    968 int
    969 bdev_flags(dev_t dev)
    970 {
    971 	const struct bdevsw *d;
    972 
    973 	if ((d = bdevsw_lookup(dev)) == NULL)
    974 		return 0;
    975 	return d->d_flag & ~D_TYPEMASK;
    976 }
    977 
    978 int
    979 bdev_type(dev_t dev)
    980 {
    981 	const struct bdevsw *d;
    982 
    983 	if ((d = bdevsw_lookup(dev)) == NULL)
    984 		return D_OTHER;
    985 	return d->d_flag & D_TYPEMASK;
    986 }
    987 
/*
 * Call a block driver's d_psize to get the device's size (units per
 * the d_psize contract -- confirm per-driver), or -1 if there is no
 * driver or no d_psize method.
 *
 * mpflag is pre-initialized to 0 because when dumping (RB_DUMP) both
 * DEV_LOCK() and DEV_UNLOCK() are skipped, so DEV_UNLOCK() must never
 * see an indeterminate value.
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't to try lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}
   1010 
   1011 int
   1012 bdev_discard(dev_t dev, off_t pos, off_t len)
   1013 {
   1014 	const struct bdevsw *d;
   1015 	int rv, mpflag;
   1016 
   1017 	if ((d = bdevsw_lookup(dev)) == NULL)
   1018 		return ENXIO;
   1019 
   1020 	DEV_LOCK(d);
   1021 	rv = (*d->d_discard)(dev, pos, len);
   1022 	DEV_UNLOCK(d);
   1023 
   1024 	return rv;
   1025 }
   1026 
   1027 int
   1028 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
   1029 {
   1030 	const struct cdevsw *d;
   1031 	int rv, mpflag;
   1032 
   1033 	/*
   1034 	 * For open we need to lock, in order to synchronize
   1035 	 * with attach/detach.
   1036 	 */
   1037 	mutex_enter(&device_lock);
   1038 	d = cdevsw_lookup(dev);
   1039 	mutex_exit(&device_lock);
   1040 	if (d == NULL)
   1041 		return ENXIO;
   1042 
   1043 	DEV_LOCK(d);
   1044 	rv = (*d->d_open)(dev, flag, devtype, l);
   1045 	DEV_UNLOCK(d);
   1046 
   1047 	return rv;
   1048 }
   1049 
   1050 int
   1051 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
   1052 {
   1053 	const struct cdevsw *d;
   1054 	int rv, mpflag;
   1055 
   1056 	if ((d = cdevsw_lookup(dev)) == NULL)
   1057 		return ENXIO;
   1058 
   1059 	DEV_LOCK(d);
   1060 	rv = (*d->d_close)(dev, flag, devtype, l);
   1061 	DEV_UNLOCK(d);
   1062 
   1063 	return rv;
   1064 }
   1065 
   1066 int
   1067 cdev_read(dev_t dev, struct uio *uio, int flag)
   1068 {
   1069 	const struct cdevsw *d;
   1070 	int rv, mpflag;
   1071 
   1072 	if ((d = cdevsw_lookup(dev)) == NULL)
   1073 		return ENXIO;
   1074 
   1075 	DEV_LOCK(d);
   1076 	rv = (*d->d_read)(dev, uio, flag);
   1077 	DEV_UNLOCK(d);
   1078 
   1079 	return rv;
   1080 }
   1081 
   1082 int
   1083 cdev_write(dev_t dev, struct uio *uio, int flag)
   1084 {
   1085 	const struct cdevsw *d;
   1086 	int rv, mpflag;
   1087 
   1088 	if ((d = cdevsw_lookup(dev)) == NULL)
   1089 		return ENXIO;
   1090 
   1091 	DEV_LOCK(d);
   1092 	rv = (*d->d_write)(dev, uio, flag);
   1093 	DEV_UNLOCK(d);
   1094 
   1095 	return rv;
   1096 }
   1097 
   1098 int
   1099 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
   1100 {
   1101 	const struct cdevsw *d;
   1102 	int rv, mpflag;
   1103 
   1104 	if ((d = cdevsw_lookup(dev)) == NULL)
   1105 		return ENXIO;
   1106 
   1107 	DEV_LOCK(d);
   1108 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
   1109 	DEV_UNLOCK(d);
   1110 
   1111 	return rv;
   1112 }
   1113 
   1114 void
   1115 cdev_stop(struct tty *tp, int flag)
   1116 {
   1117 	const struct cdevsw *d;
   1118 	int mpflag;
   1119 
   1120 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
   1121 		return;
   1122 
   1123 	DEV_LOCK(d);
   1124 	(*d->d_stop)(tp, flag);
   1125 	DEV_UNLOCK(d);
   1126 }
   1127 
   1128 struct tty *
   1129 cdev_tty(dev_t dev)
   1130 {
   1131 	const struct cdevsw *d;
   1132 
   1133 	if ((d = cdevsw_lookup(dev)) == NULL)
   1134 		return NULL;
   1135 
   1136 	/* XXX Check if necessary. */
   1137 	if (d->d_tty == NULL)
   1138 		return NULL;
   1139 
   1140 	return (*d->d_tty)(dev);
   1141 }
   1142 
   1143 int
   1144 cdev_poll(dev_t dev, int flag, lwp_t *l)
   1145 {
   1146 	const struct cdevsw *d;
   1147 	int rv, mpflag;
   1148 
   1149 	if ((d = cdevsw_lookup(dev)) == NULL)
   1150 		return POLLERR;
   1151 
   1152 	DEV_LOCK(d);
   1153 	rv = (*d->d_poll)(dev, flag, l);
   1154 	DEV_UNLOCK(d);
   1155 
   1156 	return rv;
   1157 }
   1158 
   1159 paddr_t
   1160 cdev_mmap(dev_t dev, off_t off, int flag)
   1161 {
   1162 	const struct cdevsw *d;
   1163 	paddr_t rv;
   1164 	int mpflag;
   1165 
   1166 	if ((d = cdevsw_lookup(dev)) == NULL)
   1167 		return (paddr_t)-1LL;
   1168 
   1169 	DEV_LOCK(d);
   1170 	rv = (*d->d_mmap)(dev, off, flag);
   1171 	DEV_UNLOCK(d);
   1172 
   1173 	return rv;
   1174 }
   1175 
   1176 int
   1177 cdev_kqfilter(dev_t dev, struct knote *kn)
   1178 {
   1179 	const struct cdevsw *d;
   1180 	int rv, mpflag;
   1181 
   1182 	if ((d = cdevsw_lookup(dev)) == NULL)
   1183 		return ENXIO;
   1184 
   1185 	DEV_LOCK(d);
   1186 	rv = (*d->d_kqfilter)(dev, kn);
   1187 	DEV_UNLOCK(d);
   1188 
   1189 	return rv;
   1190 }
   1191 
   1192 int
   1193 cdev_discard(dev_t dev, off_t pos, off_t len)
   1194 {
   1195 	const struct cdevsw *d;
   1196 	int rv, mpflag;
   1197 
   1198 	if ((d = cdevsw_lookup(dev)) == NULL)
   1199 		return ENXIO;
   1200 
   1201 	DEV_LOCK(d);
   1202 	rv = (*d->d_discard)(dev, pos, len);
   1203 	DEV_UNLOCK(d);
   1204 
   1205 	return rv;
   1206 }
   1207 
   1208 int
   1209 cdev_flags(dev_t dev)
   1210 {
   1211 	const struct cdevsw *d;
   1212 
   1213 	if ((d = cdevsw_lookup(dev)) == NULL)
   1214 		return 0;
   1215 	return d->d_flag & ~D_TYPEMASK;
   1216 }
   1217 
   1218 int
   1219 cdev_type(dev_t dev)
   1220 {
   1221 	const struct cdevsw *d;
   1222 
   1223 	if ((d = cdevsw_lookup(dev)) == NULL)
   1224 		return D_OTHER;
   1225 	return d->d_flag & D_TYPEMASK;
   1226 }
   1227 
   1228 /*
   1229  * nommap(dev, off, prot)
   1230  *
   1231  *	mmap routine that always fails, for non-mmappable devices.
   1232  */
   1233 paddr_t
   1234 nommap(dev_t dev, off_t off, int prot)
   1235 {
   1236 
   1237 	return (paddr_t)-1;
   1238 }
   1239