      1 /*	$NetBSD: subr_devsw.c,v 1.33.2.2 2017/02/05 13:40:56 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Overview
     34  *
     35  *	subr_devsw.c: registers device drivers by name and by major
     36  *	number, and provides wrapper methods for performing I/O and
     37  *	other tasks on device drivers, keying on the device number
     38  *	(dev_t).
     39  *
     40  *	When the system is built, the config(8) command generates
     41  *	static tables of device drivers built into the kernel image
     42  *	along with their associated methods.  These are recorded in
     43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
     44  *	and removed from the system dynamically.
     45  *
     46  * Allocation
     47  *
      48  *	When the system initially boots, only the statically allocated
      49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
      50  *	dynamic attach, we allocate a fixed block of memory to hold the new,
     51  *	expanded index.  This "fork" of the table is only ever performed
     52  *	once in order to guarantee that other threads may safely access
     53  *	the device tables:
     54  *
     55  *	o Once a thread has a "reference" to the table via an earlier
     56  *	  open() call, we know that the entry in the table must exist
     57  *	  and so it is safe to access it.
     58  *
     59  *	o Regardless of whether other threads see the old or new
     60  *	  pointers, they will point to a correct device switch
     61  *	  structure for the operation being performed.
     62  *
     63  *	XXX Currently, the wrapper methods such as cdev_read() verify
     64  *	that a device driver does in fact exist before calling the
     65  *	associated driver method.  This should be changed so that
      66  *	once the device has been referenced by a vnode (opened),
      67  *	calling the other methods should be valid until that reference
     68  *	is dropped.
     69  */
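
/*
 * Example usage (an illustrative sketch): a driver attached at run time
 * would typically register its switch entries once and unregister them on
 * unload.  The "exdev_*" names are hypothetical; passing majors of
 * NODEVMAJOR (-1) asks devsw_attach() to pick free ones.
 *
 *	static devmajor_t exdev_bmajor = NODEVMAJOR;
 *	static devmajor_t exdev_cmajor = NODEVMAJOR;
 *
 *	static int
 *	exdev_register(void)
 *	{
 *
 *		return devsw_attach("exdev", &exdev_bdevsw, &exdev_bmajor,
 *		    &exdev_cdevsw, &exdev_cmajor);
 *	}
 *
 *	static void
 *	exdev_unregister(void)
 *	{
 *
 *		(void)devsw_detach(&exdev_bdevsw, &exdev_cdevsw);
 *	}
 */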
     70 
     71 #include <sys/cdefs.h>
     72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.33.2.2 2017/02/05 13:40:56 skrll Exp $");
     73 
     74 #ifdef _KERNEL_OPT
     75 #include "opt_dtrace.h"
     76 #endif
     77 
     78 #include <sys/param.h>
     79 #include <sys/conf.h>
     80 #include <sys/kmem.h>
     81 #include <sys/systm.h>
     82 #include <sys/poll.h>
     83 #include <sys/tty.h>
     84 #include <sys/cpu.h>
     85 #include <sys/buf.h>
     86 #include <sys/reboot.h>
     87 #include <sys/sdt.h>
     88 
     89 #ifdef DEVSW_DEBUG
     90 #define	DPRINTF(x)	printf x
     91 #else /* DEVSW_DEBUG */
     92 #define	DPRINTF(x)
     93 #endif /* DEVSW_DEBUG */
     94 
      95 #define	MAXDEVSW	512	/* the maximum number of device majors */
     96 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
     97 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
     98 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
     99 
    100 extern const struct bdevsw **bdevsw, *bdevsw0[];
    101 extern const struct cdevsw **cdevsw, *cdevsw0[];
    102 extern struct devsw_conv *devsw_conv, devsw_conv0[];
    103 extern const int sys_bdevsws, sys_cdevsws;
    104 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
    105 
    106 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
    107 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
    108 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
    109 
    110 kmutex_t device_lock;
    111 
    112 void (*biodone_vfs)(buf_t *) = (void *)nullop;
    113 
    114 void
    115 devsw_init(void)
    116 {
    117 
    118 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
    119 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
    120 	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
    121 }
    122 
    123 int
    124 devsw_attach(const char *devname,
    125 	     const struct bdevsw *bdev, devmajor_t *bmajor,
    126 	     const struct cdevsw *cdev, devmajor_t *cmajor)
    127 {
    128 	struct devsw_conv *conv;
    129 	char *name;
    130 	int error, i;
    131 	size_t len;
    132 
    133 	if (devname == NULL || cdev == NULL)
    134 		return (EINVAL);
    135 
    136 	mutex_enter(&device_lock);
    137 
    138 	for (i = 0 ; i < max_devsw_convs ; i++) {
    139 		conv = &devsw_conv[i];
    140 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
    141 			continue;
    142 
    143 		if (*bmajor < 0)
    144 			*bmajor = conv->d_bmajor;
    145 		if (*cmajor < 0)
    146 			*cmajor = conv->d_cmajor;
    147 
    148 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
    149 			error = EINVAL;
    150 			goto fail;
    151 		}
    152 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
    153 			error = EINVAL;
    154 			goto fail;
    155 		}
    156 
    157 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
    158 		    cdevsw[*cmajor] != NULL) {
    159 			error = EEXIST;
    160 			goto fail;
    161 		}
    162 
    163 		if (bdev != NULL)
    164 			bdevsw[*bmajor] = bdev;
    165 		cdevsw[*cmajor] = cdev;
    166 
    167 		mutex_exit(&device_lock);
    168 		return (0);
    169 	}
    170 
    171 	error = bdevsw_attach(bdev, bmajor);
    172 	if (error != 0)
    173 		goto fail;
    174 	error = cdevsw_attach(cdev, cmajor);
    175 	if (error != 0) {
    176 		devsw_detach_locked(bdev, NULL);
    177 		goto fail;
    178 	}
    179 
    180 	for (i = 0 ; i < max_devsw_convs ; i++) {
    181 		if (devsw_conv[i].d_name == NULL)
    182 			break;
    183 	}
    184 	if (i == max_devsw_convs) {
    185 		struct devsw_conv *newptr;
    186 		int old_convs, new_convs;
    187 
    188 		old_convs = max_devsw_convs;
    189 		new_convs = old_convs + 1;
    190 
    191 		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
    192 		if (newptr == NULL) {
    193 			devsw_detach_locked(bdev, cdev);
    194 			error = ENOMEM;
    195 			goto fail;
    196 		}
    197 		newptr[old_convs].d_name = NULL;
    198 		newptr[old_convs].d_bmajor = -1;
    199 		newptr[old_convs].d_cmajor = -1;
    200 		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
    201 		if (devsw_conv != devsw_conv0)
    202 			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
    203 		devsw_conv = newptr;
    204 		max_devsw_convs = new_convs;
    205 	}
    206 
    207 	len = strlen(devname) + 1;
    208 	name = kmem_alloc(len, KM_NOSLEEP);
    209 	if (name == NULL) {
    210 		devsw_detach_locked(bdev, cdev);
    211 		error = ENOMEM;
    212 		goto fail;
    213 	}
    214 	strlcpy(name, devname, len);
    215 
    216 	devsw_conv[i].d_name = name;
    217 	devsw_conv[i].d_bmajor = *bmajor;
    218 	devsw_conv[i].d_cmajor = *cmajor;
    219 
    220 	mutex_exit(&device_lock);
    221 	return (0);
    222  fail:
    223 	mutex_exit(&device_lock);
    224 	return (error);
    225 }
    226 
    227 static int
    228 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
    229 {
    230 	const struct bdevsw **newptr;
    231 	devmajor_t bmajor;
    232 	int i;
    233 
    234 	KASSERT(mutex_owned(&device_lock));
    235 
    236 	if (devsw == NULL)
    237 		return (0);
    238 
    239 	if (*devmajor < 0) {
    240 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
    241 			if (bdevsw[bmajor] != NULL)
    242 				continue;
    243 			for (i = 0 ; i < max_devsw_convs ; i++) {
    244 				if (devsw_conv[i].d_bmajor == bmajor)
    245 					break;
    246 			}
    247 			if (i != max_devsw_convs)
    248 				continue;
    249 			break;
    250 		}
    251 		*devmajor = bmajor;
    252 	}
    253 
    254 	if (*devmajor >= MAXDEVSW) {
     255 		printf("bdevsw_attach: block majors exhausted\n");
    256 		return (ENOMEM);
    257 	}
    258 
    259 	if (*devmajor >= max_bdevsws) {
    260 		KASSERT(bdevsw == bdevsw0);
    261 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
    262 		if (newptr == NULL)
    263 			return (ENOMEM);
    264 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
    265 		bdevsw = newptr;
    266 		max_bdevsws = MAXDEVSW;
    267 	}
    268 
    269 	if (bdevsw[*devmajor] != NULL)
    270 		return (EEXIST);
    271 
    272 	bdevsw[*devmajor] = devsw;
    273 
    274 	return (0);
    275 }
    276 
    277 static int
    278 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
    279 {
    280 	const struct cdevsw **newptr;
    281 	devmajor_t cmajor;
    282 	int i;
    283 
    284 	KASSERT(mutex_owned(&device_lock));
    285 
    286 	if (*devmajor < 0) {
    287 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
    288 			if (cdevsw[cmajor] != NULL)
    289 				continue;
    290 			for (i = 0 ; i < max_devsw_convs ; i++) {
    291 				if (devsw_conv[i].d_cmajor == cmajor)
    292 					break;
    293 			}
    294 			if (i != max_devsw_convs)
    295 				continue;
    296 			break;
    297 		}
    298 		*devmajor = cmajor;
    299 	}
    300 
    301 	if (*devmajor >= MAXDEVSW) {
     302 		printf("cdevsw_attach: character majors exhausted\n");
    303 		return (ENOMEM);
    304 	}
    305 
    306 	if (*devmajor >= max_cdevsws) {
    307 		KASSERT(cdevsw == cdevsw0);
    308 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
    309 		if (newptr == NULL)
    310 			return (ENOMEM);
    311 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
    312 		cdevsw = newptr;
    313 		max_cdevsws = MAXDEVSW;
    314 	}
    315 
    316 	if (cdevsw[*devmajor] != NULL)
    317 		return (EEXIST);
    318 
    319 	cdevsw[*devmajor] = devsw;
    320 
    321 	return (0);
    322 }
    323 
    324 static void
    325 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
    326 {
    327 	int i;
    328 
    329 	KASSERT(mutex_owned(&device_lock));
    330 
    331 	if (bdev != NULL) {
    332 		for (i = 0 ; i < max_bdevsws ; i++) {
    333 			if (bdevsw[i] != bdev)
    334 				continue;
    335 			bdevsw[i] = NULL;
    336 			break;
    337 		}
    338 	}
    339 	if (cdev != NULL) {
    340 		for (i = 0 ; i < max_cdevsws ; i++) {
    341 			if (cdevsw[i] != cdev)
    342 				continue;
    343 			cdevsw[i] = NULL;
    344 			break;
    345 		}
    346 	}
    347 }
    348 
    349 int
    350 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
    351 {
    352 
    353 	mutex_enter(&device_lock);
    354 	devsw_detach_locked(bdev, cdev);
    355 	mutex_exit(&device_lock);
    356 	return 0;
    357 }
    358 
    359 /*
    360  * Look up a block device by number.
    361  *
    362  * => Caller must ensure that the device is attached.
    363  */
    364 const struct bdevsw *
    365 bdevsw_lookup(dev_t dev)
    366 {
    367 	devmajor_t bmajor;
    368 
    369 	if (dev == NODEV)
    370 		return (NULL);
    371 	bmajor = major(dev);
    372 	if (bmajor < 0 || bmajor >= max_bdevsws)
    373 		return (NULL);
    374 
    375 	return (bdevsw[bmajor]);
    376 }
    377 
    378 /*
    379  * Look up a character device by number.
    380  *
    381  * => Caller must ensure that the device is attached.
    382  */
    383 const struct cdevsw *
    384 cdevsw_lookup(dev_t dev)
    385 {
    386 	devmajor_t cmajor;
    387 
    388 	if (dev == NODEV)
    389 		return (NULL);
    390 	cmajor = major(dev);
    391 	if (cmajor < 0 || cmajor >= max_cdevsws)
    392 		return (NULL);
    393 
    394 	return (cdevsw[cmajor]);
    395 }
    396 
    397 /*
    398  * Look up a block device by reference to its operations set.
    399  *
    400  * => Caller must ensure that the device is not detached, and therefore
    401  *    that the returned major is still valid when dereferenced.
    402  */
    403 devmajor_t
    404 bdevsw_lookup_major(const struct bdevsw *bdev)
    405 {
    406 	devmajor_t bmajor;
    407 
    408 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
    409 		if (bdevsw[bmajor] == bdev)
    410 			return (bmajor);
    411 	}
    412 
    413 	return (NODEVMAJOR);
    414 }
    415 
    416 /*
    417  * Look up a character device by reference to its operations set.
    418  *
    419  * => Caller must ensure that the device is not detached, and therefore
    420  *    that the returned major is still valid when dereferenced.
    421  */
    422 devmajor_t
    423 cdevsw_lookup_major(const struct cdevsw *cdev)
    424 {
    425 	devmajor_t cmajor;
    426 
    427 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
    428 		if (cdevsw[cmajor] == cdev)
    429 			return (cmajor);
    430 	}
    431 
    432 	return (NODEVMAJOR);
    433 }
    434 
    435 /*
    436  * Convert from block major number to name.
    437  *
    438  * => Caller must ensure that the device is not detached, and therefore
    439  *    that the name pointer is still valid when dereferenced.
    440  */
    441 const char *
    442 devsw_blk2name(devmajor_t bmajor)
    443 {
    444 	const char *name;
    445 	devmajor_t cmajor;
    446 	int i;
    447 
    448 	name = NULL;
    449 	cmajor = -1;
    450 
    451 	mutex_enter(&device_lock);
    452 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    453 		mutex_exit(&device_lock);
    454 		return (NULL);
    455 	}
    456 	for (i = 0 ; i < max_devsw_convs; i++) {
    457 		if (devsw_conv[i].d_bmajor == bmajor) {
    458 			cmajor = devsw_conv[i].d_cmajor;
    459 			break;
    460 		}
    461 	}
    462 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    463 		name = devsw_conv[i].d_name;
    464 	mutex_exit(&device_lock);
    465 
    466 	return (name);
    467 }
    468 
    469 /*
    470  * Convert char major number to device driver name.
    471  */
    472 const char *
    473 cdevsw_getname(devmajor_t major)
    474 {
    475 	const char *name;
    476 	int i;
    477 
    478 	name = NULL;
    479 
    480 	if (major < 0)
    481 		return (NULL);
    482 
    483 	mutex_enter(&device_lock);
    484 	for (i = 0 ; i < max_devsw_convs; i++) {
    485 		if (devsw_conv[i].d_cmajor == major) {
    486 			name = devsw_conv[i].d_name;
    487 			break;
    488 		}
    489 	}
    490 	mutex_exit(&device_lock);
    491 	return (name);
    492 }
    493 
    494 /*
    495  * Convert block major number to device driver name.
    496  */
    497 const char *
    498 bdevsw_getname(devmajor_t major)
    499 {
    500 	const char *name;
    501 	int i;
    502 
    503 	name = NULL;
    504 
    505 	if (major < 0)
    506 		return (NULL);
    507 
    508 	mutex_enter(&device_lock);
    509 	for (i = 0 ; i < max_devsw_convs; i++) {
    510 		if (devsw_conv[i].d_bmajor == major) {
    511 			name = devsw_conv[i].d_name;
    512 			break;
    513 		}
    514 	}
    515 	mutex_exit(&device_lock);
    516 	return (name);
    517 }
    518 
    519 /*
    520  * Convert from device name to block major number.
    521  *
    522  * => Caller must ensure that the device is not detached, and therefore
    523  *    that the major number is still valid when dereferenced.
    524  */
    525 devmajor_t
    526 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
    527 {
    528 	struct devsw_conv *conv;
    529 	devmajor_t bmajor;
    530 	int i;
    531 
    532 	if (name == NULL)
    533 		return (NODEVMAJOR);
    534 
    535 	mutex_enter(&device_lock);
    536 	for (i = 0 ; i < max_devsw_convs ; i++) {
    537 		size_t len;
    538 
    539 		conv = &devsw_conv[i];
    540 		if (conv->d_name == NULL)
    541 			continue;
    542 		len = strlen(conv->d_name);
    543 		if (strncmp(conv->d_name, name, len) != 0)
    544 			continue;
     545 		if (*(name + len) && !isdigit(*(name + len)))
    546 			continue;
    547 		bmajor = conv->d_bmajor;
    548 		if (bmajor < 0 || bmajor >= max_bdevsws ||
    549 		    bdevsw[bmajor] == NULL)
    550 			break;
    551 		if (devname != NULL) {
    552 #ifdef DEVSW_DEBUG
    553 			if (strlen(conv->d_name) >= devnamelen)
     554 				printf("devsw_name2blk: buffer too small\n");
    555 #endif /* DEVSW_DEBUG */
    556 			strncpy(devname, conv->d_name, devnamelen);
    557 			devname[devnamelen - 1] = '\0';
    558 		}
    559 		mutex_exit(&device_lock);
    560 		return (bmajor);
    561 	}
    562 
    563 	mutex_exit(&device_lock);
    564 	return (NODEVMAJOR);
    565 }
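
/*
 * Example (an illustrative sketch): for a hypothetical driver registered
 * under the name "exdev", both the bare name and a name with a trailing
 * unit number (e.g. "exdev0") resolve to the same block major, and the
 * driver name is copied out when a buffer is supplied:
 *
 *	char drvname[16];
 *	devmajor_t bmajor;
 *
 *	bmajor = devsw_name2blk("exdev0", drvname, sizeof(drvname));
 *
 * On success bmajor holds exdev's block major and drvname contains
 * "exdev"; NODEVMAJOR is returned if no such block device is registered.
 */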
    566 
    567 /*
    568  * Convert from device name to char major number.
    569  *
    570  * => Caller must ensure that the device is not detached, and therefore
    571  *    that the major number is still valid when dereferenced.
    572  */
    573 devmajor_t
    574 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
    575 {
    576 	struct devsw_conv *conv;
    577 	devmajor_t cmajor;
    578 	int i;
    579 
    580 	if (name == NULL)
    581 		return (NODEVMAJOR);
    582 
    583 	mutex_enter(&device_lock);
    584 	for (i = 0 ; i < max_devsw_convs ; i++) {
    585 		size_t len;
    586 
    587 		conv = &devsw_conv[i];
    588 		if (conv->d_name == NULL)
    589 			continue;
    590 		len = strlen(conv->d_name);
    591 		if (strncmp(conv->d_name, name, len) != 0)
    592 			continue;
     593 		if (*(name + len) && !isdigit(*(name + len)))
    594 			continue;
    595 		cmajor = conv->d_cmajor;
    596 		if (cmajor < 0 || cmajor >= max_cdevsws ||
    597 		    cdevsw[cmajor] == NULL)
    598 			break;
    599 		if (devname != NULL) {
    600 #ifdef DEVSW_DEBUG
    601 			if (strlen(conv->d_name) >= devnamelen)
     602 				printf("devsw_name2chr: buffer too small\n");
    603 #endif /* DEVSW_DEBUG */
    604 			strncpy(devname, conv->d_name, devnamelen);
    605 			devname[devnamelen - 1] = '\0';
    606 		}
    607 		mutex_exit(&device_lock);
    608 		return (cmajor);
    609 	}
    610 
    611 	mutex_exit(&device_lock);
    612 	return (NODEVMAJOR);
    613 }
    614 
    615 /*
    616  * Convert from character dev_t to block dev_t.
    617  *
    618  * => Caller must ensure that the device is not detached, and therefore
    619  *    that the major number is still valid when dereferenced.
    620  */
    621 dev_t
    622 devsw_chr2blk(dev_t cdev)
    623 {
    624 	devmajor_t bmajor, cmajor;
    625 	int i;
    626 	dev_t rv;
    627 
    628 	cmajor = major(cdev);
    629 	bmajor = NODEVMAJOR;
    630 	rv = NODEV;
    631 
    632 	mutex_enter(&device_lock);
    633 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
    634 		mutex_exit(&device_lock);
    635 		return (NODEV);
    636 	}
    637 	for (i = 0 ; i < max_devsw_convs ; i++) {
    638 		if (devsw_conv[i].d_cmajor == cmajor) {
    639 			bmajor = devsw_conv[i].d_bmajor;
    640 			break;
    641 		}
    642 	}
    643 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
    644 		rv = makedev(bmajor, minor(cdev));
    645 	mutex_exit(&device_lock);
    646 
    647 	return (rv);
    648 }
    649 
    650 /*
    651  * Convert from block dev_t to character dev_t.
    652  *
    653  * => Caller must ensure that the device is not detached, and therefore
    654  *    that the major number is still valid when dereferenced.
    655  */
    656 dev_t
    657 devsw_blk2chr(dev_t bdev)
    658 {
    659 	devmajor_t bmajor, cmajor;
    660 	int i;
    661 	dev_t rv;
    662 
    663 	bmajor = major(bdev);
    664 	cmajor = NODEVMAJOR;
    665 	rv = NODEV;
    666 
    667 	mutex_enter(&device_lock);
    668 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
    669 		mutex_exit(&device_lock);
    670 		return (NODEV);
    671 	}
    672 	for (i = 0 ; i < max_devsw_convs ; i++) {
    673 		if (devsw_conv[i].d_bmajor == bmajor) {
    674 			cmajor = devsw_conv[i].d_cmajor;
    675 			break;
    676 		}
    677 	}
    678 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
    679 		rv = makedev(cmajor, minor(bdev));
    680 	mutex_exit(&device_lock);
    681 
    682 	return (rv);
    683 }
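
/*
 * Example (an illustrative sketch): converting between the block and
 * character ("raw") flavours of the same unit keeps the minor number and
 * swaps the major.  For some block dev_t already in hand:
 *
 *	dev_t rawdev;
 *
 *	rawdev = devsw_blk2chr(blkdev);
 *
 * NODEV is returned when the major is unknown or no matching character
 * device is registered; devsw_chr2blk() performs the reverse mapping.
 */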
    684 
    685 /*
    686  * Device access methods.
    687  */
    688 
    689 #define	DEV_LOCK(d)						\
    690 	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
    691 		KERNEL_LOCK(1, NULL);				\
    692 	}
    693 
    694 #define	DEV_UNLOCK(d)						\
    695 	if (mpflag == 0) {					\
    696 		KERNEL_UNLOCK_ONE(NULL);			\
    697 	}
    698 
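/*
 * An illustrative sketch: a driver that performs its own locking sets
 * D_MPSAFE in d_flag, which makes DEV_LOCK()/DEV_UNLOCK() above no-ops so
 * the wrappers call the driver without holding the kernel lock; otherwise
 * every call is bracketed by KERNEL_LOCK/KERNEL_UNLOCK.  The "exdev_*"
 * methods below are hypothetical; the no*() entries are the usual no-op
 * stubs.
 *
 *	const struct cdevsw exdev_cdevsw = {
 *		.d_open = exdev_open,
 *		.d_close = exdev_close,
 *		.d_read = exdev_read,
 *		.d_write = exdev_write,
 *		.d_ioctl = exdev_ioctl,
 *		.d_stop = nostop,
 *		.d_tty = notty,
 *		.d_poll = exdev_poll,
 *		.d_mmap = nommap,
 *		.d_kqfilter = nokqfilter,
 *		.d_discard = nodiscard,
 *		.d_flag = D_OTHER | D_MPSAFE
 *	};
 */
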
    699 int
    700 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    701 {
    702 	const struct bdevsw *d;
    703 	int rv, mpflag;
    704 
    705 	/*
    706 	 * For open we need to lock, in order to synchronize
    707 	 * with attach/detach.
    708 	 */
    709 	mutex_enter(&device_lock);
    710 	d = bdevsw_lookup(dev);
    711 	mutex_exit(&device_lock);
    712 	if (d == NULL)
    713 		return ENXIO;
    714 
    715 	DEV_LOCK(d);
    716 	rv = (*d->d_open)(dev, flag, devtype, l);
    717 	DEV_UNLOCK(d);
    718 
    719 	return rv;
    720 }
    721 
    722 int
    723 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    724 {
    725 	const struct bdevsw *d;
    726 	int rv, mpflag;
    727 
    728 	if ((d = bdevsw_lookup(dev)) == NULL)
    729 		return ENXIO;
    730 
    731 	DEV_LOCK(d);
    732 	rv = (*d->d_close)(dev, flag, devtype, l);
    733 	DEV_UNLOCK(d);
    734 
    735 	return rv;
    736 }
    737 
    738 SDT_PROVIDER_DECLARE(io);
    739 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
    740 
    741 void
    742 bdev_strategy(struct buf *bp)
    743 {
    744 	const struct bdevsw *d;
    745 	int mpflag;
    746 
    747 	SDT_PROBE1(io, kernel, , start, bp);
    748 
    749 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
    750 		bp->b_error = ENXIO;
    751 		bp->b_resid = bp->b_bcount;
    752 		biodone_vfs(bp); /* biodone() iff vfs present */
    753 		return;
    754 	}
    755 
    756 	DEV_LOCK(d);
    757 	(*d->d_strategy)(bp);
    758 	DEV_UNLOCK(d);
    759 }
    760 
    761 int
    762 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    763 {
    764 	const struct bdevsw *d;
    765 	int rv, mpflag;
    766 
    767 	if ((d = bdevsw_lookup(dev)) == NULL)
    768 		return ENXIO;
    769 
    770 	DEV_LOCK(d);
    771 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    772 	DEV_UNLOCK(d);
    773 
    774 	return rv;
    775 }
    776 
    777 int
    778 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
    779 {
    780 	const struct bdevsw *d;
    781 	int rv;
    782 
    783 	/*
    784 	 * Dump can be called without the device open.  Since it can
    785 	 * currently only be called with the system paused (and in a
    786 	 * potentially unstable state), we don't perform any locking.
    787 	 */
    788 	if ((d = bdevsw_lookup(dev)) == NULL)
    789 		return ENXIO;
    790 
    791 	/* DEV_LOCK(d); */
    792 	rv = (*d->d_dump)(dev, addr, data, sz);
    793 	/* DEV_UNLOCK(d); */
    794 
    795 	return rv;
    796 }
    797 
    798 int
    799 bdev_flags(dev_t dev)
    800 {
    801 	const struct bdevsw *d;
    802 
    803 	if ((d = bdevsw_lookup(dev)) == NULL)
    804 		return 0;
    805 	return d->d_flag & ~D_TYPEMASK;
    806 }
    807 
    808 int
    809 bdev_type(dev_t dev)
    810 {
    811 	const struct bdevsw *d;
    812 
    813 	if ((d = bdevsw_lookup(dev)) == NULL)
    814 		return D_OTHER;
    815 	return d->d_flag & D_TYPEMASK;
    816 }
    817 
    818 int
    819 bdev_size(dev_t dev)
    820 {
    821 	const struct bdevsw *d;
    822 	int rv, mpflag = 0;
    823 
    824 	if ((d = bdevsw_lookup(dev)) == NULL ||
    825 	    d->d_psize == NULL)
    826 		return -1;
    827 
    828 	/*
     829 	 * Don't try to lock the device if we're dumping.
    830 	 * XXX: is there a better way to test this?
    831 	 */
    832 	if ((boothowto & RB_DUMP) == 0)
    833 		DEV_LOCK(d);
    834 	rv = (*d->d_psize)(dev);
    835 	if ((boothowto & RB_DUMP) == 0)
    836 		DEV_UNLOCK(d);
    837 
    838 	return rv;
    839 }
    840 
    841 int
    842 bdev_discard(dev_t dev, off_t pos, off_t len)
    843 {
    844 	const struct bdevsw *d;
    845 	int rv, mpflag;
    846 
    847 	if ((d = bdevsw_lookup(dev)) == NULL)
    848 		return ENXIO;
    849 
    850 	DEV_LOCK(d);
    851 	rv = (*d->d_discard)(dev, pos, len);
    852 	DEV_UNLOCK(d);
    853 
    854 	return rv;
    855 }
    856 
    857 int
    858 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
    859 {
    860 	const struct cdevsw *d;
    861 	int rv, mpflag;
    862 
    863 	/*
    864 	 * For open we need to lock, in order to synchronize
    865 	 * with attach/detach.
    866 	 */
    867 	mutex_enter(&device_lock);
    868 	d = cdevsw_lookup(dev);
    869 	mutex_exit(&device_lock);
    870 	if (d == NULL)
    871 		return ENXIO;
    872 
    873 	DEV_LOCK(d);
    874 	rv = (*d->d_open)(dev, flag, devtype, l);
    875 	DEV_UNLOCK(d);
    876 
    877 	return rv;
    878 }
    879 
    880 int
    881 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
    882 {
    883 	const struct cdevsw *d;
    884 	int rv, mpflag;
    885 
    886 	if ((d = cdevsw_lookup(dev)) == NULL)
    887 		return ENXIO;
    888 
    889 	DEV_LOCK(d);
    890 	rv = (*d->d_close)(dev, flag, devtype, l);
    891 	DEV_UNLOCK(d);
    892 
    893 	return rv;
    894 }
    895 
    896 int
    897 cdev_read(dev_t dev, struct uio *uio, int flag)
    898 {
    899 	const struct cdevsw *d;
    900 	int rv, mpflag;
    901 
    902 	if ((d = cdevsw_lookup(dev)) == NULL)
    903 		return ENXIO;
    904 
    905 	DEV_LOCK(d);
    906 	rv = (*d->d_read)(dev, uio, flag);
    907 	DEV_UNLOCK(d);
    908 
    909 	return rv;
    910 }
    911 
    912 int
    913 cdev_write(dev_t dev, struct uio *uio, int flag)
    914 {
    915 	const struct cdevsw *d;
    916 	int rv, mpflag;
    917 
    918 	if ((d = cdevsw_lookup(dev)) == NULL)
    919 		return ENXIO;
    920 
    921 	DEV_LOCK(d);
    922 	rv = (*d->d_write)(dev, uio, flag);
    923 	DEV_UNLOCK(d);
    924 
    925 	return rv;
    926 }
    927 
    928 int
    929 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
    930 {
    931 	const struct cdevsw *d;
    932 	int rv, mpflag;
    933 
    934 	if ((d = cdevsw_lookup(dev)) == NULL)
    935 		return ENXIO;
    936 
    937 	DEV_LOCK(d);
    938 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
    939 	DEV_UNLOCK(d);
    940 
    941 	return rv;
    942 }
    943 
    944 void
    945 cdev_stop(struct tty *tp, int flag)
    946 {
    947 	const struct cdevsw *d;
    948 	int mpflag;
    949 
    950 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
    951 		return;
    952 
    953 	DEV_LOCK(d);
    954 	(*d->d_stop)(tp, flag);
    955 	DEV_UNLOCK(d);
    956 }
    957 
    958 struct tty *
    959 cdev_tty(dev_t dev)
    960 {
    961 	const struct cdevsw *d;
    962 
    963 	if ((d = cdevsw_lookup(dev)) == NULL)
    964 		return NULL;
    965 
    966 	/* XXX Check if necessary. */
    967 	if (d->d_tty == NULL)
    968 		return NULL;
    969 
    970 	return (*d->d_tty)(dev);
    971 }
    972 
    973 int
    974 cdev_poll(dev_t dev, int flag, lwp_t *l)
    975 {
    976 	const struct cdevsw *d;
    977 	int rv, mpflag;
    978 
    979 	if ((d = cdevsw_lookup(dev)) == NULL)
    980 		return POLLERR;
    981 
    982 	DEV_LOCK(d);
    983 	rv = (*d->d_poll)(dev, flag, l);
    984 	DEV_UNLOCK(d);
    985 
    986 	return rv;
    987 }
    988 
    989 paddr_t
    990 cdev_mmap(dev_t dev, off_t off, int flag)
    991 {
    992 	const struct cdevsw *d;
    993 	paddr_t rv;
    994 	int mpflag;
    995 
    996 	if ((d = cdevsw_lookup(dev)) == NULL)
    997 		return (paddr_t)-1LL;
    998 
    999 	DEV_LOCK(d);
   1000 	rv = (*d->d_mmap)(dev, off, flag);
   1001 	DEV_UNLOCK(d);
   1002 
   1003 	return rv;
   1004 }
   1005 
   1006 int
   1007 cdev_kqfilter(dev_t dev, struct knote *kn)
   1008 {
   1009 	const struct cdevsw *d;
   1010 	int rv, mpflag;
   1011 
   1012 	if ((d = cdevsw_lookup(dev)) == NULL)
   1013 		return ENXIO;
   1014 
   1015 	DEV_LOCK(d);
   1016 	rv = (*d->d_kqfilter)(dev, kn);
   1017 	DEV_UNLOCK(d);
   1018 
   1019 	return rv;
   1020 }
   1021 
   1022 int
   1023 cdev_discard(dev_t dev, off_t pos, off_t len)
   1024 {
   1025 	const struct cdevsw *d;
   1026 	int rv, mpflag;
   1027 
   1028 	if ((d = cdevsw_lookup(dev)) == NULL)
   1029 		return ENXIO;
   1030 
   1031 	DEV_LOCK(d);
   1032 	rv = (*d->d_discard)(dev, pos, len);
   1033 	DEV_UNLOCK(d);
   1034 
   1035 	return rv;
   1036 }
   1037 
   1038 int
   1039 cdev_flags(dev_t dev)
   1040 {
   1041 	const struct cdevsw *d;
   1042 
   1043 	if ((d = cdevsw_lookup(dev)) == NULL)
   1044 		return 0;
   1045 	return d->d_flag & ~D_TYPEMASK;
   1046 }
   1047 
   1048 int
   1049 cdev_type(dev_t dev)
   1050 {
   1051 	const struct cdevsw *d;
   1052 
   1053 	if ((d = cdevsw_lookup(dev)) == NULL)
   1054 		return D_OTHER;
   1055 	return d->d_flag & D_TYPEMASK;
   1056 }
   1057 
   1058 /*
   1059  * nommap(dev, off, prot)
   1060  *
   1061  *	mmap routine that always fails, for non-mmappable devices.
   1062  */
   1063 paddr_t
   1064 nommap(dev_t dev, off_t off, int prot)
   1065 {
   1066 
   1067 	return (paddr_t)-1;
   1068 }
   1069