/*	$NetBSD: subr_devsw.c,v 1.46 2022/07/09 10:30:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots, only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow because
 *	drivers are attached dynamically, we allocate a fixed block of
 *	memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.46 2022/07/09 10:30:27 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include <sys/localcount.h>
#include <sys/pserialize.h>
#include <sys/xcall.h>
#include <sys/device.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

struct devswref {
	struct localcount	*dr_lc;
};

/* XXX bdevsw, cdevsw, max_bdevsws, and max_cdevsws should be volatile */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static struct devswref *cdevswref;
static struct devswref *bdevswref;
static kcondvar_t devsw_cv;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;

void (*biodone_vfs)(buf_t *) = (void *)nullop;

void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);

	cv_init(&devsw_cv, "devsw");
}

int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return EINVAL;

	mutex_enter(&device_lock);

	for (i = 0; i < max_devsw_convs; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto out;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto out;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto out;
		}
		break;
	}

	/*
	 * XXX This should allocate what it needs up front so we never
	 * need to flail around trying to unwind.
	 */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto out;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto out;
	}

	/*
	 * If we already found a conv, we're done.  Otherwise, find an
	 * empty slot or extend the table.
	 */
	if (i < max_devsw_convs)
		goto out;

	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto out;
		}
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	name = kmem_strdupsize(devname, NULL, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto out;
	}

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;
	error = 0;
out:
	mutex_exit(&device_lock);
	return error;
}

static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newbdevsw = NULL;
	struct devswref *newbdevswref = NULL;
	struct localcount *lc;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return 0;

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws; bmajor < max_bdevsws; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (bdevswref == NULL) {
		newbdevswref = kmem_zalloc(MAXDEVSW * sizeof(newbdevswref[0]),
		    KM_NOSLEEP);
		if (newbdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&bdevswref, newbdevswref);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newbdevsw = kmem_zalloc(MAXDEVSW * sizeof(newbdevsw[0]),
		    KM_NOSLEEP);
		if (newbdevsw == NULL)
			return ENOMEM;
		memcpy(newbdevsw, bdevsw, max_bdevsws * sizeof(bdevsw[0]));
		atomic_store_release(&bdevsw, newbdevsw);
		atomic_store_release(&max_bdevsws, MAXDEVSW);
	}

	if (bdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(bdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	bdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&bdevsw[*devmajor], devsw);

	return 0;
}

static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newcdevsw = NULL;
	struct devswref *newcdevswref = NULL;
	struct localcount *lc;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws; cmajor < max_cdevsws; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (cdevswref == NULL) {
		newcdevswref = kmem_zalloc(MAXDEVSW * sizeof(newcdevswref[0]),
		    KM_NOSLEEP);
		if (newcdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&cdevswref, newcdevswref);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newcdevsw = kmem_zalloc(MAXDEVSW * sizeof(newcdevsw[0]),
		    KM_NOSLEEP);
		if (newcdevsw == NULL)
			return ENOMEM;
		memcpy(newcdevsw, cdevsw, max_cdevsws * sizeof(cdevsw[0]));
		atomic_store_release(&cdevsw, newcdevsw);
		atomic_store_release(&max_cdevsws, MAXDEVSW);
	}

	if (cdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(cdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	cdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&cdevsw[*devmajor], devsw);

	return 0;
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int bi, ci = -1/*XXXGCC*/, di;
	struct cfdriver *cd;
	device_t dv;

	KASSERT(mutex_owned(&device_lock));

	/*
	 * If this is wired to an autoconf device, make sure the device
	 * has no more instances.  No locking here because under
	 * correct use of devsw_detach, none of this state can change
	 * at this point.
	 */
	if (cdev != NULL && (cd = cdev->d_cfdriver) != NULL) {
		for (di = 0; di < cd->cd_ndevs; di++) {
			KASSERTMSG((dv = cd->cd_devs[di]) == NULL,
			    "detaching character device driver %s"
			    " still has attached unit %s",
			    cd->cd_name, device_xname(dv));
		}
	}
	if (bdev != NULL && (cd = bdev->d_cfdriver) != NULL) {
		for (di = 0; di < cd->cd_ndevs; di++) {
			KASSERTMSG((dv = cd->cd_devs[di]) == NULL,
			    "detaching block device driver %s"
			    " still has attached unit %s",
			    cd->cd_name, device_xname(dv));
		}
	}

	/* Prevent new references.  */
	if (bdev != NULL) {
		for (bi = 0; bi < max_bdevsws; bi++) {
			if (bdevsw[bi] != bdev)
				continue;
			atomic_store_relaxed(&bdevsw[bi], NULL);
			break;
		}
		KASSERT(bi < max_bdevsws);
	}
	if (cdev != NULL) {
		for (ci = 0; ci < max_cdevsws; ci++) {
			if (cdevsw[ci] != cdev)
				continue;
			atomic_store_relaxed(&cdevsw[ci], NULL);
			break;
		}
		KASSERT(ci < max_cdevsws);
	}

	if (bdev == NULL && cdev == NULL) /* XXX possible? */
		return;

	/*
	 * Wait for all bdevsw_lookup_acquire, cdevsw_lookup_acquire
	 * calls to notice that the devsw is gone.
	 *
	 * XXX Despite the use of the pserialize_read_enter/exit API
	 * elsewhere in this file, we use xc_barrier here instead of
	 * pserialize_perform -- because devsw_init is too early for
	 * pserialize_create.  Either pserialize_create should be made
	 * to work earlier, or it should be nixed altogether.  Until
	 * that is fixed, xc_barrier will serve the same purpose.
	 */
	xc_barrier(0);

	/*
	 * Wait for all references to drain.  It is the caller's
	 * responsibility to ensure that at this point, there are no
	 * extant open instances and all new d_open calls will fail.
	 *
	 * Note that localcount_drain may release and reacquire
	 * device_lock.
	 */
	if (bdev != NULL) {
		localcount_drain(bdevswref[bi].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(bdevswref[bi].dr_lc);
		kmem_free(bdevswref[bi].dr_lc, sizeof(*bdevswref[bi].dr_lc));
		bdevswref[bi].dr_lc = NULL;
	}
	if (cdev != NULL) {
		localcount_drain(cdevswref[ci].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(cdevswref[ci].dr_lc);
		kmem_free(cdevswref[ci].dr_lc, sizeof(*cdevswref[ci].dr_lc));
		cdevswref[ci].dr_lc = NULL;
	}
}

void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
}
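
/*
 * Example (illustrative sketch only, not part of this file's logic):
 * how a dynamically loaded driver might register and unregister
 * itself with devsw_attach/devsw_detach.  The name "mydev" and the
 * DEVSW_EXAMPLE guard are hypothetical; the no* stubs from
 * sys/conf.h stand in for real driver methods.
 */
#ifdef DEVSW_EXAMPLE
static const struct cdevsw mydev_cdevsw = {
	.d_open = noopen,		/* a real driver supplies these */
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

static int
mydev_register(void)
{
	/* NODEVMAJOR requests dynamic allocation of the majors. */
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;

	/* No block half, so pass NULL for the bdevsw. */
	return devsw_attach("mydev", NULL, &bmajor, &mydev_cdevsw, &cmajor);
}

static void
mydev_unregister(void)
{

	devsw_detach(NULL, &mydev_cdevsw);
}
#endif /* DEVSW_EXAMPLE */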

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= atomic_load_relaxed(&max_bdevsws))
		return NULL;

	return atomic_load_consume(&bdevsw)[bmajor];
}

static const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL, *const *curbdevsw;
	struct devswref *curbdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_bdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_bdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (bmajor >= sys_bdevsws &&
	    bmajor >= atomic_load_acquire(&max_bdevsws))
		goto out;
	curbdevsw = atomic_load_consume(&bdevsw);
	if ((bdev = atomic_load_consume(&curbdevsw[bmajor])) == NULL)
		goto out;

	curbdevswref = atomic_load_consume(&bdevswref);
	if (curbdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curbdevswref[bmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return bdev;
}

static void
bdevsw_release(const struct bdevsw *bdev, struct localcount *lc)
{

	if (lc == NULL)
		return;
	localcount_release(lc, &devsw_cv, &device_lock);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= atomic_load_relaxed(&max_cdevsws))
		return NULL;

	return atomic_load_consume(&cdevsw)[cmajor];
}

static const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL, *const *curcdevsw;
	struct devswref *curcdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_cdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_cdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (cmajor >= sys_cdevsws &&
	    cmajor >= atomic_load_acquire(&max_cdevsws))
		goto out;
	curcdevsw = atomic_load_consume(&cdevsw);
	if ((cdev = atomic_load_consume(&curcdevsw[cmajor])) == NULL)
		goto out;

	curcdevswref = atomic_load_consume(&cdevswref);
	if (curcdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curcdevswref[cmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return cdev;
}

static void
cdevsw_release(const struct cdevsw *cdev, struct localcount *lc)
{

	if (lc == NULL)
		return;
	localcount_release(lc, &devsw_cv, &device_lock);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	const struct bdevsw *const *curbdevsw;
	devmajor_t bmajor, bmax;

	bmax = atomic_load_acquire(&max_bdevsws);
	curbdevsw = atomic_load_consume(&bdevsw);
	for (bmajor = 0; bmajor < bmax; bmajor++) {
		if (atomic_load_relaxed(&curbdevsw[bmajor]) == bdev)
			return bmajor;
	}

	return NODEVMAJOR;
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	const struct cdevsw *const *curcdevsw;
	devmajor_t cmajor, cmax;

	cmax = atomic_load_acquire(&max_cdevsws);
	curcdevsw = atomic_load_consume(&cdevsw);
	for (cmajor = 0; cmajor < cmax; cmajor++) {
		if (atomic_load_relaxed(&curcdevsw[cmajor]) == cdev)
			return cmajor;
	}

	return NODEVMAJOR;
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NULL;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return name;
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return NULL;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return name;
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return NULL;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return name;
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: buffer too short\n", __func__);
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return bmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}
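
/*
 * Example (illustrative sketch only): resolving a driver name to its
 * block major and building a dev_t from the result.  Assumes a driver
 * named "sd" has a conv entry; DEVSW_EXAMPLE is a hypothetical guard.
 */
#ifdef DEVSW_EXAMPLE
static dev_t
example_name_to_dev(void)
{
	char devname[16];
	devmajor_t bmajor;

	/* "sd0a" matches the "sd" entry; a digit may follow the name. */
	bmajor = devsw_name2blk("sd0a", devname, sizeof(devname));
	if (bmajor == NODEVMAJOR)
		return NODEV;

	/* devname now holds "sd"; make a dev_t for minor 0. */
	return makedev(bmajor, 0);
}
#endif /* DEVSW_EXAMPLE */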

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: buffer too short\n", __func__);
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return cmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return NODEV;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return rv;
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NODEV;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return rv;
}

/*
 * Device access methods.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
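
/*
 * Illustrative sketch only: for a driver without D_MPSAFE set in
 * d_flag, each wrapper below effectively expands to the following
 * (hypothetical helper, shown for exposition with d_read):
 */
#ifdef DEVSW_EXAMPLE
static int
example_locked_read(const struct cdevsw *d, dev_t dev, struct uio *uio,
    int flag)
{
	int rv, mpflag;

	/* DEV_LOCK: take the big kernel lock unless the driver is MP-safe. */
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0)
		KERNEL_LOCK(1, NULL);

	rv = (*d->d_read)(dev, uio, flag);

	/* DEV_UNLOCK: release only if DEV_LOCK took it. */
	if (mpflag == 0)
		KERNEL_UNLOCK_ONE(NULL);

	return rv;
}
#endif /* DEVSW_EXAMPLE */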

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	struct localcount *lc;
	device_t dv = NULL/*XXXGCC*/;
	int unit, rv, mpflag;

	d = bdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	if (d->d_devtounit) {
		/*
		 * If the device node corresponds to an autoconf device
		 * instance, acquire a reference to it so that during
		 * d_open, device_lookup is stable.
		 *
		 * XXX This should also arrange to instantiate cloning
		 * pseudo-devices if appropriate, but that requires
		 * reviewing them all to find and verify a common
		 * pattern.
		 */
		if ((unit = (*d->d_devtounit)(dev)) == -1) {
			rv = ENXIO;
			goto out;
		}
		if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL) {
			rv = ENXIO;
			goto out;
		}
	}

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	if (d->d_devtounit) {
		device_release(dv);
	}

out:	bdevsw_release(d, lc);

	return rv;
}

int
bdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;
	if (d->d_cancel == NULL)
		return ENODEV;

	DEV_LOCK(d);
	rv = (*d->d_cancel)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp); /* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_flags(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return 0;
	return d->d_flag & ~D_TYPEMASK;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}

int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_detached(dev_t dev)
{
	const struct bdevsw *d;
	device_t dv;
	int unit;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return;
	if (d->d_devtounit == NULL)
		return;
	if ((unit = (*d->d_devtounit)(dev)) == -1)
		return;
	if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
		return;
	config_detach_commit(dv);
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	struct localcount *lc;
	device_t dv = NULL/*XXXGCC*/;
	int unit, rv, mpflag;

	d = cdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	if (d->d_devtounit) {
		/*
		 * If the device node corresponds to an autoconf device
		 * instance, acquire a reference to it so that during
		 * d_open, device_lookup is stable.
		 *
		 * XXX This should also arrange to instantiate cloning
		 * pseudo-devices if appropriate, but that requires
		 * reviewing them all to find and verify a common
		 * pattern.
		 */
		if ((unit = (*d->d_devtounit)(dev)) == -1) {
			rv = ENXIO;
			goto out;
		}
		if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL) {
			rv = ENXIO;
			goto out;
		}
	}

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	if (d->d_devtounit) {
		device_release(dv);
	}

out:	cdevsw_release(d, lc);

	return rv;
}

int
cdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;
	if (d->d_cancel == NULL)
		return ENODEV;

	DEV_LOCK(d);
	rv = (*d->d_cancel)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_flags(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return 0;
	return d->d_flag & ~D_TYPEMASK;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

void
cdev_detached(dev_t dev)
{
	const struct cdevsw *d;
	device_t dv;
	int unit;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return;
	if (d->d_devtounit == NULL)
		return;
	if ((unit = (*d->d_devtounit)(dev)) == -1)
		return;
	if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
		return;
	config_detach_commit(dv);
}

/*
 * nommap(dev, off, prot)
 *
 *	mmap routine that always fails, for non-mmappable devices.
 */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	return (paddr_t)-1;
}

/*
 * dev_minor_unit(dev)
 *
 *	Returns minor(dev) as an int.  Intended for use with struct
 *	bdevsw, cdevsw::d_devtounit for drivers whose /dev nodes are
 *	implemented by reference to an autoconf instance with the minor
 *	number.
 */
int
dev_minor_unit(dev_t dev)
{

	return minor(dev);
}
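
/*
 * Example (illustrative sketch only): wiring d_devtounit and
 * d_cfdriver so that cdev_open holds a device reference across
 * d_open.  The cfdriver mydev_cd and the DEVSW_EXAMPLE guard are
 * hypothetical; config(8) normally generates the cfdriver.
 */
#ifdef DEVSW_EXAMPLE
extern struct cfdriver mydev_cd;

static const struct cdevsw mydev_unit_cdevsw = {
	.d_open = noopen,		/* a real driver supplies these */
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_devtounit = dev_minor_unit,	/* minor number is the unit */
	.d_cfdriver = &mydev_cd,
	.d_flag = D_OTHER,
};
#endif /* DEVSW_EXAMPLE */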