dksubr.c revision 1.109.2.1
      1 /* $NetBSD: dksubr.c,v 1.109.2.1 2020/03/21 15:52:09 martin Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe and Roland C. Dowdeswell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.109.2.1 2020/03/21 15:52:09 martin Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/stat.h>
     38 #include <sys/proc.h>
     39 #include <sys/ioctl.h>
     40 #include <sys/device.h>
     41 #include <sys/disk.h>
     42 #include <sys/disklabel.h>
     43 #include <sys/buf.h>
     44 #include <sys/bufq.h>
     45 #include <sys/vnode.h>
     46 #include <sys/fcntl.h>
     47 #include <sys/namei.h>
     48 #include <sys/module.h>
     49 #include <sys/syslog.h>
     50 
     51 #include <dev/dkvar.h>
     52 #include <miscfs/specfs/specdev.h> /* for v_rdev */
     53 
     54 int	dkdebug = 0;
     55 
     56 #ifdef DEBUG
     57 #define DKDB_FOLLOW	0x1
     58 #define DKDB_INIT	0x2
     59 #define DKDB_VNODE	0x4
     60 #define DKDB_DUMP	0x8
     61 
     62 #define IFDEBUG(x,y)		if (dkdebug & (x)) y
     63 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
     64 #define DPRINTF_FOLLOW(y)	DPRINTF(DKDB_FOLLOW, y)
     65 #else
     66 #define IFDEBUG(x,y)
     67 #define DPRINTF(x,y)
     68 #define DPRINTF_FOLLOW(y)
     69 #endif
     70 
     71 #define DKF_READYFORDUMP	(DKF_INITED|DKF_TAKEDUMP)
     72 
     73 static int dk_subr_modcmd(modcmd_t, void *);
     74 
     75 #define DKLABELDEV(dev)	\
     76 	(MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))
     77 
     78 static void	dk_makedisklabel(struct dk_softc *);
     79 static int	dk_translate(struct dk_softc *, struct buf *);
     80 static void	dk_done1(struct dk_softc *, struct buf *, bool);
     81 
     82 void
     83 dk_init(struct dk_softc *dksc, device_t dev, int dtype)
     84 {
     85 
     86 	memset(dksc, 0x0, sizeof(*dksc));
     87 	dksc->sc_dtype = dtype;
     88 	dksc->sc_dev = dev;
     89 
     90 	strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
     91 	dksc->sc_dkdev.dk_name = dksc->sc_xname;
     92 }
     93 
     94 void
     95 dk_attach(struct dk_softc *dksc)
     96 {
     97 	KASSERT(dksc->sc_dev != NULL);
     98 
     99 	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
    100 	dksc->sc_flags |= DKF_READYFORDUMP;
    101 #ifdef DIAGNOSTIC
    102 	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
    103 #endif
    104 
    105 	if ((dksc->sc_flags & DKF_NO_RND) == 0) {
    106 		/* Attach the device into the rnd source list. */
    107 		rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
    108 		    RND_TYPE_DISK, RND_FLAG_DEFAULT);
    109 	}
    110 }
    111 
    112 void
    113 dk_detach(struct dk_softc *dksc)
    114 {
    115 	if ((dksc->sc_flags & DKF_NO_RND) == 0) {
    116 		/* Unhook the entropy source. */
    117 		rnd_detach_source(&dksc->sc_rnd_source);
    118 	}
    119 
    120 	dksc->sc_flags &= ~DKF_READYFORDUMP;
    121 	mutex_destroy(&dksc->sc_iolock);
    122 }
    123 
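         /*
          * Open a partition.  The first opener runs the driver's
          * d_firstopen() and the in-core disklabel is read if not yet
          * valid; opening anything but the raw partition fails while
          * wedges are configured.
          */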
    124 /* ARGSUSED */
    125 int
    126 dk_open(struct dk_softc *dksc, dev_t dev,
    127     int flags, int fmt, struct lwp *l)
    128 {
    129 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    130 	struct	disklabel *lp = dksc->sc_dkdev.dk_label;
    131 	int	part = DISKPART(dev);
    132 	int	pmask = 1 << part;
    133 	int	ret = 0;
    134 	struct disk *dk = &dksc->sc_dkdev;
    135 
    136 	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
    137 	    dksc->sc_xname, dksc, dev, flags));
    138 
    139 	mutex_enter(&dk->dk_openlock);
    140 
    141 	/*
    142 	 * If there are wedges, and this is not RAW_PART, then we
    143 	 * need to fail.
    144 	 */
    145 	if (dk->dk_nwedges != 0 && part != RAW_PART) {
    146 		ret = EBUSY;
    147 		goto done;
    148 	}
    149 
    150 	/*
    151 	 * initialize driver for the first opener
    152 	 */
    153 	if (dk->dk_openmask == 0 && dkd->d_firstopen != NULL) {
    154 		ret = (*dkd->d_firstopen)(dksc->sc_dev, dev, flags, fmt);
    155 		if (ret)
    156 			goto done;
    157 	}
    158 
    159 	/*
     160 	 * If we're init'ed and the in-core disklabel is not yet valid,
     161 	 * read it in.
    162 	 */
    163 	if ((dksc->sc_flags & DKF_INITED)) {
    164 		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
    165 			dksc->sc_flags |= DKF_VLABEL;
    166 			dk_getdisklabel(dksc, dev);
    167 		}
    168 	}
    169 
    170 	/* Fail if we can't find the partition. */
    171 	if (part != RAW_PART &&
    172 	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
    173 	     part >= lp->d_npartitions ||
    174 	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
    175 		ret = ENXIO;
    176 		goto done;
    177 	}
    178 
    179 	/* Mark our unit as open. */
    180 	switch (fmt) {
    181 	case S_IFCHR:
    182 		dk->dk_copenmask |= pmask;
    183 		break;
    184 	case S_IFBLK:
    185 		dk->dk_bopenmask |= pmask;
    186 		break;
    187 	}
    188 
    189 	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;
    190 
    191 done:
    192 	mutex_exit(&dk->dk_openlock);
    193 	return ret;
    194 }
    195 
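         /*
          * Close a partition.  When the last partition is closed, the
          * driver's d_lastclose() is called and the in-core label is
          * invalidated unless DIOCKLABEL asked for it to be kept.
          */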
    196 /* ARGSUSED */
    197 int
    198 dk_close(struct dk_softc *dksc, dev_t dev,
    199     int flags, int fmt, struct lwp *l)
    200 {
    201 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    202 	int	part = DISKPART(dev);
    203 	int	pmask = 1 << part;
    204 	struct disk *dk = &dksc->sc_dkdev;
    205 
    206 	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
    207 	    dksc->sc_xname, dksc, dev, flags));
    208 
    209 	mutex_enter(&dk->dk_openlock);
    210 
    211 	switch (fmt) {
    212 	case S_IFCHR:
    213 		dk->dk_copenmask &= ~pmask;
    214 		break;
    215 	case S_IFBLK:
    216 		dk->dk_bopenmask &= ~pmask;
    217 		break;
    218 	}
    219 	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;
    220 
    221 	if (dk->dk_openmask == 0) {
    222 		if (dkd->d_lastclose != NULL)
    223 			(*dkd->d_lastclose)(dksc->sc_dev);
    224 		if ((dksc->sc_flags & DKF_KLABEL) == 0)
    225 			dksc->sc_flags &= ~DKF_VLABEL;
    226 	}
    227 
    228 	mutex_exit(&dk->dk_openlock);
    229 	return 0;
    230 }
    231 
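         /*
          * dk_translate: validate a buffer and compute the absolute sector
          * number of the transfer (bp->b_rawblkno, in units of the device's
          * sector size).
          *
          * Returns -1 if the transfer should proceed, or a value >= 0
          * (bp->b_error, possibly 0) if the buffer has been handled here
          * and the caller only needs to biodone() it.
          */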
    232 static int
    233 dk_translate(struct dk_softc *dksc, struct buf *bp)
    234 {
    235 	int	part;
    236 	int	wlabel;
    237 	daddr_t	blkno;
    238 	struct disklabel *lp;
    239 	struct disk *dk;
    240 	uint64_t numsecs;
    241 	unsigned secsize;
    242 
    243 	lp = dksc->sc_dkdev.dk_label;
    244 	dk = &dksc->sc_dkdev;
    245 
    246 	part = DISKPART(bp->b_dev);
    247 	numsecs = dk->dk_geom.dg_secperunit;
    248 	secsize = dk->dk_geom.dg_secsize;
    249 
    250 	/*
    251 	 * The transfer must be a whole number of blocks and the offset must
    252 	 * not be negative.
    253 	 */
    254 	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
    255 		bp->b_error = EINVAL;
    256 		goto done;
    257 	}
    258 
    259 	/* If there is nothing to do, then we are done */
    260 	if (bp->b_bcount == 0)
    261 		goto done;
    262 
    263 	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
    264 	if (part == RAW_PART) {
    265 		uint64_t numblocks = btodb(numsecs * secsize);
    266 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
    267 			goto done;
    268 	} else {
    269 		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
    270 			goto done;
    271 	}
    272 
    273 	/*
    274 	 * Convert the block number to absolute and put it in terms
    275 	 * of the device's logical block size.
    276 	 */
    277 	if (secsize >= DEV_BSIZE)
    278 		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
    279 	else
    280 		blkno = bp->b_blkno * (DEV_BSIZE / secsize);
    281 
    282 	if (part != RAW_PART)
    283 		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
    284 	bp->b_rawblkno = blkno;
    285 
    286 	return -1;
    287 
    288 done:
    289 	bp->b_resid = bp->b_bcount;
    290 	return bp->b_error;
    291 }
    292 
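         /*
          * Common strategy prologue: returns nonzero if the buffer has been
          * completed here (biodone() already called), zero if the caller
          * should queue it.
          */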
    293 static int
    294 dk_strategy1(struct dk_softc *dksc, struct buf *bp)
    295 {
    296 	int error;
    297 
    298 	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
    299 	    dksc->sc_xname, dksc, bp));
    300 
    301 	if (!(dksc->sc_flags & DKF_INITED)) {
    302 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
    303 		bp->b_error = ENXIO;
    304 		bp->b_resid = bp->b_bcount;
    305 		biodone(bp);
    306 		return 1;
    307 	}
    308 
    309 	error = dk_translate(dksc, bp);
    310 	if (error >= 0) {
    311 		biodone(bp);
    312 		return 1;
    313 	}
    314 
    315 	return 0;
    316 }
    317 
    318 void
    319 dk_strategy(struct dk_softc *dksc, struct buf *bp)
    320 {
    321 	int error;
    322 
    323 	error = dk_strategy1(dksc, bp);
    324 	if (error)
    325 		return;
    326 
    327 	/*
    328 	 * Queue buffer and start unit
    329 	 */
    330 	dk_start(dksc, bp);
    331 }
    332 
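         /*
          * Like dk_strategy(), but only queues the buffer; the driver is
          * expected to run the queue later by calling dk_start().
          */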
    333 int
    334 dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
    335 {
    336 	int error;
    337 
    338 	error = dk_strategy1(dksc, bp);
    339 	if (error)
    340 		return error;
    341 
    342 	/*
    343 	 * Queue buffer only
    344 	 */
    345 	mutex_enter(&dksc->sc_iolock);
    346 	disk_wait(&dksc->sc_dkdev);
    347 	bufq_put(dksc->sc_bufq, bp);
    348 	mutex_exit(&dksc->sc_iolock);
    349 
    350 	return 0;
    351 }
    352 
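         /*
          * Return nonzero if buffers are still waiting on the queue.
          */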
    353 int
    354 dk_strategy_pending(struct dk_softc *dksc)
    355 {
    356 	struct buf *bp;
    357 
    358 	if (!(dksc->sc_flags & DKF_INITED)) {
    359 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
    360 		return 0;
    361 	}
    362 
    363 	mutex_enter(&dksc->sc_iolock);
    364 	bp = bufq_peek(dksc->sc_bufq);
    365 	mutex_exit(&dksc->sc_iolock);
    366 
    367 	return bp != NULL;
    368 }
    369 
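         /*
          * Queue an optional buffer and run the queue, handing each buffer
          * to the driver's d_diskstart().  A buffer rejected with EAGAIN is
          * kept in sc_deferred and retried before anything else is fetched
          * from the queue.
          */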
    370 void
    371 dk_start(struct dk_softc *dksc, struct buf *bp)
    372 {
    373 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    374 	int error;
    375 
    376 	if (!(dksc->sc_flags & DKF_INITED)) {
    377 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
    378 		return;
    379 	}
    380 
    381 	mutex_enter(&dksc->sc_iolock);
    382 
    383 	if (bp != NULL) {
    384 		bp->b_ci = curcpu();
    385 		disk_wait(&dksc->sc_dkdev);
    386 		bufq_put(dksc->sc_bufq, bp);
    387 	}
    388 
    389 	/*
     390 	 * If another thread is already running the queue, bump the
     391 	 * busy counter to 2 so that the queue is retried, because
     392 	 * the driver may now be able to accept additional
     393 	 * requests.
    394 	 */
    395 	if (dksc->sc_busy < 2)
    396 		dksc->sc_busy++;
    397 	if (dksc->sc_busy > 1)
    398 		goto done;
    399 
    400 	/*
    401 	 * Peeking at the buffer queue and committing the operation
    402 	 * only after success isn't atomic.
    403 	 *
    404 	 * So when a diskstart fails, the buffer is saved
    405 	 * and tried again before the next buffer is fetched.
    406 	 * dk_drain() handles flushing of a saved buffer.
    407 	 *
     408 	 * This preserves I/O order, which re-queueing via bufq_put() would not.
    409 	 */
    410 
    411 	while (dksc->sc_busy > 0) {
    412 
    413 		bp = dksc->sc_deferred;
    414 		dksc->sc_deferred = NULL;
    415 
    416 		if (bp == NULL)
    417 			bp = bufq_get(dksc->sc_bufq);
    418 
    419 		while (bp != NULL) {
    420 
    421 			disk_busy(&dksc->sc_dkdev);
    422 			mutex_exit(&dksc->sc_iolock);
    423 			error = dkd->d_diskstart(dksc->sc_dev, bp);
    424 			mutex_enter(&dksc->sc_iolock);
    425 			if (error == EAGAIN) {
    426 				KASSERT(dksc->sc_deferred == NULL);
    427 				dksc->sc_deferred = bp;
    428 				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
    429 				disk_wait(&dksc->sc_dkdev);
    430 				break;
    431 			}
    432 
    433 			if (error != 0) {
    434 				bp->b_error = error;
    435 				bp->b_resid = bp->b_bcount;
    436 				dk_done1(dksc, bp, false);
    437 			}
    438 
    439 			bp = bufq_get(dksc->sc_bufq);
    440 		}
    441 
    442 		dksc->sc_busy--;
    443 	}
    444 done:
    445 	mutex_exit(&dksc->sc_iolock);
    446 }
    447 
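         /*
          * Complete an I/O request.  'lock' is false when the caller
          * (dk_start) already holds sc_iolock.
          */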
    448 static void
    449 dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
    450 {
    451 	struct disk *dk = &dksc->sc_dkdev;
    452 
    453 	if (bp->b_error != 0) {
    454 		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);
    455 
    456 		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
    457 			dk->dk_label);
    458 		printf("\n");
    459 	}
    460 
    461 	if (lock)
    462 		mutex_enter(&dksc->sc_iolock);
    463 	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
    464 
    465 	if ((dksc->sc_flags & DKF_NO_RND) == 0)
    466 		rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);
    467 	if (lock)
    468 		mutex_exit(&dksc->sc_iolock);
    469 
    470 	biodone(bp);
    471 }
    472 
    473 void
    474 dk_done(struct dk_softc *dksc, struct buf *bp)
    475 {
    476 	dk_done1(dksc, bp, true);
    477 }
    478 
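         /*
          * Abort a deferred buffer with EIO and drain the buffer queue.
          */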
    479 void
    480 dk_drain(struct dk_softc *dksc)
    481 {
    482 	struct buf *bp;
    483 
    484 	mutex_enter(&dksc->sc_iolock);
    485 	bp = dksc->sc_deferred;
    486 	dksc->sc_deferred = NULL;
    487 	if (bp != NULL) {
    488 		bp->b_error = EIO;
    489 		bp->b_resid = bp->b_bcount;
    490 		biodone(bp);
    491 	}
    492 	bufq_drain(dksc->sc_bufq);
    493 	mutex_exit(&dksc->sc_iolock);
    494 }
    495 
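         /*
          * Discard (trim) a byte range by handing it to the driver's
          * d_discard() in chunks no larger than what fits in b_bcount,
          * after running each chunk through the usual bounds checks.
          */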
    496 int
    497 dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
    498 {
    499 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    500 	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
    501 	struct buf tmp, *bp = &tmp;
    502 	int maxsz;
    503 	int error = 0;
    504 
    505 	KASSERT(len >= 0);
    506 
     507 	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", %jd, %jd)\n", __func__,
     508 	    dksc->sc_xname, dksc, dev, (intmax_t)pos, (intmax_t)len));
    509 
    510 	if (!(dksc->sc_flags & DKF_INITED)) {
    511 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
    512 		return ENXIO;
    513 	}
    514 
    515 	if (secsize == 0 || (pos % secsize) != 0 || (len % secsize) != 0)
    516 		return EINVAL;
    517 
    518 	/* largest value that b_bcount can store */
    519 	maxsz = rounddown(INT_MAX, secsize);
    520 
    521 	while (len > 0) {
    522 		/* enough data to please the bounds checking code */
    523 		bp->b_dev = dev;
    524 		bp->b_blkno = (daddr_t)(pos / secsize);
    525 		bp->b_bcount = uimin(len, maxsz);
    526 		bp->b_flags = B_WRITE;
    527 
    528 		error = dk_translate(dksc, bp);
    529 		if (error >= 0)
    530 			break;
    531 
    532 		error = dkd->d_discard(dksc->sc_dev,
    533 			(off_t)bp->b_rawblkno * secsize,
    534 			(off_t)bp->b_bcount);
    535 		if (error)
    536 			break;
    537 
    538 		pos += bp->b_bcount;
    539 		len -= bp->b_bcount;
    540 	}
    541 
    542 	return error;
    543 }
    544 
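         /*
          * Return the size of the given partition in DEV_BSIZE units, or -1
          * if the unit is not configured or the partition is not of type
          * FS_SWAP.
          */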
    545 int
    546 dk_size(struct dk_softc *dksc, dev_t dev)
    547 {
    548 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    549 	struct	disklabel *lp;
    550 	int	is_open;
    551 	int	part;
    552 	int	size;
    553 
    554 	if ((dksc->sc_flags & DKF_INITED) == 0)
    555 		return -1;
    556 
    557 	part = DISKPART(dev);
    558 	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);
    559 
    560 	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
    561 		return -1;
    562 
    563 	lp = dksc->sc_dkdev.dk_label;
    564 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    565 		size = -1;
    566 	else
    567 		size = lp->d_partitions[part].p_size *
    568 		    (lp->d_secsize / DEV_BSIZE);
    569 
    570 	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
    571 		return -1;
    572 
    573 	return size;
    574 }
    575 
    576 int
    577 dk_ioctl(struct dk_softc *dksc, dev_t dev,
    578 	    u_long cmd, void *data, int flag, struct lwp *l)
    579 {
    580 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    581 	struct	disklabel *lp;
    582 	struct	disk *dk = &dksc->sc_dkdev;
    583 #ifdef __HAVE_OLD_DISKLABEL
    584 	struct	disklabel newlabel;
    585 #endif
    586 	int	error;
    587 
    588 	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
    589 	    dksc->sc_xname, dksc, dev, cmd));
    590 
    591 	/* ensure that the pseudo disk is open for writes for these commands */
    592 	switch (cmd) {
    593 	case DIOCSDINFO:
    594 	case DIOCWDINFO:
    595 #ifdef __HAVE_OLD_DISKLABEL
    596 	case ODIOCSDINFO:
    597 	case ODIOCWDINFO:
    598 #endif
    599 	case DIOCKLABEL:
    600 	case DIOCWLABEL:
    601 	case DIOCAWEDGE:
    602 	case DIOCDWEDGE:
    603 	case DIOCSSTRATEGY:
    604 		if ((flag & FWRITE) == 0)
    605 			return EBADF;
    606 	}
    607 
    608 	/* ensure that the pseudo-disk is initialized for these */
    609 	switch (cmd) {
    610 	case DIOCGDINFO:
    611 	case DIOCSDINFO:
    612 	case DIOCWDINFO:
    613 	case DIOCGPARTINFO:
    614 	case DIOCKLABEL:
    615 	case DIOCWLABEL:
    616 	case DIOCGDEFLABEL:
    617 	case DIOCAWEDGE:
    618 	case DIOCDWEDGE:
    619 	case DIOCLWEDGES:
    620 	case DIOCMWEDGES:
    621 	case DIOCRMWEDGES:
    622 	case DIOCCACHESYNC:
    623 #ifdef __HAVE_OLD_DISKLABEL
    624 	case ODIOCGDINFO:
    625 	case ODIOCSDINFO:
    626 	case ODIOCWDINFO:
    627 	case ODIOCGDEFLABEL:
    628 #endif
    629 		if ((dksc->sc_flags & DKF_INITED) == 0)
    630 			return ENXIO;
    631 	}
    632 
    633 	error = disk_ioctl(dk, dev, cmd, data, flag, l);
    634 	if (error != EPASSTHROUGH)
    635 		return error;
    636 	else
    637 		error = 0;
    638 
    639 	switch (cmd) {
    640 	case DIOCWDINFO:
    641 	case DIOCSDINFO:
    642 #ifdef __HAVE_OLD_DISKLABEL
    643 	case ODIOCWDINFO:
    644 	case ODIOCSDINFO:
    645 #endif
    646 #ifdef __HAVE_OLD_DISKLABEL
    647 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
    648 			memset(&newlabel, 0, sizeof newlabel);
    649 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
    650 			lp = &newlabel;
    651 		} else
    652 #endif
    653 		lp = (struct disklabel *)data;
    654 
    655 		mutex_enter(&dk->dk_openlock);
    656 		dksc->sc_flags |= DKF_LABELLING;
    657 
    658 		error = setdisklabel(dksc->sc_dkdev.dk_label,
    659 		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
    660 		if (error == 0) {
    661 			if (cmd == DIOCWDINFO
    662 #ifdef __HAVE_OLD_DISKLABEL
    663 			    || cmd == ODIOCWDINFO
    664 #endif
    665 			   )
    666 				error = writedisklabel(DKLABELDEV(dev),
    667 				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
    668 				    dksc->sc_dkdev.dk_cpulabel);
    669 		}
    670 
    671 		dksc->sc_flags &= ~DKF_LABELLING;
    672 		mutex_exit(&dk->dk_openlock);
    673 		break;
    674 
    675 	case DIOCKLABEL:
    676 		if (*(int *)data != 0)
    677 			dksc->sc_flags |= DKF_KLABEL;
    678 		else
    679 			dksc->sc_flags &= ~DKF_KLABEL;
    680 		break;
    681 
    682 	case DIOCWLABEL:
    683 		if (*(int *)data != 0)
    684 			dksc->sc_flags |= DKF_WLABEL;
    685 		else
    686 			dksc->sc_flags &= ~DKF_WLABEL;
    687 		break;
    688 
    689 	case DIOCGDEFLABEL:
    690 		dk_getdefaultlabel(dksc, (struct disklabel *)data);
    691 		break;
    692 
    693 #ifdef __HAVE_OLD_DISKLABEL
    694 	case ODIOCGDEFLABEL:
    695 		dk_getdefaultlabel(dksc, &newlabel);
    696 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
    697 			return ENOTTY;
    698 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
    699 		break;
    700 #endif
    701 
    702 	case DIOCGSTRATEGY:
    703 	    {
    704 		struct disk_strategy *dks = (void *)data;
    705 
    706 		mutex_enter(&dksc->sc_iolock);
    707 		if (dksc->sc_bufq != NULL)
    708 			strlcpy(dks->dks_name,
    709 			    bufq_getstrategyname(dksc->sc_bufq),
    710 			    sizeof(dks->dks_name));
    711 		else
    712 			error = EINVAL;
    713 		mutex_exit(&dksc->sc_iolock);
    714 		dks->dks_paramlen = 0;
    715 		break;
    716 	    }
    717 
    718 	case DIOCSSTRATEGY:
    719 	    {
    720 		struct disk_strategy *dks = (void *)data;
    721 		struct bufq_state *new;
    722 		struct bufq_state *old;
    723 
    724 		if (dks->dks_param != NULL) {
    725 			return EINVAL;
    726 		}
    727 		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
    728 		error = bufq_alloc(&new, dks->dks_name,
    729 		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
    730 		if (error) {
    731 			return error;
    732 		}
    733 		mutex_enter(&dksc->sc_iolock);
    734 		old = dksc->sc_bufq;
    735 		if (old)
    736 			bufq_move(new, old);
    737 		dksc->sc_bufq = new;
    738 		mutex_exit(&dksc->sc_iolock);
    739 		if (old)
    740 			bufq_free(old);
    741 		break;
    742 	    }
    743 
    744 	default:
    745 		error = ENOTTY;
    746 	}
    747 
    748 	return error;
    749 }
    750 
    751 /*
    752  * dk_dump dumps all of physical memory into the partition specified.
    753  * This requires substantially more framework than {s,w}ddump, and hence
    754  * is probably much more fragile.
    755  *
    756  */
    757 
    758 #define DKFF_READYFORDUMP(x)	(((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
    759 static volatile int	dk_dumping = 0;
    760 
    761 /* ARGSUSED */
    762 int
    763 dk_dump(struct dk_softc *dksc, dev_t dev,
    764     daddr_t blkno, void *vav, size_t size, int flags)
    765 {
    766 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    767 	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    768 	char *va = vav;
    769 	struct disklabel *lp;
    770 	struct partition *p;
    771 	int part, towrt, maxblkcnt, nblk;
    772 	int maxxfer, rv = 0;
    773 
    774 	/*
    775 	 * ensure that we consider this device to be safe for dumping,
    776 	 * and that the device is configured.
    777 	 */
    778 	if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
    779 		DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
    780 		    dksc->sc_flags));
    781 		return ENXIO;
    782 	}
    783 
    784 	/* ensure that we are not already dumping */
    785 	if (dk_dumping)
    786 		return EFAULT;
    787 	if ((flags & DK_DUMP_RECURSIVE) == 0)
    788 		dk_dumping = 1;
    789 
    790 	if (dkd->d_dumpblocks == NULL) {
    791 		DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
    792 		return ENXIO;
    793 	}
    794 
    795 	/* device specific max transfer size */
    796 	maxxfer = MAXPHYS;
    797 	if (dkd->d_iosize != NULL)
    798 		(*dkd->d_iosize)(dksc->sc_dev, &maxxfer);
    799 
     800 	/* Convert to disk sectors.  The request size must be a multiple of the sector size. */
    801 	part = DISKPART(dev);
    802 	lp = dksc->sc_dkdev.dk_label;
    803 	if ((size % lp->d_secsize) != 0) {
    804 		DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
    805 		return EFAULT;
    806 	}
    807 	towrt = size / lp->d_secsize;
    808 	blkno = dbtob(blkno) / lp->d_secsize;   /* blkno in secsize units */
    809 
    810 	p = &lp->d_partitions[part];
    811 	if (part == RAW_PART) {
    812 		if (p->p_fstype != FS_UNUSED) {
    813 			DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
    814 			    p->p_fstype));
    815 			return ENXIO;
    816 		}
    817 		/* Check whether dump goes to a wedge */
    818 		if (dksc->sc_dkdev.dk_nwedges == 0) {
    819 			DPRINTF(DKDB_DUMP, ("%s: dump to raw\n", __func__));
    820 			return ENXIO;
    821 		}
    822 		/* Check transfer bounds against media size */
    823 		if (blkno < 0 || (blkno + towrt) > dg->dg_secperunit) {
    824 			DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
    825 			    "nsects=%jd\n", __func__, (intmax_t)blkno, towrt, dg->dg_secperunit));
    826 			return EINVAL;
    827 		}
    828 	} else {
    829 		int nsects, sectoff;
    830 
    831 		if (p->p_fstype != FS_SWAP) {
    832 			DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
    833 			    p->p_fstype));
    834 			return ENXIO;
    835 		}
    836 		nsects = p->p_size;
    837 		sectoff = p->p_offset;
    838 
    839 		/* Check transfer bounds against partition size. */
    840 		if ((blkno < 0) || ((blkno + towrt) > nsects)) {
    841 			DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
    842 			    "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
    843 			return EINVAL;
    844 		}
    845 
    846 		/* Offset block number to start of partition. */
    847 		blkno += sectoff;
    848 	}
    849 
    850 	/* Start dumping and return when done. */
    851 	maxblkcnt = howmany(maxxfer, lp->d_secsize);
    852 	while (towrt > 0) {
    853 		nblk = uimin(maxblkcnt, towrt);
    854 
    855 		if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
    856 		    != 0) {
    857 			DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
    858 			    rv));
    859 			return rv;
    860 		}
    861 
    862 		towrt -= nblk;
    863 		blkno += nblk;
    864 		va += nblk * lp->d_secsize;
    865 	}
    866 
    867 	if ((flags & DK_DUMP_RECURSIVE) == 0)
    868 		dk_dumping = 0;
    869 
    870 	return 0;
    871 }
    872 
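         /*
          * Construct a default label from the driver's geometry: a single
          * raw partition covering the whole unit.  The driver's d_label()
          * callback, if any, may adjust it.
          */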
    873 /* ARGSUSED */
    874 void
    875 dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
    876 {
    877 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    878 	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    879 
    880 	memset(lp, 0, sizeof(*lp));
    881 
    882 	if (dg->dg_secperunit > UINT32_MAX)
    883 		lp->d_secperunit = UINT32_MAX;
    884 	else
    885 		lp->d_secperunit = dg->dg_secperunit;
    886 	lp->d_secsize = dg->dg_secsize;
    887 	lp->d_nsectors = dg->dg_nsectors;
    888 	lp->d_ntracks = dg->dg_ntracks;
    889 	lp->d_ncylinders = dg->dg_ncylinders;
    890 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
    891 
    892 	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
    893 	lp->d_type = dksc->sc_dtype;
    894 	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
    895 	lp->d_rpm = 3600;
    896 	lp->d_interleave = 1;
    897 	lp->d_flags = 0;
    898 
    899 	lp->d_partitions[RAW_PART].p_offset = 0;
    900 	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
    901 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
    902 	lp->d_npartitions = RAW_PART + 1;
    903 
    904 	lp->d_magic = DISKMAGIC;
    905 	lp->d_magic2 = DISKMAGIC;
    906 
    907 	if (dkd->d_label)
    908 		dkd->d_label(dksc->sc_dev, lp);
    909 
    910 	lp->d_checksum = dkcksum(lp);
    911 }
    912 
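         /*
          * Read the disklabel from the media; if that fails, fall back to
          * dk_makedisklabel().  With DKF_LABELSANITY set, warn when the
          * label disagrees with the unit's geometry.
          */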
    913 /* ARGSUSED */
    914 void
    915 dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
    916 {
    917 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    918 	struct	 disklabel *lp = dksc->sc_dkdev.dk_label;
    919 	struct	 cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
    920 	struct   disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    921 	struct	 partition *pp;
    922 	int	 i, lpratio, dgratio;
    923 	const char	*errstring;
    924 
    925 	memset(clp, 0x0, sizeof(*clp));
    926 	dk_getdefaultlabel(dksc, lp);
    927 	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
    928 	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
    929 	if (errstring) {
    930 		dk_makedisklabel(dksc);
    931 		if (dksc->sc_flags & DKF_WARNLABEL)
    932 			printf("%s: %s\n", dksc->sc_xname, errstring);
    933 		return;
    934 	}
    935 
    936 	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
    937 		return;
    938 
    939 	/* Convert sector counts to multiple of DEV_BSIZE for comparison */
    940 	lpratio = dgratio = 1;
    941 	if (lp->d_secsize > DEV_BSIZE)
    942 		lpratio = lp->d_secsize / DEV_BSIZE;
    943 	if (dg->dg_secsize > DEV_BSIZE)
    944 		dgratio = dg->dg_secsize / DEV_BSIZE;
    945 
    946 	/* Sanity check */
    947 	if ((uint64_t)lp->d_secperunit * lpratio > dg->dg_secperunit * dgratio)
    948 		printf("WARNING: %s: "
    949 		    "total unit size in disklabel (%" PRIu64 ") "
    950 		    "!= the size of %s (%" PRIu64 ")\n", dksc->sc_xname,
    951 		    (uint64_t)lp->d_secperunit * lpratio, dksc->sc_xname,
    952 		    dg->dg_secperunit * dgratio);
    953 	else if (lp->d_secperunit < UINT32_MAX &&
    954 	    (uint64_t)lp->d_secperunit * lpratio < dg->dg_secperunit * dgratio)
    955 		printf("%s: %" PRIu64 " trailing sectors not covered"
    956 		    " by disklabel\n", dksc->sc_xname,
    957 		    (dg->dg_secperunit * dgratio)
    958 		    - (lp->d_secperunit * lpratio));
    959 
    960 	for (i=0; i < lp->d_npartitions; i++) {
    961 		uint64_t pend;
    962 
    963 		pp = &lp->d_partitions[i];
    964 		pend = pp->p_offset + pp->p_size;
    965 		if (pend * lpratio > dg->dg_secperunit * dgratio)
    966 			printf("WARNING: %s: end of partition `%c' exceeds "
    967 			    "the size of %s (%" PRIu64 ")\n", dksc->sc_xname,
    968 			    'a' + i, dksc->sc_xname,
    969 			    dg->dg_secperunit * dgratio);
    970 	}
    971 }
    972 
    973 /*
    974  * Heuristic to conjure a disklabel if reading a disklabel failed.
    975  *
    976  * This is to allow the raw partition to be used for a filesystem
     977  * without caring about the write-protected label sector.
     978  *
     979  * If the driver provides its own callback, use that instead.
    980  */
    981 /* ARGSUSED */
    982 static void
    983 dk_makedisklabel(struct dk_softc *dksc)
    984 {
    985 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    986 	struct  disklabel *lp = dksc->sc_dkdev.dk_label;
    987 
    988 	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));
    989 
    990 	if (dkd->d_label)
    991 		dkd->d_label(dksc->sc_dev, lp);
    992 	else
    993 		lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
    994 
    995 	lp->d_checksum = dkcksum(lp);
    996 }
    997 
    998 /* This function is taken from ccd.c:1.76  --rcd */
    999 
   1000 /*
   1001  * XXX this function looks too generic for dksubr.c, shouldn't we
   1002  *     put it somewhere better?
   1003  */
   1004 
   1005 /*
   1006  * Lookup the provided name in the filesystem.  If the file exists,
   1007  * is a valid block device, and isn't being used by anyone else,
   1008  * set *vpp to the file's vnode.
   1009  */
   1010 int
   1011 dk_lookup(struct pathbuf *pb, struct lwp *l, struct vnode **vpp)
   1012 {
   1013 	struct nameidata nd;
   1014 	struct vnode *vp;
   1015 	int     error;
   1016 
   1017 	if (l == NULL)
   1018 		return ESRCH;	/* Is ESRCH the best choice? */
   1019 
   1020 	NDINIT(&nd, LOOKUP, FOLLOW, pb);
   1021 	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
   1022 		DPRINTF((DKDB_FOLLOW|DKDB_INIT),
   1023 		    ("%s: vn_open error = %d\n", __func__, error));
   1024 		return error;
   1025 	}
   1026 
   1027 	vp = nd.ni_vp;
   1028 	if (vp->v_type != VBLK) {
   1029 		error = ENOTBLK;
   1030 		goto out;
   1031 	}
   1032 
   1033 	/* Reopen as anonymous vnode to protect against forced unmount. */
   1034 	if ((error = bdevvp(vp->v_rdev, vpp)) != 0)
   1035 		goto out;
   1036 	VOP_UNLOCK(vp);
   1037 	if ((error = vn_close(vp, FREAD | FWRITE, l->l_cred)) != 0) {
   1038 		vrele(*vpp);
   1039 		return error;
   1040 	}
   1041 	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
   1042 		vrele(*vpp);
   1043 		return error;
   1044 	}
   1045 	mutex_enter((*vpp)->v_interlock);
   1046 	(*vpp)->v_writecount++;
   1047 	mutex_exit((*vpp)->v_interlock);
   1048 
   1049 	IFDEBUG(DKDB_VNODE, vprint("dk_lookup: vnode info", *vpp));
   1050 
   1051 	return 0;
   1052 out:
   1053 	VOP_UNLOCK(vp);
   1054 	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
   1055 	return error;
   1056 }
   1057 
   1058 MODULE(MODULE_CLASS_MISC, dk_subr, NULL);
   1059 
   1060 static int
   1061 dk_subr_modcmd(modcmd_t cmd, void *arg)
   1062 {
   1063 	switch (cmd) {
   1064 	case MODULE_CMD_INIT:
   1065 	case MODULE_CMD_FINI:
   1066 		return 0;
   1067 	case MODULE_CMD_STAT:
   1068 	case MODULE_CMD_AUTOUNLOAD:
   1069 	default:
   1070 		return ENOTTY;
   1071 	}
   1072 }
   1073