      1 /*	$NetBSD: vnd.c,v 1.116 2005/07/18 16:09:37 christos Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Copyright (c) 1990, 1993
     41  *	The Regents of the University of California.  All rights reserved.
     42  *
     43  * This code is derived from software contributed to Berkeley by
     44  * the Systems Programming Group of the University of Utah Computer
     45  * Science Department.
     46  *
     47  * Redistribution and use in source and binary forms, with or without
     48  * modification, are permitted provided that the following conditions
     49  * are met:
     50  * 1. Redistributions of source code must retain the above copyright
     51  *    notice, this list of conditions and the following disclaimer.
     52  * 2. Redistributions in binary form must reproduce the above copyright
     53  *    notice, this list of conditions and the following disclaimer in the
     54  *    documentation and/or other materials provided with the distribution.
     55  * 3. Neither the name of the University nor the names of its contributors
     56  *    may be used to endorse or promote products derived from this software
     57  *    without specific prior written permission.
     58  *
     59  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     62  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     63  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     64  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     65  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     69  * SUCH DAMAGE.
     70  *
     71  * from: Utah $Hdr: vn.c 1.13 94/04/02$
     72  *
     73  *	@(#)vn.c	8.9 (Berkeley) 5/14/95
     74  */
     75 
     76 /*
     77  * Copyright (c) 1988 University of Utah.
     78  *
     79  * This code is derived from software contributed to Berkeley by
     80  * the Systems Programming Group of the University of Utah Computer
     81  * Science Department.
     82  *
     83  * Redistribution and use in source and binary forms, with or without
     84  * modification, are permitted provided that the following conditions
     85  * are met:
     86  * 1. Redistributions of source code must retain the above copyright
     87  *    notice, this list of conditions and the following disclaimer.
     88  * 2. Redistributions in binary form must reproduce the above copyright
     89  *    notice, this list of conditions and the following disclaimer in the
     90  *    documentation and/or other materials provided with the distribution.
     91  * 3. All advertising materials mentioning features or use of this software
     92  *    must display the following acknowledgement:
     93  *	This product includes software developed by the University of
     94  *	California, Berkeley and its contributors.
     95  * 4. Neither the name of the University nor the names of its contributors
     96  *    may be used to endorse or promote products derived from this software
     97  *    without specific prior written permission.
     98  *
     99  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
    100  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    101  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    102  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    103  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    104  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    105  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    106  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    107  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    108  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    109  * SUCH DAMAGE.
    110  *
    111  * from: Utah $Hdr: vn.c 1.13 94/04/02$
    112  *
    113  *	@(#)vn.c	8.9 (Berkeley) 5/14/95
    114  */
    115 
    116 /*
    117  * Vnode disk driver.
    118  *
    119  * Block/character interface to a vnode.  Allows one to treat a file
    120  * as a disk (e.g. build a filesystem in it, mount it, etc.).
    121  *
    122  * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
    123  * instead of a simple VOP_RDWR.  We do this to avoid distorting the
    124  * local buffer cache.
    125  *
    126  * NOTE 2: There is a security issue involved with this driver.
    127  * Once mounted, all access to the contents of the "mapped" file via
    128  * the special file is controlled by the permissions on the special
    129  * file; the protection of the mapped file itself is ignored (effectively,
    130  * by using root credentials in all transactions).
    131  *
    132  * NOTE 3: Doesn't interact with leases, should it?
    133  */
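        /*
         * Illustrative userland usage (a minimal sketch, not part of the
         * driver): vnconfig(8) normally does this, but the sequence below
         * shows the VNDIOCSET ioctl handled in vndioctl().  The image path
         * and device name are examples only; the raw partition letter
         * varies by port.
         *
         *	struct vnd_ioctl vio;
         *	int fd;
         *
         *	memset(&vio, 0, sizeof(vio));
         *	vio.vnd_file = "/var/tmp/diskimage";
         *	fd = open("/dev/rvnd0d", O_RDWR);
         *	if (fd == -1 || ioctl(fd, VNDIOCSET, &vio) == -1)
         *		err(1, "VNDIOCSET");
         *
         * On success vio.vnd_size holds the configured size in bytes.
         */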
    134 
    135 #include <sys/cdefs.h>
    136 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.116 2005/07/18 16:09:37 christos Exp $");
    137 
    138 #if defined(_KERNEL_OPT)
    139 #include "fs_nfs.h"
    140 #include "opt_vnd.h"
    141 #endif
    142 
    143 #include <sys/param.h>
    144 #include <sys/systm.h>
    145 #include <sys/namei.h>
    146 #include <sys/proc.h>
    147 #include <sys/kthread.h>
    148 #include <sys/errno.h>
    149 #include <sys/buf.h>
    150 #include <sys/bufq.h>
    151 #include <sys/malloc.h>
    152 #include <sys/ioctl.h>
    153 #include <sys/disklabel.h>
    154 #include <sys/device.h>
    155 #include <sys/disk.h>
    156 #include <sys/stat.h>
    157 #include <sys/mount.h>
    158 #include <sys/vnode.h>
    159 #include <sys/file.h>
    160 #include <sys/uio.h>
    161 #include <sys/conf.h>
    162 #include <net/zlib.h>
    163 
    164 #include <miscfs/specfs/specdev.h>
    165 
    166 #include <dev/vndvar.h>
    167 
    168 #if defined(VNDDEBUG) && !defined(DEBUG)
    169 #define DEBUG
    170 #endif
    171 
    172 #ifdef DEBUG
    173 int dovndcluster = 1;
    174 #define VDB_FOLLOW	0x01
    175 #define VDB_INIT	0x02
    176 #define VDB_IO		0x04
    177 #define VDB_LABEL	0x08
    178 int vnddebug = 0x00;
    179 #endif
    180 
    181 #define vndunit(x)	DISKUNIT(x)
    182 
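        /*
         * A vndxfer describes one client transfer after vndthread has split
         * it into component I/Os; each component is wrapped in a vndbuf
         * that points back at its vndxfer so vndiodone() can account for it.
         */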
    183 struct vndxfer {
    184 	struct buf	*vx_bp;		/* Pointer to parent buffer */
    185 	int		vx_error;
    186 	int		vx_pending;	/* # of pending aux buffers */
    187 	int		vx_flags;
    188 #define VX_BUSY		1
    189 };
    190 
    191 struct vndbuf {
    192 	struct buf	vb_buf;
    193 	struct vndxfer	*vb_xfer;
    194 };
    195 
    196 #define VND_GETXFER(vnd)	pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
    197 #define VND_PUTXFER(vnd, vx)	pool_put(&(vnd)->sc_vxpool, (vx))
    198 
    199 #define VND_GETBUF(vnd)		pool_get(&(vnd)->sc_vbpool, PR_WAITOK)
    200 #define VND_PUTBUF(vnd, vb)	pool_put(&(vnd)->sc_vbpool, (vb))
    201 
    202 struct vnd_softc *vnd_softc;
    203 int numvnd = 0;
    204 
    205 #define VNDLABELDEV(dev) \
    206     (MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
    207 
    208 /* called by main() at boot time (XXX: and the LKM driver) */
    209 void	vndattach(int);
    210 int	vnddetach(void);
    211 
    212 static void	vndclear(struct vnd_softc *, int);
    213 static int	vndsetcred(struct vnd_softc *, struct ucred *);
    214 static void	vndthrottle(struct vnd_softc *, struct vnode *);
    215 static void	vndiodone(struct buf *);
    216 #if 0
    217 static void	vndshutdown(void);
    218 #endif
    219 
    220 static void	vndgetdefaultlabel(struct vnd_softc *, struct disklabel *);
    221 static void	vndgetdisklabel(dev_t);
    222 
    223 static int	vndlock(struct vnd_softc *);
    224 static void	vndunlock(struct vnd_softc *);
    225 #ifdef VND_COMPRESSION
    226 static void	compstrategy(struct buf *, off_t);
    227 static void	*vnd_alloc(void *, u_int, u_int);
    228 static void	vnd_free(void *, void *);
    229 #endif /* VND_COMPRESSION */
    230 
    231 void vndthread(void *);
    232 
    233 static dev_type_open(vndopen);
    234 static dev_type_close(vndclose);
    235 static dev_type_read(vndread);
    236 static dev_type_write(vndwrite);
    237 static dev_type_ioctl(vndioctl);
    238 static dev_type_strategy(vndstrategy);
    239 static dev_type_dump(vnddump);
    240 static dev_type_size(vndsize);
    241 
    242 const struct bdevsw vnd_bdevsw = {
    243 	vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
    244 };
    245 
    246 const struct cdevsw vnd_cdevsw = {
    247 	vndopen, vndclose, vndread, vndwrite, vndioctl,
    248 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    249 };
    250 
    251 static int vndattached;
    252 
    253 void
    254 vndattach(int num)
    255 {
    256 	int i;
    257 	char *mem;
    258 
    259 	if (vndattached)
    260 		return;
    261 	vndattached = 1;
    262 	if (num <= 0)
    263 		return;
    264 	i = num * sizeof(struct vnd_softc);
    265 	mem = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
    266 	if (mem == NULL) {
    267 		printf("WARNING: no memory for vnode disks\n");
    268 		return;
    269 	}
    270 	vnd_softc = (struct vnd_softc *)mem;
    271 	numvnd = num;
    272 
    273 	for (i = 0; i < numvnd; i++) {
    274 		vnd_softc[i].sc_unit = i;
    275 		vnd_softc[i].sc_comp_offsets = NULL;
    276 		vnd_softc[i].sc_comp_buff = NULL;
    277 		vnd_softc[i].sc_comp_decombuf = NULL;
    278 		bufq_alloc(&vnd_softc[i].sc_tab,
    279 		    BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
    280 	}
    281 }
    282 
    283 int
    284 vnddetach(void)
    285 {
    286 	int i;
    287 
    288 	/* First check we aren't in use. */
    289 	for (i = 0; i < numvnd; i++)
    290 		if (vnd_softc[i].sc_flags & VNF_INITED)
    291 			return (EBUSY);
    292 
    293 	for (i = 0; i < numvnd; i++)
    294 		bufq_free(&vnd_softc[i].sc_tab);
    295 
    296 	free(vnd_softc, M_DEVBUF);
    297 	vndattached = 0;
    298 
    299 	return (0);
    300 }
    301 
    302 static int
    303 vndopen(dev_t dev, int flags, int mode, struct proc *p)
    304 {
    305 	int unit = vndunit(dev);
    306 	struct vnd_softc *sc;
    307 	int error = 0, part, pmask;
    308 	struct disklabel *lp;
    309 
    310 #ifdef DEBUG
    311 	if (vnddebug & VDB_FOLLOW)
    312 		printf("vndopen(0x%x, 0x%x, 0x%x, %p)\n", dev, flags, mode, p);
    313 #endif
    314 	if (unit >= numvnd)
    315 		return (ENXIO);
    316 	sc = &vnd_softc[unit];
    317 
    318 	if ((error = vndlock(sc)) != 0)
    319 		return (error);
    320 
    321 	lp = sc->sc_dkdev.dk_label;
    322 
    323 	part = DISKPART(dev);
    324 	pmask = (1 << part);
    325 
    326 	/*
    327 	 * If we're initialized, check to see if there are any other
    328 	 * open partitions.  If not, then it's safe to update the
    329 	 * in-core disklabel.  Only read the disklabel if it is
    330 	 * not already valid.
    331 	 */
    332 	if ((sc->sc_flags & (VNF_INITED|VNF_VLABEL)) == VNF_INITED &&
    333 	    sc->sc_dkdev.dk_openmask == 0)
    334 		vndgetdisklabel(dev);
    335 
    336 	/* Check that the partition exists. */
    337 	if (part != RAW_PART) {
    338 		if (((sc->sc_flags & VNF_INITED) == 0) ||
    339 		    ((part >= lp->d_npartitions) ||
    340 		     (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    341 			error = ENXIO;
    342 			goto done;
    343 		}
    344 	}
    345 
    346 	/* Prevent our unit from being unconfigured while open. */
    347 	switch (mode) {
    348 	case S_IFCHR:
    349 		sc->sc_dkdev.dk_copenmask |= pmask;
    350 		break;
    351 
    352 	case S_IFBLK:
    353 		sc->sc_dkdev.dk_bopenmask |= pmask;
    354 		break;
    355 	}
    356 	sc->sc_dkdev.dk_openmask =
    357 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
    358 
    359  done:
    360 	vndunlock(sc);
    361 	return (error);
    362 }
    363 
    364 static int
    365 vndclose(dev_t dev, int flags, int mode, struct proc *p)
    366 {
    367 	int unit = vndunit(dev);
    368 	struct vnd_softc *sc;
    369 	int error = 0, part;
    370 
    371 #ifdef DEBUG
    372 	if (vnddebug & VDB_FOLLOW)
    373 		printf("vndclose(0x%x, 0x%x, 0x%x, %p)\n", dev, flags, mode, p);
    374 #endif
    375 
    376 	if (unit >= numvnd)
    377 		return (ENXIO);
    378 	sc = &vnd_softc[unit];
    379 
    380 	if ((error = vndlock(sc)) != 0)
    381 		return (error);
    382 
    383 	part = DISKPART(dev);
    384 
    385 	/* ...that much closer to allowing unconfiguration... */
    386 	switch (mode) {
    387 	case S_IFCHR:
    388 		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
    389 		break;
    390 
    391 	case S_IFBLK:
    392 		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
    393 		break;
    394 	}
    395 	sc->sc_dkdev.dk_openmask =
    396 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
    397 
    398 	if (sc->sc_dkdev.dk_openmask == 0) {
    399 		if ((sc->sc_flags & VNF_KLABEL) == 0)
    400 			sc->sc_flags &= ~VNF_VLABEL;
    401 	}
    402 
    403 	vndunlock(sc);
    404 	return (0);
    405 }
    406 
    407 /*
    408  * Queue the request, and wake up the kernel thread to handle it.
    409  */
    410 static void
    411 vndstrategy(struct buf *bp)
    412 {
    413 	int unit = vndunit(bp->b_dev);
    414 	struct vnd_softc *vnd = &vnd_softc[unit];
    415 	struct disklabel *lp = vnd->sc_dkdev.dk_label;
    416 	int s = splbio();
    417 
    418 	bp->b_resid = bp->b_bcount;
    419 
    420 	if ((vnd->sc_flags & VNF_INITED) == 0) {
    421 		bp->b_error = ENXIO;
    422 		bp->b_flags |= B_ERROR;
    423 		goto done;
    424 	}
    425 
    426 	/*
    427 	 * The transfer must be a whole number of blocks.
    428 	 */
    429 	if ((bp->b_bcount % lp->d_secsize) != 0) {
    430 		bp->b_error = EINVAL;
    431 		bp->b_flags |= B_ERROR;
    432 		goto done;
    433 	}
    434 
    435 	/*
    436 	 * check if we're read-only.
    437 	 */
    438 	if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
    439 		bp->b_error = EACCES;
    440 		bp->b_flags |= B_ERROR;
    441 		goto done;
    442 	}
    443 
    444 	/*
    445 	 * Do bounds checking and adjust transfer.  If there's an error,
    446 	 * the bounds check will flag that for us.
    447 	 */
    448 	if (DISKPART(bp->b_dev) != RAW_PART) {
    449 		if (bounds_check_with_label(&vnd->sc_dkdev,
    450 		    bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
    451 			goto done;
    452 	}
    453 
    454 	/* If it's a nil transfer, wake up the top half now. */
    455 	if (bp->b_bcount == 0)
    456 		goto done;
    457 #ifdef DEBUG
    458 	if (vnddebug & VDB_FOLLOW)
    459 		printf("vndstrategy(%p): unit %d\n", bp, unit);
    460 #endif
    461 	BUFQ_PUT(&vnd->sc_tab, bp);
    462 	wakeup(&vnd->sc_tab);
    463 	splx(s);
    464 	return;
    465 done:
    466 	biodone(bp);
    467 	splx(s);
    468 }
    469 
    470 void
    471 vndthread(void *arg)
    472 {
    473 	struct vnd_softc *vnd = arg;
    474 	struct buf *bp;
    475 	struct vndxfer *vnx;
    476 	struct mount *mp;
    477 	int s, bsize, resid;
    478 	off_t bn;
    479 	caddr_t addr;
    480 	int sz, flags, error;
    481 	struct disklabel *lp;
    482 	struct partition *pp;
    483 
    484 	s = splbio();
    485 	vnd->sc_flags |= VNF_KTHREAD;
    486 	wakeup(&vnd->sc_kthread);
    487 
    488 	/*
    489 	 * Dequeue requests, break them into bsize pieces and submit using
    490 	 * VOP_BMAP/VOP_STRATEGY.
    491 	 */
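        	/*
        	 * Completion of the pieces is tracked by the shared vndxfer:
        	 * vx_pending counts outstanding vndbufs and vx_error keeps the
        	 * first error; vndiodone() finishes the original buffer once
        	 * every piece has returned.
        	 */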
    492 	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
    493 		bp = BUFQ_GET(&vnd->sc_tab);
    494 		if (bp == NULL) {
    495 			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
    496 			continue;
    497 		}
    498 		splx(s);
    499 
    500 #ifdef DEBUG
    501 		if (vnddebug & VDB_FOLLOW)
    502 			printf("vndthread(%p)\n", bp);
    503 #endif
    504 		lp = vnd->sc_dkdev.dk_label;
    505 		bp->b_resid = bp->b_bcount;
    506 
    507 		/*
    508 		 * Put the block number in terms of the logical blocksize
    509 		 * of the "device".
    510 		 */
    511 		bn = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
    512 
    513 		/*
    514 		 * Translate the partition-relative block number to an absolute.
    515 		 */
    516 		if (DISKPART(bp->b_dev) != RAW_PART) {
    517 			pp = &vnd->sc_dkdev.dk_label->d_partitions[
    518 			    DISKPART(bp->b_dev)];
    519 			bn += pp->p_offset;
    520 		}
    521 
    522 		/* ...and convert to a byte offset within the file. */
    523 		bn *= lp->d_secsize;
    524 
    525 		if (vnd->sc_vp->v_mount == NULL) {
    526 			bp->b_error = ENXIO;
    527 			bp->b_flags |= B_ERROR;
    528 			goto done;
    529 		}
    530 #ifdef VND_COMPRESSION
    531 		/* handle a compressed read */
    532 		if ((bp->b_flags & B_READ) && (vnd->sc_flags & VNF_COMP)) {
    533 			compstrategy(bp, bn);
    534 			goto done;
    535 		}
    536 #endif /* VND_COMPRESSION */
    537 
    538 		bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
    539 		addr = bp->b_data;
    540 		flags = (bp->b_flags & (B_READ|B_ASYNC)) | B_CALL;
    541 
    542 		/*
    543 		 * Allocate a header for this transfer and link it to the
    544 		 * buffer
    545 		 */
    546 		s = splbio();
    547 		vnx = VND_GETXFER(vnd);
    548 		splx(s);
    549 		vnx->vx_flags = VX_BUSY;
    550 		vnx->vx_error = 0;
    551 		vnx->vx_pending = 0;
    552 		vnx->vx_bp = bp;
    553 
    554 		if ((flags & B_READ) == 0)
    555 			vn_start_write(vnd->sc_vp, &mp, V_WAIT);
    556 
    557 		/*
    558 		 * Feed requests sequentially.
    559 		 * We do it this way to keep from flooding NFS servers if we
    560 		 * are connected to an NFS file.  This places the burden on
    561 		 * the client rather than the server.
    562 		 */
    563 		for (resid = bp->b_resid; resid; resid -= sz) {
    564 			struct vndbuf *nbp;
    565 			struct vnode *vp;
    566 			daddr_t nbn;
    567 			int off, nra;
    568 
    569 			nra = 0;
    570 			vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
    571 			error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
    572 			VOP_UNLOCK(vnd->sc_vp, 0);
    573 
    574 			if (error == 0 && (long)nbn == -1)
    575 				error = EIO;
    576 
    577 			/*
    578 			 * If there was an error or a hole in the file...punt.
    579 			 * Note that we may have to wait for any operations
    580 			 * that we have already fired off before releasing
    581 			 * the buffer.
    582 			 *
    583 			 * XXX we could deal with holes here but it would be
    584 			 * a hassle (in the write case).
    585 			 */
    586 			if (error) {
    587 				s = splbio();
    588 				vnx->vx_error = error;
    589 				goto out;
    590 			}
    591 
    592 #ifdef DEBUG
    593 			if (!dovndcluster)
    594 				nra = 0;
    595 #endif
    596 
    597 			if ((off = bn % bsize) != 0)
    598 				sz = bsize - off;
    599 			else
    600 				sz = (1 + nra) * bsize;
    601 			if (resid < sz)
    602 				sz = resid;
    603 #ifdef	DEBUG
    604 			if (vnddebug & VDB_IO)
    605 				printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
    606 				       " sz 0x%x\n",
    607 				    vnd->sc_vp, vp, (long long)bn, nbn, sz);
    608 #endif
    609 
    610 			s = splbio();
    611 			while (vnd->sc_active >= vnd->sc_maxactive) {
    612 				tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
    613 			}
    614 			vnd->sc_active++;
    615 			nbp = VND_GETBUF(vnd);
    616 			splx(s);
    617 			BUF_INIT(&nbp->vb_buf);
    618 			nbp->vb_buf.b_flags = flags;
    619 			nbp->vb_buf.b_bcount = sz;
    620 			nbp->vb_buf.b_bufsize = round_page((ulong)addr + sz)
    621 			    - trunc_page((ulong) addr);
    622 			nbp->vb_buf.b_error = 0;
    623 			nbp->vb_buf.b_data = addr;
    624 			nbp->vb_buf.b_blkno = nbp->vb_buf.b_rawblkno = nbn + btodb(off);
    625 			nbp->vb_buf.b_proc = bp->b_proc;
    626 			nbp->vb_buf.b_iodone = vndiodone;
    627 			nbp->vb_buf.b_vp = vp;
    628 
    629 			nbp->vb_xfer = vnx;
    630 
    631 			BIO_COPYPRIO(&nbp->vb_buf, bp);
    632 
    633 			/*
    634 			 * Just sort by block number
    635 			 */
    636 			s = splbio();
    637 			if (vnx->vx_error != 0) {
    638 				VND_PUTBUF(vnd, nbp);
    639 				goto out;
    640 			}
    641 			vnx->vx_pending++;
    642 #ifdef DEBUG
    643 			if (vnddebug & VDB_IO)
    644 				printf("vndstart(%ld): bp %p vp %p blkno "
    645 				    "0x%" PRIx64 " flags %x addr %p cnt 0x%x\n",
    646 				    (long) (vnd-vnd_softc), &nbp->vb_buf,
    647 				    nbp->vb_buf.b_vp, nbp->vb_buf.b_blkno,
    648 				    nbp->vb_buf.b_flags, nbp->vb_buf.b_data,
    649 				    nbp->vb_buf.b_bcount);
    650 #endif
    651 
    652 			/* Instrumentation. */
    653 			disk_busy(&vnd->sc_dkdev);
    654 
    655 			if ((nbp->vb_buf.b_flags & B_READ) == 0)
    656 				vp->v_numoutput++;
    657 			VOP_STRATEGY(vp, &nbp->vb_buf);
    658 
    659 			splx(s);
    660 			bn += sz;
    661 			addr += sz;
    662 		}
    663 
    664 		s = splbio();
    665 
    666 out: /* Arrive here at splbio */
    667 		if ((flags & B_READ) == 0)
    668 			vn_finished_write(mp, 0);
    669 		vnx->vx_flags &= ~VX_BUSY;
    670 		if (vnx->vx_pending == 0) {
    671 			if (vnx->vx_error != 0) {
    672 				bp->b_error = vnx->vx_error;
    673 				bp->b_flags |= B_ERROR;
    674 			}
    675 			VND_PUTXFER(vnd, vnx);
    676 			biodone(bp);
    677 		}
    678 		continue;
    679 done:
    680 		biodone(bp);
    681 		s = splbio();
    682 	}
    683 
    684 	vnd->sc_flags &= ~(VNF_KTHREAD | VNF_VUNCONF);
    685 	wakeup(&vnd->sc_kthread);
    686 	splx(s);
    687 	kthread_exit(0);
    688 }
    689 
    690 
    691 static void
    692 vndiodone(struct buf *bp)
    693 {
    694 	struct vndbuf *vbp = (struct vndbuf *) bp;
    695 	struct vndxfer *vnx = (struct vndxfer *)vbp->vb_xfer;
    696 	struct buf *pbp = vnx->vx_bp;
    697 	struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
    698 	int s, resid;
    699 
    700 	s = splbio();
    701 #ifdef DEBUG
    702 	if (vnddebug & VDB_IO)
    703 		printf("vndiodone(%ld): vbp %p vp %p blkno 0x%" PRIx64
    704 		       " addr %p cnt 0x%x\n",
    705 		    (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
    706 		    vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
    707 		    vbp->vb_buf.b_bcount);
    708 #endif
    709 
    710 	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
    711 	pbp->b_resid -= resid;
    712 	disk_unbusy(&vnd->sc_dkdev, resid, (pbp->b_flags & B_READ));
    713 	vnx->vx_pending--;
    714 
    715 	if (vbp->vb_buf.b_error) {
    716 #ifdef DEBUG
    717 		if (vnddebug & VDB_IO)
    718 			printf("vndiodone: vbp %p error %d\n", vbp,
    719 			    vbp->vb_buf.b_error);
    720 #endif
    721 		vnx->vx_error = vbp->vb_buf.b_error;
    722 	}
    723 
    724 	VND_PUTBUF(vnd, vbp);
    725 
    726 	/*
    727 	 * Wrap up this transaction if it has run to completion or, in
    728 	 * case of an error, when all auxiliary buffers have returned.
    729 	 */
    730 	if (vnx->vx_error != 0) {
    731 		pbp->b_flags |= B_ERROR;
    732 		pbp->b_error = vnx->vx_error;
    733 		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
    734 
    735 #ifdef DEBUG
    736 			if (vnddebug & VDB_IO)
    737 				printf("vndiodone: pbp %p iodone: error %d\n",
    738 					pbp, vnx->vx_error);
    739 #endif
    740 			VND_PUTXFER(vnd, vnx);
    741 			biodone(pbp);
    742 		}
    743 	} else if (pbp->b_resid == 0) {
    744 
    745 #ifdef DIAGNOSTIC
    746 		if (vnx->vx_pending != 0)
    747 			panic("vndiodone: vnx pending: %d", vnx->vx_pending);
    748 #endif
    749 
    750 		if ((vnx->vx_flags & VX_BUSY) == 0) {
    751 #ifdef DEBUG
    752 			if (vnddebug & VDB_IO)
    753 				printf("vndiodone: pbp %p iodone\n", pbp);
    754 #endif
    755 			VND_PUTXFER(vnd, vnx);
    756 			biodone(pbp);
    757 		}
    758 	}
    759 
    760 	vnd->sc_active--;
    761 	wakeup(&vnd->sc_tab);
    762 	splx(s);
    763 }
    764 
    765 /* ARGSUSED */
    766 static int
    767 vndread(dev_t dev, struct uio *uio, int flags)
    768 {
    769 	int unit = vndunit(dev);
    770 	struct vnd_softc *sc;
    771 
    772 #ifdef DEBUG
    773 	if (vnddebug & VDB_FOLLOW)
    774 		printf("vndread(0x%x, %p)\n", dev, uio);
    775 #endif
    776 
    777 	if (unit >= numvnd)
    778 		return (ENXIO);
    779 	sc = &vnd_softc[unit];
    780 
    781 	if ((sc->sc_flags & VNF_INITED) == 0)
    782 		return (ENXIO);
    783 
    784 	return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
    785 }
    786 
    787 /* ARGSUSED */
    788 static int
    789 vndwrite(dev_t dev, struct uio *uio, int flags)
    790 {
    791 	int unit = vndunit(dev);
    792 	struct vnd_softc *sc;
    793 
    794 #ifdef DEBUG
    795 	if (vnddebug & VDB_FOLLOW)
    796 		printf("vndwrite(0x%x, %p)\n", dev, uio);
    797 #endif
    798 
    799 	if (unit >= numvnd)
    800 		return (ENXIO);
    801 	sc = &vnd_softc[unit];
    802 
    803 	if ((sc->sc_flags & VNF_INITED) == 0)
    804 		return (ENXIO);
    805 
    806 	return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
    807 }
    808 
    809 /* ARGSUSED */
    810 static int
    811 vndioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
    812 {
    813 	int unit = vndunit(dev);
    814 	struct vnd_softc *vnd;
    815 	struct vnd_ioctl *vio;
    816 	struct vattr vattr;
    817 	struct nameidata nd;
    818 	int error, part, pmask;
    819 	size_t geomsize;
    820 	int fflags;
    821 #ifdef __HAVE_OLD_DISKLABEL
    822 	struct disklabel newlabel;
    823 #endif
    824 
    825 #ifdef DEBUG
    826 	if (vnddebug & VDB_FOLLOW)
    827 		printf("vndioctl(0x%x, 0x%lx, %p, 0x%x, %p): unit %d\n",
    828 		    dev, cmd, data, flag, p, unit);
    829 #endif
    830 	if (unit >= numvnd)
    831 		return (ENXIO);
    832 
    833 	vnd = &vnd_softc[unit];
    834 	vio = (struct vnd_ioctl *)data;
    835 
    836 	/* Must be open for writes for these commands... */
    837 	switch (cmd) {
    838 	case VNDIOCSET:
    839 	case VNDIOCCLR:
    840 	case DIOCSDINFO:
    841 	case DIOCWDINFO:
    842 #ifdef __HAVE_OLD_DISKLABEL
    843 	case ODIOCSDINFO:
    844 	case ODIOCWDINFO:
    845 #endif
    846 	case DIOCKLABEL:
    847 	case DIOCWLABEL:
    848 		if ((flag & FWRITE) == 0)
    849 			return (EBADF);
    850 	}
    851 
    852 	/* Must be initialized for these... */
    853 	switch (cmd) {
    854 	case VNDIOCCLR:
    855 	case DIOCGDINFO:
    856 	case DIOCSDINFO:
    857 	case DIOCWDINFO:
    858 	case DIOCGPART:
    859 	case DIOCKLABEL:
    860 	case DIOCWLABEL:
    861 	case DIOCGDEFLABEL:
    862 #ifdef __HAVE_OLD_DISKLABEL
    863 	case ODIOCGDINFO:
    864 	case ODIOCSDINFO:
    865 	case ODIOCWDINFO:
    866 	case ODIOCGDEFLABEL:
    867 #endif
    868 		if ((vnd->sc_flags & VNF_INITED) == 0)
    869 			return (ENXIO);
    870 	}
    871 
    872 	switch (cmd) {
    873 	case VNDIOCSET:
    874 		if (vnd->sc_flags & VNF_INITED)
    875 			return (EBUSY);
    876 
    877 		if ((error = vndlock(vnd)) != 0)
    878 			return (error);
    879 
    880 		fflags = FREAD;
    881 		if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
    882 			fflags |= FWRITE;
    883 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
    884 		if ((error = vn_open(&nd, fflags, 0)) != 0)
    885 			goto unlock_and_exit;
    886 		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
    887 		if (!error && nd.ni_vp->v_type != VREG)
    888 			error = EOPNOTSUPP;
    889 		if (error) {
    890 			VOP_UNLOCK(nd.ni_vp, 0);
    891 			goto close_and_exit;
    892 		}
    893 
    894 		/* If using a compressed file, initialize its info */
    895 		/* (or abort with an error if kernel has no compression) */
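        		/*
        		 * Layout consumed below: a struct vnd_comp_header whose
        		 * block_size and num_blocks are in network byte order,
        		 * then num_blocks + 1 big-endian 64-bit offsets (the last
        		 * is the file byte size), then the zlib-compressed blocks
        		 * that compstrategy() inflates on demand.
        		 */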
    896 		if (vio->vnd_flags & VNF_COMP) {
    897 #ifdef VND_COMPRESSION
    898 			struct vnd_comp_header *ch;
    899 			int i;
    900 			u_int32_t comp_size;
    901 			u_int32_t comp_maxsize;
    902 
    903 			/* allocate space for compressed file header */
    904 			ch = malloc(sizeof(struct vnd_comp_header),
    905 			M_TEMP, M_WAITOK);
    906 
    907 			/* read compressed file header */
    908 			error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)ch,
    909 			  sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE,
    910 			  IO_UNIT|IO_NODELOCKED, p->p_ucred, NULL, NULL);
    911 			if(error) {
    912 				free(ch, M_TEMP);
    913 				VOP_UNLOCK(nd.ni_vp, 0);
    914 				goto close_and_exit;
    915 			}
    916 
    917 			/* save some header info */
    918 			vnd->sc_comp_blksz = ntohl(ch->block_size);
    919 			/* note last offset is the file byte size */
    920 			vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1;
    921 			free(ch, M_TEMP);
    922 			if(vnd->sc_comp_blksz % DEV_BSIZE !=0) {
    923 				VOP_UNLOCK(nd.ni_vp, 0);
    924 				error = EINVAL;
    925 				goto close_and_exit;
    926 			}
    927 			if(sizeof(struct vnd_comp_header) +
    928 			  sizeof(u_int64_t) * vnd->sc_comp_numoffs >
    929 			  vattr.va_size) {
    930 				VOP_UNLOCK(nd.ni_vp, 0);
    931 				error = EINVAL;
    932 				goto close_and_exit;
    933 			}
    934 
    935 			/* set decompressed file size */
    936 			vattr.va_size =
    937 			  (vnd->sc_comp_numoffs - 1) * vnd->sc_comp_blksz;
    938 
    939 			/* allocate space for all the compressed offsets */
    940 			vnd->sc_comp_offsets =
    941 			malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs,
    942 			M_DEVBUF, M_WAITOK);
    943 
    944 			/* read in the offsets */
    945 			error = vn_rdwr(UIO_READ, nd.ni_vp,
    946 			  (caddr_t)vnd->sc_comp_offsets,
    947 			  sizeof(u_int64_t) * vnd->sc_comp_numoffs,
    948 			  sizeof(struct vnd_comp_header), UIO_SYSSPACE,
    949 			  IO_UNIT|IO_NODELOCKED, p->p_ucred, NULL, NULL);
    950 			if(error) {
    951 				VOP_UNLOCK(nd.ni_vp, 0);
    952 				goto close_and_exit;
    953 			}
    954 			/*
    955 			 * Find the largest block size (used as the allocation limit).
    956 			 * Also convert the offsets to native byte order.
    957 			 */
    958 			comp_maxsize = 0;
    959 			for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
    960 				vnd->sc_comp_offsets[i] =
    961 				  be64toh(vnd->sc_comp_offsets[i]);
    962 				comp_size = be64toh(vnd->sc_comp_offsets[i + 1])
    963 				  - vnd->sc_comp_offsets[i];
    964 				if (comp_size > comp_maxsize)
    965 					comp_maxsize = comp_size;
    966 			}
    967 			vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
    968 			  be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]);
    969 
    970 			/* create compressed data buffer */
    971 			vnd->sc_comp_buff = malloc(comp_maxsize,
    972 			  M_DEVBUF, M_WAITOK);
    973 
    974 			/* create decompressed buffer */
    975 			vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
    976 			  M_DEVBUF, M_WAITOK);
    977 			vnd->sc_comp_buffblk = -1;
    978 
    979 			/* Initialize decompress stream */
    980 			bzero(&vnd->sc_comp_stream, sizeof(z_stream));
    981 			vnd->sc_comp_stream.zalloc = vnd_alloc;
    982 			vnd->sc_comp_stream.zfree = vnd_free;
    983 			error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
    984 			if(error) {
    985 				if(vnd->sc_comp_stream.msg)
    986 					printf("vnd%d: compressed file, %s\n",
    987 					  unit, vnd->sc_comp_stream.msg);
    988 				VOP_UNLOCK(nd.ni_vp, 0);
    989 				error = EINVAL;
    990 				goto close_and_exit;
    991 			}
    992 
    993 			vnd->sc_flags |= VNF_COMP | VNF_READONLY;
    994 #else /* !VND_COMPRESSION */
    995 			error = EOPNOTSUPP;
    996 			goto close_and_exit;
    997 #endif /* VND_COMPRESSION */
    998 		}
    999 
   1000 		VOP_UNLOCK(nd.ni_vp, 0);
   1001 		vnd->sc_vp = nd.ni_vp;
   1002 		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
   1003 
   1004 		/*
   1005 		 * Use the specified pseudo-geometry.  If none was provided,
   1006 		 * use the "standard" Adaptec fictitious geometry.
   1007 		 */
   1008 		if (vio->vnd_flags & VNDIOF_HASGEOM) {
   1009 
   1010 			memcpy(&vnd->sc_geom, &vio->vnd_geom,
   1011 			    sizeof(vio->vnd_geom));
   1012 
   1013 			/*
   1014 			 * Sanity-check the sector size.
   1015 			 * XXX Don't allow secsize < DEV_BSIZE.	 Should
   1016 			 * XXX we?
   1017 			 */
   1018 			if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
   1019 			    (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 ||
   1020 			    vnd->sc_geom.vng_ncylinders == 0 ||
   1021 			    (vnd->sc_geom.vng_ntracks *
   1022 			     vnd->sc_geom.vng_nsectors) == 0) {
   1023 				error = EINVAL;
   1024 				goto close_and_exit;
   1025 			}
   1026 
   1027 			/*
   1028 			 * Compute the size (in DEV_BSIZE blocks) specified
   1029 			 * by the geometry.
   1030 			 */
   1031 			geomsize = (vnd->sc_geom.vng_nsectors *
   1032 			    vnd->sc_geom.vng_ntracks *
   1033 			    vnd->sc_geom.vng_ncylinders) *
   1034 			    (vnd->sc_geom.vng_secsize / DEV_BSIZE);
   1035 
   1036 			/*
   1037 			 * Sanity-check the size against the specified
   1038 			 * geometry.
   1039 			 */
   1040 			if (vnd->sc_size < geomsize) {
   1041 				error = EINVAL;
   1042 				goto close_and_exit;
   1043 			}
   1044 		} else {
   1045 			/*
   1046 			 * Size must be at least 2048 DEV_BSIZE blocks
   1047 			 * (1M) in order to use this geometry.
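        			 * (32 sectors * 64 tracks = 2048 blocks of DEV_BSIZE
        			 * bytes, i.e. 1MB at 512-byte blocks -- one full
        			 * cylinder of the default geometry below.)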
   1048 			 */
   1049 			if (vnd->sc_size < (32 * 64)) {
   1050 				error = EINVAL;
   1051 				goto close_and_exit;
   1052 			}
   1053 
   1054 			vnd->sc_geom.vng_secsize = DEV_BSIZE;
   1055 			vnd->sc_geom.vng_nsectors = 32;
   1056 			vnd->sc_geom.vng_ntracks = 64;
   1057 			vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
   1058 		}
   1059 
   1060 		if (vio->vnd_flags & VNDIOF_READONLY) {
   1061 			vnd->sc_flags |= VNF_READONLY;
   1062 		}
   1063 
   1064 		if ((error = vndsetcred(vnd, p->p_ucred)) != 0)
   1065 			goto close_and_exit;
   1066 
   1067 		memset(vnd->sc_xname, 0, sizeof(vnd->sc_xname)); /* XXX */
   1068 		snprintf(vnd->sc_xname, sizeof(vnd->sc_xname), "vnd%d", unit);
   1069 
   1070 
   1071 		vndthrottle(vnd, vnd->sc_vp);
   1072 		vio->vnd_size = dbtob(vnd->sc_size);
   1073 		vnd->sc_flags |= VNF_INITED;
   1074 
   1075 		/* create the kernel thread, wait for it to be up */
   1076 		error = kthread_create1(vndthread, vnd, &vnd->sc_kthread,
   1077 		    vnd->sc_xname);
   1078 		if (error)
   1079 			goto close_and_exit;
   1080 		while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
   1081 			tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
   1082 		}
   1083 #ifdef DEBUG
   1084 		if (vnddebug & VDB_INIT)
   1085 			printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
   1086 			    vnd->sc_vp, (unsigned long) vnd->sc_size,
   1087 			    vnd->sc_geom.vng_secsize,
   1088 			    vnd->sc_geom.vng_nsectors,
   1089 			    vnd->sc_geom.vng_ntracks,
   1090 			    vnd->sc_geom.vng_ncylinders);
   1091 #endif
   1092 
   1093 		/* Attach the disk. */
   1094 		vnd->sc_dkdev.dk_name = vnd->sc_xname;
   1095 		disk_attach(&vnd->sc_dkdev);
   1096 
   1097 		/* Initialize the xfer and buffer pools. */
   1098 		pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
   1099 		    0, 0, "vndxpl", NULL);
   1100 		pool_init(&vnd->sc_vbpool, sizeof(struct vndbuf), 0,
   1101 		    0, 0, "vndbpl", NULL);
   1102 
   1103 		/* Try and read the disklabel. */
   1104 		vndgetdisklabel(dev);
   1105 
   1106 		vndunlock(vnd);
   1107 
   1108 		break;
   1109 
   1110 close_and_exit:
   1111 		(void) vn_close(nd.ni_vp, fflags, p->p_ucred, p);
   1112 unlock_and_exit:
   1113 #ifdef VND_COMPRESSION
   1114 		/* free any allocated memory (for compressed file) */
   1115 		if(vnd->sc_comp_offsets) {
   1116 			free(vnd->sc_comp_offsets, M_DEVBUF);
   1117 			vnd->sc_comp_offsets = NULL;
   1118 		}
   1119 		if(vnd->sc_comp_buff) {
   1120 			free(vnd->sc_comp_buff, M_DEVBUF);
   1121 			vnd->sc_comp_buff = NULL;
   1122 		}
   1123 		if(vnd->sc_comp_decombuf) {
   1124 			free(vnd->sc_comp_decombuf, M_DEVBUF);
   1125 			vnd->sc_comp_decombuf = NULL;
   1126 		}
   1127 #endif /* VND_COMPRESSION */
   1128 		vndunlock(vnd);
   1129 		return (error);
   1130 
   1131 	case VNDIOCCLR:
   1132 		if ((error = vndlock(vnd)) != 0)
   1133 			return (error);
   1134 
   1135 		/*
   1136 		 * Don't unconfigure if any other partitions are open
   1137 		 * or if both the character and block flavors of this
   1138 		 * partition are open.
   1139 		 */
   1140 		part = DISKPART(dev);
   1141 		pmask = (1 << part);
   1142 		if (((vnd->sc_dkdev.dk_openmask & ~pmask) ||
   1143 		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
   1144 		    (vnd->sc_dkdev.dk_copenmask & pmask))) &&
   1145 			!(vio->vnd_flags & VNDIOF_FORCE)) {
   1146 			vndunlock(vnd);
   1147 			return (EBUSY);
   1148 		}
   1149 
   1150 		/*
   1151 		 * XXX vndclear() might call vndclose() implicitly;
   1152 		 * release lock to avoid recursion
   1153 		 */
   1154 		vndunlock(vnd);
   1155 		vndclear(vnd, minor(dev));
   1156 #ifdef DEBUG
   1157 		if (vnddebug & VDB_INIT)
   1158 			printf("vndioctl: CLRed\n");
   1159 #endif
   1160 
   1161 		/* Destroy the xfer and buffer pools. */
   1162 		pool_destroy(&vnd->sc_vxpool);
   1163 		pool_destroy(&vnd->sc_vbpool);
   1164 
   1165 		/* Detach the disk. */
   1166 		disk_detach(&vnd->sc_dkdev);
   1167 
   1168 		break;
   1169 
   1170 	case VNDIOCGET: {
   1171 		struct vnd_user *vnu;
   1172 		struct vattr va;
   1173 
   1174 		vnu = (struct vnd_user *)data;
   1175 
   1176 		if (vnu->vnu_unit == -1)
   1177 			vnu->vnu_unit = unit;
   1178 		if (vnu->vnu_unit >= numvnd)
   1179 			return (ENXIO);
   1180 		if (vnu->vnu_unit < 0)
   1181 			return (EINVAL);
   1182 
   1183 		vnd = &vnd_softc[vnu->vnu_unit];
   1184 
   1185 		if (vnd->sc_flags & VNF_INITED) {
   1186 			error = VOP_GETATTR(vnd->sc_vp, &va, p->p_ucred, p);
   1187 			if (error)
   1188 				return (error);
   1189 			vnu->vnu_dev = va.va_fsid;
   1190 			vnu->vnu_ino = va.va_fileid;
   1191 		}
   1192 		else {
   1193 			/* unused is not an error */
   1194 			vnu->vnu_dev = 0;
   1195 			vnu->vnu_ino = 0;
   1196 		}
   1197 
   1198 		break;
   1199 	}
   1200 
   1201 	case DIOCGDINFO:
   1202 		*(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
   1203 		break;
   1204 
   1205 #ifdef __HAVE_OLD_DISKLABEL
   1206 	case ODIOCGDINFO:
   1207 		newlabel = *(vnd->sc_dkdev.dk_label);
   1208 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1209 			return ENOTTY;
   1210 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1211 		break;
   1212 #endif
   1213 
   1214 	case DIOCGPART:
   1215 		((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
   1216 		((struct partinfo *)data)->part =
   1217 		    &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1218 		break;
   1219 
   1220 	case DIOCWDINFO:
   1221 	case DIOCSDINFO:
   1222 #ifdef __HAVE_OLD_DISKLABEL
   1223 	case ODIOCWDINFO:
   1224 	case ODIOCSDINFO:
   1225 #endif
   1226 	{
   1227 		struct disklabel *lp;
   1228 
   1229 		if ((error = vndlock(vnd)) != 0)
   1230 			return (error);
   1231 
   1232 		vnd->sc_flags |= VNF_LABELLING;
   1233 
   1234 #ifdef __HAVE_OLD_DISKLABEL
   1235 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1236 			memset(&newlabel, 0, sizeof newlabel);
   1237 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1238 			lp = &newlabel;
   1239 		} else
   1240 #endif
   1241 		lp = (struct disklabel *)data;
   1242 
   1243 		error = setdisklabel(vnd->sc_dkdev.dk_label,
   1244 		    lp, 0, vnd->sc_dkdev.dk_cpulabel);
   1245 		if (error == 0) {
   1246 			if (cmd == DIOCWDINFO
   1247 #ifdef __HAVE_OLD_DISKLABEL
   1248 			    || cmd == ODIOCWDINFO
   1249 #endif
   1250 			   )
   1251 				error = writedisklabel(VNDLABELDEV(dev),
   1252 				    vndstrategy, vnd->sc_dkdev.dk_label,
   1253 				    vnd->sc_dkdev.dk_cpulabel);
   1254 		}
   1255 
   1256 		vnd->sc_flags &= ~VNF_LABELLING;
   1257 
   1258 		vndunlock(vnd);
   1259 
   1260 		if (error)
   1261 			return (error);
   1262 		break;
   1263 	}
   1264 
   1265 	case DIOCKLABEL:
   1266 		if (*(int *)data != 0)
   1267 			vnd->sc_flags |= VNF_KLABEL;
   1268 		else
   1269 			vnd->sc_flags &= ~VNF_KLABEL;
   1270 		break;
   1271 
   1272 	case DIOCWLABEL:
   1273 		if (*(int *)data != 0)
   1274 			vnd->sc_flags |= VNF_WLABEL;
   1275 		else
   1276 			vnd->sc_flags &= ~VNF_WLABEL;
   1277 		break;
   1278 
   1279 	case DIOCGDEFLABEL:
   1280 		vndgetdefaultlabel(vnd, (struct disklabel *)data);
   1281 		break;
   1282 
   1283 #ifdef __HAVE_OLD_DISKLABEL
   1284 	case ODIOCGDEFLABEL:
   1285 		vndgetdefaultlabel(vnd, &newlabel);
   1286 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1287 			return ENOTTY;
   1288 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1289 		break;
   1290 #endif
   1291 
   1292 	default:
   1293 		return (ENOTTY);
   1294 	}
   1295 
   1296 	return (0);
   1297 }
   1298 
   1299 /*
   1300  * Duplicate the current process's credentials.  Since we are called only
   1301  * as the result of a SET ioctl and only root can do that, any future access
   1302  * to this "disk" is essentially as root.  Note that credentials may change
   1303  * if some other uid can write directly to the mapped file (NFS).
   1304  */
   1305 static int
   1306 vndsetcred(struct vnd_softc *vnd, struct ucred *cred)
   1307 {
   1308 	struct uio auio;
   1309 	struct iovec aiov;
   1310 	char *tmpbuf;
   1311 	int error;
   1312 
   1313 	vnd->sc_cred = crdup(cred);
   1314 	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
   1315 
   1316 	/* XXX: Horrible kludge to establish credentials for NFS */
   1317 	aiov.iov_base = tmpbuf;
   1318 	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
   1319 	auio.uio_iov = &aiov;
   1320 	auio.uio_iovcnt = 1;
   1321 	auio.uio_offset = 0;
   1322 	auio.uio_rw = UIO_READ;
   1323 	auio.uio_segflg = UIO_SYSSPACE;
   1324 	auio.uio_resid = aiov.iov_len;
   1325 	vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
   1326 	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
   1327 	if (error == 0) {
   1328 		/*
   1329 		 * Because vnd does all IO directly through the vnode
   1330 		 * we need to flush (at least) the buffer from the above
   1331 		 * VOP_READ from the buffer cache to prevent cache
   1332 		 * incoherencies.  Also, be careful to write dirty
   1333 		 * buffers back to stable storage.
   1334 		 */
   1335 		error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
   1336 			    curproc, 0, 0);
   1337 	}
   1338 	VOP_UNLOCK(vnd->sc_vp, 0);
   1339 
   1340 	free(tmpbuf, M_TEMP);
   1341 	return (error);
   1342 }
   1343 
   1344 /*
   1345  * Set maxactive based on FS type
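         * (sc_maxactive bounds how many component I/Os vndthread keeps in
         * flight at once; NFS-backed files get a lower limit to avoid
         * flooding the server.)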
   1346  */
   1347 static void
   1348 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
   1349 {
   1350 #ifdef NFS
   1351 	extern int (**nfsv2_vnodeop_p)(void *);
   1352 
   1353 	if (vp->v_op == nfsv2_vnodeop_p)
   1354 		vnd->sc_maxactive = 2;
   1355 	else
   1356 #endif
   1357 		vnd->sc_maxactive = 8;
   1358 
   1359 	if (vnd->sc_maxactive < 1)
   1360 		vnd->sc_maxactive = 1;
   1361 }
   1362 
   1363 #if 0
   1364 static void
   1365 vndshutdown(void)
   1366 {
   1367 	struct vnd_softc *vnd;
   1368 
   1369 	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
   1370 		if (vnd->sc_flags & VNF_INITED)
   1371 			vndclear(vnd);
   1372 }
   1373 #endif
   1374 
   1375 static void
   1376 vndclear(struct vnd_softc *vnd, int myminor)
   1377 {
   1378 	struct vnode *vp = vnd->sc_vp;
   1379 	struct proc *p = curproc;		/* XXX */
   1380 	int fflags = FREAD;
   1381 	int bmaj, cmaj, i, mn;
   1382 	int s;
   1383 
   1384 #ifdef DEBUG
   1385 	if (vnddebug & VDB_FOLLOW)
   1386 		printf("vndclear(%p): vp %p\n", vnd, vp);
   1387 #endif
   1388 	/* locate the major number */
   1389 	bmaj = bdevsw_lookup_major(&vnd_bdevsw);
   1390 	cmaj = cdevsw_lookup_major(&vnd_cdevsw);
   1391 
   1392 	/* Nuke the vnodes for any open instances */
   1393 	for (i = 0; i < MAXPARTITIONS; i++) {
   1394 		mn = DISKMINOR(vnd->sc_unit, i);
   1395 		vdevgone(bmaj, mn, mn, VBLK);
   1396 		if (mn != myminor) /* XXX avoid killing our own vnode */
   1397 			vdevgone(cmaj, mn, mn, VCHR);
   1398 	}
   1399 
   1400 	if ((vnd->sc_flags & VNF_READONLY) == 0)
   1401 		fflags |= FWRITE;
   1402 
   1403 	s = splbio();
   1404 	bufq_drain(&vnd->sc_tab);
   1405 	splx(s);
   1406 
   1407 	vnd->sc_flags |= VNF_VUNCONF;
   1408 	wakeup(&vnd->sc_tab);
   1409 	while (vnd->sc_flags & VNF_KTHREAD)
   1410 		tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
   1411 
   1412 #ifdef VND_COMPRESSION
   1413 	/* free the compressed file buffers */
   1414 	if(vnd->sc_flags & VNF_COMP) {
   1415 		if(vnd->sc_comp_offsets) {
   1416 			free(vnd->sc_comp_offsets, M_DEVBUF);
   1417 			vnd->sc_comp_offsets = NULL;
   1418 		}
   1419 		if(vnd->sc_comp_buff) {
   1420 			free(vnd->sc_comp_buff, M_DEVBUF);
   1421 			vnd->sc_comp_buff = NULL;
   1422 		}
   1423 		if(vnd->sc_comp_decombuf) {
   1424 			free(vnd->sc_comp_decombuf, M_DEVBUF);
   1425 			vnd->sc_comp_decombuf = NULL;
   1426 		}
   1427 	}
   1428 #endif /* VND_COMPRESSION */
   1429 	vnd->sc_flags &=
   1430 	    ~(VNF_INITED | VNF_READONLY | VNF_VLABEL
   1431 	      | VNF_VUNCONF | VNF_COMP);
   1432 	if (vp == (struct vnode *)0)
   1433 		panic("vndclear: null vp");
   1434 	(void) vn_close(vp, fflags, vnd->sc_cred, p);
   1435 	crfree(vnd->sc_cred);
   1436 	vnd->sc_vp = (struct vnode *)0;
   1437 	vnd->sc_cred = (struct ucred *)0;
   1438 	vnd->sc_size = 0;
   1439 }
   1440 
   1441 static int
   1442 vndsize(dev_t dev)
   1443 {
   1444 	struct vnd_softc *sc;
   1445 	struct disklabel *lp;
   1446 	int part, unit, omask;
   1447 	int size;
   1448 
   1449 	unit = vndunit(dev);
   1450 	if (unit >= numvnd)
   1451 		return (-1);
   1452 	sc = &vnd_softc[unit];
   1453 
   1454 	if ((sc->sc_flags & VNF_INITED) == 0)
   1455 		return (-1);
   1456 
   1457 	part = DISKPART(dev);
   1458 	omask = sc->sc_dkdev.dk_openmask & (1 << part);
   1459 	lp = sc->sc_dkdev.dk_label;
   1460 
   1461 	if (omask == 0 && vndopen(dev, 0, S_IFBLK, curproc))
   1462 		return (-1);
   1463 
   1464 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
   1465 		size = -1;
   1466 	else
   1467 		size = lp->d_partitions[part].p_size *
   1468 		    (lp->d_secsize / DEV_BSIZE);
   1469 
   1470 	if (omask == 0 && vndclose(dev, 0, S_IFBLK, curproc))
   1471 		return (-1);
   1472 
   1473 	return (size);
   1474 }
   1475 
   1476 static int
   1477 vnddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
   1478 {
   1479 
   1480 	/* Not implemented. */
   1481 	return ENXIO;
   1482 }
   1483 
   1484 static void
   1485 vndgetdefaultlabel(struct vnd_softc *sc, struct disklabel *lp)
   1486 {
   1487 	struct vndgeom *vng = &sc->sc_geom;
   1488 	struct partition *pp;
   1489 
   1490 	memset(lp, 0, sizeof(*lp));
   1491 
   1492 	lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
   1493 	lp->d_secsize = vng->vng_secsize;
   1494 	lp->d_nsectors = vng->vng_nsectors;
   1495 	lp->d_ntracks = vng->vng_ntracks;
   1496 	lp->d_ncylinders = vng->vng_ncylinders;
   1497 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   1498 
   1499 	strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
   1500 	lp->d_type = DTYPE_VND;
   1501 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   1502 	lp->d_rpm = 3600;
   1503 	lp->d_interleave = 1;
   1504 	lp->d_flags = 0;
   1505 
   1506 	pp = &lp->d_partitions[RAW_PART];
   1507 	pp->p_offset = 0;
   1508 	pp->p_size = lp->d_secperunit;
   1509 	pp->p_fstype = FS_UNUSED;
   1510 	lp->d_npartitions = RAW_PART + 1;
   1511 
   1512 	lp->d_magic = DISKMAGIC;
   1513 	lp->d_magic2 = DISKMAGIC;
   1514 	lp->d_checksum = dkcksum(lp);
   1515 }
   1516 
   1517 /*
   1518  * Read the disklabel from a vnd.  If one is not present, create a fake one.
   1519  */
   1520 static void
   1521 vndgetdisklabel(dev_t dev)
   1522 {
   1523 	struct vnd_softc *sc = &vnd_softc[vndunit(dev)];
   1524 	const char *errstring;
   1525 	struct disklabel *lp = sc->sc_dkdev.dk_label;
   1526 	struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
   1527 	int i;
   1528 
   1529 	memset(clp, 0, sizeof(*clp));
   1530 
   1531 	vndgetdefaultlabel(sc, lp);
   1532 
   1533 	/*
   1534 	 * Call the generic disklabel extraction routine.
   1535 	 */
   1536 	errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
   1537 	if (errstring) {
   1538 		/*
   1539 		 * Lack of disklabel is common, but we print the warning
   1540 		 * anyway, since the message might contain other useful information.
   1541 		 */
   1542 		printf("%s: %s\n", sc->sc_xname, errstring);
   1543 
   1544 		/*
   1545 		 * For historical reasons, if there's no disklabel
   1546 		 * present, all partitions must be FS_BSDFFS and
   1547 		 * occupy the entire disk.
   1548 		 */
   1549 		for (i = 0; i < MAXPARTITIONS; i++) {
   1550 			/*
   1551 			 * Don't wipe out a port-specific hack (such as the
   1552 			 * DOS partition hack of the i386 port).
   1553 			 */
   1554 			if (lp->d_partitions[i].p_size != 0)
   1555 				continue;
   1556 
   1557 			lp->d_partitions[i].p_size = lp->d_secperunit;
   1558 			lp->d_partitions[i].p_offset = 0;
   1559 			lp->d_partitions[i].p_fstype = FS_BSDFFS;
   1560 		}
   1561 
   1562 		strncpy(lp->d_packname, "default label",
   1563 		    sizeof(lp->d_packname));
   1564 
   1565 		lp->d_npartitions = MAXPARTITIONS;
   1566 		lp->d_checksum = dkcksum(lp);
   1567 	}
   1568 
   1569 	/* In-core label now valid. */
   1570 	sc->sc_flags |= VNF_VLABEL;
   1571 }
   1572 
   1573 /*
   1574  * Wait interruptibly for an exclusive lock.
   1575  *
   1576  * XXX
   1577  * Several drivers do this; it should be abstracted and made MP-safe.
   1578  */
   1579 static int
   1580 vndlock(struct vnd_softc *sc)
   1581 {
   1582 	int error;
   1583 
   1584 	while ((sc->sc_flags & VNF_LOCKED) != 0) {
   1585 		sc->sc_flags |= VNF_WANTED;
   1586 		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
   1587 			return (error);
   1588 	}
   1589 	sc->sc_flags |= VNF_LOCKED;
   1590 	return (0);
   1591 }
   1592 
   1593 /*
   1594  * Unlock and wake up any waiters.
   1595  */
   1596 static void
   1597 vndunlock(struct vnd_softc *sc)
   1598 {
   1599 
   1600 	sc->sc_flags &= ~VNF_LOCKED;
   1601 	if ((sc->sc_flags & VNF_WANTED) != 0) {
   1602 		sc->sc_flags &= ~VNF_WANTED;
   1603 		wakeup(sc);
   1604 	}
   1605 }
   1606 
   1607 #ifdef VND_COMPRESSION
   1608 /* compressed file read */
   1609 static void
   1610 compstrategy(struct buf *bp, off_t bn)
   1611 {
   1612 	int error;
   1613 	int unit = vndunit(bp->b_dev);
   1614 	struct vnd_softc *vnd = &vnd_softc[unit];
   1615 	u_int32_t comp_block;
   1616 	struct uio auio;
   1617 	caddr_t addr;
   1618 	int s;
   1619 
   1620 	/* set up constants for data move */
   1621 	auio.uio_rw = UIO_READ;
   1622 	auio.uio_segflg = bp->b_flags & B_PHYS ? UIO_USERSPACE : UIO_SYSSPACE;
   1623 	auio.uio_procp = bp->b_proc;
   1624 
   1625 	/* read, and transfer the data */
   1626 	addr = bp->b_data;
   1627 	s = splbio();
   1628 	while (bp->b_resid > 0) {
   1629 		unsigned length;
   1630 		size_t length_in_buffer;
   1631 		u_int32_t offset_in_buffer;
   1632 		struct iovec aiov;
   1633 
   1634 		/* calculate the compressed block number */
   1635 		comp_block = bn / (off_t)vnd->sc_comp_blksz;
   1636 
   1637 		/* check for good block number */
   1638 		if (comp_block >= vnd->sc_comp_numoffs) {
   1639 			bp->b_error = EINVAL;
   1640 			bp->b_flags |= B_ERROR;
   1641 			splx(s);
   1642 			return;
   1643 		}
   1644 
   1645 		/* read in the compressed block, if not in buffer */
   1646 		if (comp_block != vnd->sc_comp_buffblk) {
   1647 			length = vnd->sc_comp_offsets[comp_block + 1] -
   1648 			    vnd->sc_comp_offsets[comp_block];
   1649 			vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
   1650 			error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
   1651 			    length, vnd->sc_comp_offsets[comp_block],
   1652 			    UIO_SYSSPACE, IO_UNIT, vnd->sc_cred, NULL, NULL);
   1653 			if (error) {
   1654 				bp->b_error = error;
   1655 				bp->b_flags |= B_ERROR;
   1656 				VOP_UNLOCK(vnd->sc_vp, 0);
   1657 				splx(s);
   1658 				return;
   1659 			}
   1660 			/* uncompress the buffer */
   1661 			vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
   1662 			vnd->sc_comp_stream.avail_in = length;
   1663 			vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
   1664 			vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
   1665 			inflateReset(&vnd->sc_comp_stream);
   1666 			error = inflate(&vnd->sc_comp_stream, Z_FINISH);
   1667 			if (error != Z_STREAM_END) {
   1668 				if (vnd->sc_comp_stream.msg)
   1669 					printf("%s: compressed file, %s\n",
   1670 					    vnd->sc_xname,
   1671 					    vnd->sc_comp_stream.msg);
   1672 				bp->b_error = EBADMSG;
   1673 				bp->b_flags |= B_ERROR;
   1674 				VOP_UNLOCK(vnd->sc_vp, 0);
   1675 				splx(s);
   1676 				return;
   1677 			}
   1678 			vnd->sc_comp_buffblk = comp_block;
   1679 			VOP_UNLOCK(vnd->sc_vp, 0);
   1680 		}
   1681 
   1682 		/* transfer the usable uncompressed data */
   1683 		offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
   1684 		length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
   1685 		if (length_in_buffer > bp->b_resid)
   1686 			length_in_buffer = bp->b_resid;
   1687 		auio.uio_iov = &aiov;
   1688 		auio.uio_iovcnt = 1;
   1689 		aiov.iov_base = addr;
   1690 		aiov.iov_len = length_in_buffer;
   1691 		auio.uio_resid = aiov.iov_len;
   1692 		auio.uio_offset = 0;
   1693 		error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
   1694 		    length_in_buffer, &auio);
   1695 		if (error) {
   1696 			bp->b_error = error;
   1697 			bp->b_flags |= B_ERROR;
   1698 			splx(s);
   1699 			return;
   1700 		}
   1701 
   1702 		bn += length_in_buffer;
   1703 		addr += length_in_buffer;
   1704 		bp->b_resid -= length_in_buffer;
   1705 	}
   1706 	splx(s);
   1707 }
   1708 
   1709 /* compression memory allocation routines */
   1710 static void *
   1711 vnd_alloc(void *aux, u_int items, u_int siz)
   1712 {
   1713 	return malloc(items * siz, M_TEMP, M_NOWAIT);
   1714 }
   1715 
   1716 static void
   1717 vnd_free(void *aux, void *ptr)
   1718 {
   1719 	free(ptr, M_TEMP);
   1720 }
   1721 #endif /* VND_COMPRESSION */
   1722