/*
 * spec_vnops.c, revision 1.3 — special-file (device) vnode operations (specfs).
 */
      1 /*
      2  * Copyright (c) 1989 The Regents of the University of California.
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  * 3. All advertising materials mentioning features or use of this software
     14  *    must display the following acknowledgement:
     15  *	This product includes software developed by the University of
     16  *	California, Berkeley and its contributors.
     17  * 4. Neither the name of the University nor the names of its contributors
     18  *    may be used to endorse or promote products derived from this software
     19  *    without specific prior written permission.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     31  * SUCH DAMAGE.
     32  *
     33  *	from: @(#)spec_vnops.c	7.37 (Berkeley) 5/30/91
     34  *	$Id: spec_vnops.c,v 1.3 1993/05/20 02:54:52 cgd Exp $
     35  */
     36 
     37 #include "param.h"
     38 #include "proc.h"
     39 #include "systm.h"
     40 #include "kernel.h"
     41 #include "conf.h"
     42 #include "buf.h"
     43 #include "mount.h"
     44 #include "namei.h"
     45 #include "vnode.h"
     46 #include "specdev.h"
     47 #include "stat.h"
     48 #include "errno.h"
     49 #include "ioctl.h"
     50 #include "file.h"
     51 #include "dkbad.h"	/* XXX */
     52 #include "disklabel.h"
     53 
/* symbolic sleep message strings for devices */
/*
 * NOTE(review): these look like wait-message (wmesg) strings handed to
 * sleep/tsleep by device code elsewhere — confirm at the call sites.
 */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
     62 
/*
 * Vnode-operations vector for special files.  The initializer is
 * positional: entries must stay in exactly the member order of
 * struct vnodeops (presumably declared in "vnode.h" — confirm
 * before reordering anything here).
 */
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
};
     98 
     99 /*
    100  * Trivial lookup routine that always fails.
    101  */
    102 spec_lookup(vp, ndp, p)
    103 	struct vnode *vp;
    104 	struct nameidata *ndp;
    105 	struct proc *p;
    106 {
    107 
    108 	ndp->ni_dvp = vp;
    109 	ndp->ni_vp = NULL;
    110 	return (ENOTDIR);
    111 }
    112 
    113 /*
    114  * Open a special file: Don't allow open if fs is mounted -nodev,
    115  * and don't allow opens of block devices that are currently mounted.
    116  * Otherwise, call device driver open function.
    117  */
    118 /* ARGSUSED */
    119 spec_open(vp, mode, cred, p)
    120 	register struct vnode *vp;
    121 	int mode;
    122 	struct ucred *cred;
    123 	struct proc *p;
    124 {
    125 	dev_t dev = (dev_t)vp->v_rdev;
    126 	register int maj = major(dev);
    127 	int error;
    128 
    129 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
    130 		return (ENXIO);
    131 
    132 	switch (vp->v_type) {
    133 
    134 	case VCHR:
    135 		if ((u_int)maj >= nchrdev)
    136 			return (ENXIO);
    137 		VOP_UNLOCK(vp);
    138 		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
    139 		VOP_LOCK(vp);
    140 		return (error);
    141 
    142 	case VBLK:
    143 		if ((u_int)maj >= nblkdev)
    144 			return (ENXIO);
    145 		if (error = mountedon(vp))
    146 			return (error);
    147 		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
    148 	}
    149 	return (0);
    150 }
    151 
/*
 * Vnode op for read.
 *
 * Character devices: hand the uio straight to the driver's d_read,
 * with the vnode unlocked while the driver may sleep.
 * Block devices: perform buffered I/O through the buffer cache in
 * bsize chunks, issuing a one-block read-ahead on sequential access.
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

#ifdef DIAGNOSTIC
	/* Sanity: direction and issuing process must match the uio. */
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Unlock across the driver read; it may sleep indefinitely. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Use the filesystem block size (frag * fsize) when the
		 * partition holds a BSD FFS; otherwise fall back to the
		 * default block-device transfer size.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		/* bscale: DEV_BSIZE sectors per bsize block. */
		bscale = bsize / DEV_BSIZE;
		do {
			/* Round the offset down to a bsize-aligned block. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Sequential pattern (previous block immediately
			 * precedes this one): read ahead the next block too.
			 */
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			/*
			 * Clamp to what was actually read.  NOTE(review):
			 * b_resid is consulted before the error check; this
			 * relies on bread()/breada() returning a valid buffer
			 * even on failure — confirm.
			 */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
#ifdef OMIT	/* 20 Aug 92*/
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
#endif	/* OMIT*/
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
    235 
/*
 * Vnode op for write.
 *
 * Character devices: hand the uio straight to the driver's d_write,
 * with the vnode unlocked while the driver may sleep.
 * Block devices: buffered write through the buffer cache; full-block
 * writes skip the read-modify-write and are pushed asynchronously,
 * partial blocks are delayed-written.
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

#ifdef DIAGNOSTIC
	/* Sanity: direction and issuing process must match the uio. */
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Release the vnode lock while the driver write may sleep. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Use the filesystem block size (frag * fsize) when the
		 * partition holds a BSD FFS; otherwise fall back to the
		 * default block-device transfer size.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* Round the offset down to a bsize-aligned block. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * A full-block overwrite need not read the old
			 * contents; a partial write must read-modify-write.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			/*
			 * NOTE(review): b_resid is consulted before the error
			 * check; this relies on bread() returning a valid
			 * buffer even on failure — confirm.
			 */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			/*
			 * Completed blocks go out asynchronously and are aged
			 * out of the cache; partial blocks are delayed so a
			 * later write may fill them.
			 */
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
    317 
/*
 * Device ioctl operation: dispatch the command to the character or
 * block driver's d_ioctl entry for this vnode's device.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
		/*
		 * In-band query hack: command 0 with data == B_TAPE asks
		 * "is this block device a tape?".  Answer 0 (yes) when the
		 * driver advertises B_TAPE in d_flags, 1 (no) otherwise.
		 * Note the dangling else below binds to the inner if.
		 */
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		   fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
    352 
    353 /* ARGSUSED */
    354 spec_select(vp, which, fflags, cred, p)
    355 	struct vnode *vp;
    356 	int which, fflags;
    357 	struct ucred *cred;
    358 	struct proc *p;
    359 {
    360 	register dev_t dev;
    361 
    362 	switch (vp->v_type) {
    363 
    364 	default:
    365 		return (1);		/* XXX */
    366 
    367 	case VCHR:
    368 		dev = vp->v_rdev;
    369 		return (*cdevsw[major(dev)].d_select)(dev, which, p);
    370 	}
    371 }
    372 
    373 /*
    374  * Just call the device strategy routine
    375  */
    376 spec_strategy(bp)
    377 	register struct buf *bp;
    378 {
    379 
    380 	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
    381 	return (0);
    382 }
    383 
    384 /*
    385  * This is a noop, simply returning what one has been given.
    386  */
    387 spec_bmap(vp, bn, vpp, bnp)
    388 	struct vnode *vp;
    389 	daddr_t bn;
    390 	struct vnode **vpp;
    391 	daddr_t *bnp;
    392 {
    393 
    394 	if (vpp != NULL)
    395 		*vpp = vp;
    396 	if (bnp != NULL)
    397 		*bnp = bn;
    398 	return (0);
    399 }
    400 
/*
 * Vnode lock for special files is a no-op — specfs does no locking
 * at the moment — and always reports success.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return 0;
}
    411 
/*
 * Companion no-op to spec_lock(): nothing is ever held, so unlocking
 * trivially succeeds.
 */
/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return 0;
}
    419 
/*
 * Device close routine.  Selects the driver close entry (character or
 * block) and the S_IF* mode, enforcing last-close semantics: the
 * driver is only called when the alias-wide reference count drops to
 * one, unless a forced close (VXLOCK) is in progress.
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	/* Driver close entry chosen below; __P() is the old prototype macro. */
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		/* If invalidation fails, skip the driver close and report success. */
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* Dispatch to the driver close entry selected above. */
	return ((*devclose)(dev, flag, mode, p));
}
    478 
    479 /*
    480  * Print out the contents of a special device vnode.
    481  */
    482 spec_print(vp)
    483 	struct vnode *vp;
    484 {
    485 
    486 	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
    487 		minor(vp->v_rdev));
    488 }
    489 
    490 /*
    491  * Special device advisory byte-level locks.
    492  */
    493 /* ARGSUSED */
    494 spec_advlock(vp, id, op, fl, flags)
    495 	struct vnode *vp;
    496 	caddr_t id;
    497 	int op;
    498 	struct flock *fl;
    499 	int flags;
    500 {
    501 
    502 	return (EOPNOTSUPP);
    503 }
    504 
/*
 * Common failure stub: used for operations that are invalid on a
 * special-file descriptor; always yields EBADF.
 */
spec_ebadf()
{

	return EBADF;
}
    513 
/*
 * Special device bad operation: stub for vnode operations that must
 * never be invoked on a special file.  Reaching it indicates a kernel
 * bug, so it panics rather than returning an error.
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
    523