/*
 * spec_vnops.c revision 1.1 (sys/miscfs/specfs)
 */
      1 /*
      2  * Copyright (c) 1989 The Regents of the University of California.
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  * 3. All advertising materials mentioning features or use of this software
     14  *    must display the following acknowledgement:
     15  *	This product includes software developed by the University of
     16  *	California, Berkeley and its contributors.
     17  * 4. Neither the name of the University nor the names of its contributors
     18  *    may be used to endorse or promote products derived from this software
     19  *    without specific prior written permission.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     31  * SUCH DAMAGE.
     32  *
     33  *	@(#)spec_vnops.c	7.37 (Berkeley) 5/30/91
     34  */
     35 
     36 #include "param.h"
     37 #include "proc.h"
     38 #include "systm.h"
     39 #include "kernel.h"
     40 #include "conf.h"
     41 #include "buf.h"
     42 #include "mount.h"
     43 #include "namei.h"
     44 #include "vnode.h"
     45 #include "specdev.h"
     46 #include "stat.h"
     47 #include "errno.h"
     48 #include "ioctl.h"
     49 #include "file.h"
     50 #include "dkbad.h"	/* XXX */
     51 #include "disklabel.h"
     52 
/*
 * Symbolic sleep message strings for devices.
 * NOTE(review): presumably passed as the wmesg argument to sleep by
 * device code so utilities can show why a process is blocked -- the
 * callers are outside this file; verify against the drivers.
 */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
     61 
/*
 * Vnode operations vector for special (device) files.  The initializer
 * is positional, so entries must stay in the order of the members of
 * struct vnodeops; the trailing comment on each line names the slot.
 */
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
};
     97 
     98 /*
     99  * Trivial lookup routine that always fails.
    100  */
    101 spec_lookup(vp, ndp, p)
    102 	struct vnode *vp;
    103 	struct nameidata *ndp;
    104 	struct proc *p;
    105 {
    106 
    107 	ndp->ni_dvp = vp;
    108 	ndp->ni_vp = NULL;
    109 	return (ENOTDIR);
    110 }
    111 
/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;	/* unused (ARGSUSED) */
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/* Refuse all device access on filesystems mounted with MNT_NODEV. */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		/* Reject majors beyond the character device switch. */
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/*
		 * Drop the vnode lock across the driver open: character
		 * device opens may sleep indefinitely (e.g. a tty
		 * waiting for carrier).
		 */
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		/* Reject majors beyond the block device switch. */
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/* Disallow opening a block device that is mounted. */
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	/* Other vnode types need no device-level open. */
	return (0);
}
    150 
/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;	/* unused (ARGSUSED) */
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;		/* device block number of current chunk */
	long bsize, bscale;	/* chunk size, and same in DEV_BSIZE units */
	struct partinfo dpart;
	register int n, on;	/* bytes this pass / offset within buffer */
	int error = 0;
	extern int mem_no;	/* major number of the memory devices */

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Drop the vnode lock while the driver read may sleep. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Choose the transfer size: the filesystem block size
		 * (frag * fsize) if the partition holds a BSD filesystem,
		 * otherwise the BLKDEV_IOSIZE default.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		/* Move bsize-aligned chunks through the buffer cache. */
		do {
			/* Round the offset down to a chunk boundary. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/* Sequential access: also read ahead the next chunk. */
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			/* Trim to what the device actually delivered. */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			/* A fully consumed buffer is unlikely to be reused. */
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
    232 
/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;	/* unused (ARGSUSED) */
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;		/* device block number of current chunk */
	int bsize, blkmask;	/* chunk size / chunk-alignment mask */
	struct partinfo dpart;
	register int n, on;	/* bytes this pass / offset within buffer */
	int error = 0;
	extern int mem_no;	/* major number of the memory devices */

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Drop the vnode lock while the driver write may sleep. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Choose the transfer size: the filesystem block size
		 * (frag * fsize) if the partition holds a BSD filesystem,
		 * otherwise the BLKDEV_IOSIZE default.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* Round the offset down to a chunk boundary. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * A full-chunk overwrite needs no prior read;
			 * a partial chunk must be read-modify-written.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			/*
			 * NOTE(review): b_resid is consulted before the
			 * error check below; this relies on bread handing
			 * back a valid bp even on failure -- confirm.
			 */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				/* Completed chunk: age it, write it async. */
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
    314 
/*
 * Device ioctl operation: forward the command to the driver's
 * d_ioctl entry for the vnode's device.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;	/* unused (ARGSUSED) */
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
		/*
		 * In-band query: command 0 with data == B_TAPE answers
		 * "is this a tape device?" -- 0 if the bdevsw entry has
		 * B_TAPE set, 1 otherwise.  NOTE(review): presumably an
		 * internal kernel convention; verify the callers.
		 */
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		   fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
    349 
    350 /* ARGSUSED */
    351 spec_select(vp, which, fflags, cred, p)
    352 	struct vnode *vp;
    353 	int which, fflags;
    354 	struct ucred *cred;
    355 	struct proc *p;
    356 {
    357 	register dev_t dev;
    358 
    359 	switch (vp->v_type) {
    360 
    361 	default:
    362 		return (1);		/* XXX */
    363 
    364 	case VCHR:
    365 		dev = vp->v_rdev;
    366 		return (*cdevsw[major(dev)].d_select)(dev, which, p);
    367 	}
    368 }
    369 
/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	/* Dispatch the buffer to the block driver selected by b_dev. */
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}
    380 
    381 /*
    382  * This is a noop, simply returning what one has been given.
    383  */
    384 spec_bmap(vp, bn, vpp, bnp)
    385 	struct vnode *vp;
    386 	daddr_t bn;
    387 	struct vnode **vpp;
    388 	daddr_t *bnp;
    389 {
    390 
    391 	if (vpp != NULL)
    392 		*vpp = vp;
    393 	if (bnp != NULL)
    394 		*bnp = bn;
    395 	return (0);
    396 }
    397 
/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	/* Stub: special-file vnode locking is a no-op. */
	return (0);
}
    408 
/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	/* Stub: matches the no-op spec_lock above. */
	return (0);
}
    416 
/*
 * Device close routine: decide whether this is really the last (or a
 * forced) close, then invoke the driver's d_close entry.
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;	/* unused (ARGSUSED) */
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;		/* S_IFCHR or S_IFBLK, passed to the driver */

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* Invoke the driver close routine selected above. */
	return ((*devclose)(dev, flag, mode, p));
}
    475 
    476 /*
    477  * Print out the contents of a special device vnode.
    478  */
    479 spec_print(vp)
    480 	struct vnode *vp;
    481 {
    482 
    483 	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
    484 		minor(vp->v_rdev));
    485 }
    486 
/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	/* Advisory record locking is not supported on devices. */
	return (EOPNOTSUPP);
}
    501 
/*
 * Special device failed operation; unconditionally fails with EBADF.
 */
spec_ebadf()
{
	return EBADF;
}
    510 
/*
 * Special device bad operation
 */
spec_badop()
{

	/* An operation invalid for special files was attempted. */
	panic("spec_badop called");
	/* NOTREACHED */
}
    520