/* spec_vnops.c, revision 1.4 — sys/specfs */
      1 /*
      2  * Copyright (c) 1989 The Regents of the University of California.
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  * 3. All advertising materials mentioning features or use of this software
     14  *    must display the following acknowledgement:
     15  *	This product includes software developed by the University of
     16  *	California, Berkeley and its contributors.
     17  * 4. Neither the name of the University nor the names of its contributors
     18  *    may be used to endorse or promote products derived from this software
     19  *    without specific prior written permission.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     31  * SUCH DAMAGE.
     32  *
     33  *	from: @(#)spec_vnops.c	7.37 (Berkeley) 5/30/91
     34  *	$Id: spec_vnops.c,v 1.4 1993/06/27 06:01:50 andrew Exp $
     35  */
     36 
     37 #include "param.h"
     38 #include "proc.h"
     39 #include "systm.h"
     40 #include "kernel.h"
     41 #include "conf.h"
     42 #include "buf.h"
     43 #include "mount.h"
     44 #include "namei.h"
     45 #include "vnode.h"
     46 #include "specdev.h"
     47 #include "stat.h"
     48 #include "errno.h"
     49 #include "ioctl.h"
     50 #include "file.h"
     51 #include "dkbad.h"	/* XXX */
     52 #include "disklabel.h"
     53 
/*
 * Symbolic sleep message strings for devices.  Device code passes these
 * as the wait-channel message so tools that display sleeping processes
 * can show what the process is waiting on.
 * NOTE(review): exact callers are outside this file — confirm usage there.
 */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
     62 
/*
 * Vnode operations vector for special (device) files.  The entries are
 * positional: they must stay in exactly the order struct vnodeops
 * declares them.  Directory-style operations (create, mkdir, rename,
 * etc.) are wired to spec_* stubs since they are meaningless on devices.
 */
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
};
     98 
     99 /*
    100  * Trivial lookup routine that always fails.
    101  */
    102 int
    103 spec_lookup(vp, ndp, p)
    104 	struct vnode *vp;
    105 	struct nameidata *ndp;
    106 	struct proc *p;
    107 {
    108 
    109 	ndp->ni_dvp = vp;
    110 	ndp->ni_vp = NULL;
    111 	return (ENOTDIR);
    112 }
    113 
/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 *
 * Returns 0 on success, ENXIO for -nodev mounts or bad major numbers,
 * or whatever the driver's open routine / mountedon() returns.
 */
/* ARGSUSED */
int
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/* Device access is disabled entirely on filesystems mounted -nodev. */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		/* Reject major numbers beyond the character device table. */
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/*
		 * The vnode lock is dropped across the driver open and
		 * reacquired afterwards — presumably because the open may
		 * sleep; confirm against VOP locking protocol.
		 */
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		/* Reject major numbers beyond the block device table. */
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/* Refuse the open while a filesystem is mounted on it. */
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));

	default:
		/* Non-device types slip through as a successful no-op. */
		return (0);
	}
}
    155 
/*
 * Vnode op for read.
 *
 * Character devices hand the whole uio to the driver's d_read with the
 * vnode unlocked.  Block devices are read through the buffer cache in
 * bsize chunks, with one-block read-ahead when access is sequential.
 */
/* ARGSUSED */
int
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;		/* bsize-aligned device block number */
	long bsize, bscale;	/* chunk size; same in DEV_BSIZE blocks */
	struct partinfo dpart;
	register int n, on;	/* bytes this pass; offset within chunk */
	int error = 0;
	extern int mem_no;	/* major number checked for /dev/kmem below */

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	/* Zero-length reads succeed trivially. */
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Unlock across the driver call, which may sleep. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Pick the transfer size: default BLKDEV_IOSIZE, but if the
		 * driver reports a BSD FFS partition, use its block size
		 * (frag * fsize) instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			/* Round the offset down to a bsize-aligned block. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Sequential with the previous read (v_lastr)?
			 * Then start a read-ahead of the next block too.
			 */
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			/* Trim to what the device actually delivered. */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			/* Copy out of the cache buffer into the uio. */
			error = uiomove(bp->b_un.b_addr + on, n, uio);
#ifdef OMIT	/* 20 Aug 92*/
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
#endif	/* OMIT*/
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
    240 
/*
 * Vnode op for write.
 *
 * Character devices hand the whole uio to the driver's d_write with the
 * vnode unlocked.  Block devices write through the buffer cache: a full
 * chunk is overwritten in place (getblk, no prior read); a partial chunk
 * is read-modify-written (bread, then copy, then write back).
 */
/* ARGSUSED */
int
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;		/* bsize-aligned device block number */
	int bsize, blkmask;	/* chunk size; alignment mask in DEV_BSIZE units */
	struct partinfo dpart;
	register int n, on;	/* bytes this pass; offset within chunk */
	int error = 0;
	extern int mem_no;	/* major number checked for /dev/kmem below */

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Unlock across the driver call, which may sleep. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		/* Zero-length writes succeed trivially. */
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Pick the transfer size: default BLKDEV_IOSIZE, but if the
		 * driver reports a BSD FFS partition, use its block size
		 * (frag * fsize) instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* Round the offset down to a bsize-aligned block. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Full-chunk overwrite needs no prior read;
			 * partial chunk must be read in first.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			/* Copy from the uio into the cache buffer. */
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				/* Completed chunk: age it and write async. */
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				/* Partial chunk: leave as a delayed write. */
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
    323 
    324 /*
    325  * Device ioctl operation.
    326  */
    327 /* ARGSUSED */
    328 int
    329 spec_ioctl(vp, com, data, fflag, cred, p)
    330 	struct vnode *vp;
    331 	int com;
    332 	caddr_t data;
    333 	int fflag;
    334 	struct ucred *cred;
    335 	struct proc *p;
    336 {
    337 	dev_t dev = vp->v_rdev;
    338 
    339 	switch (vp->v_type) {
    340 
    341 	case VCHR:
    342 		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
    343 		    fflag, p));
    344 
    345 	case VBLK:
    346 		if (com == 0 && (int)data == B_TAPE)
    347 			if (bdevsw[major(dev)].d_flags & B_TAPE)
    348 				return (0);
    349 			else
    350 				return (1);
    351 		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
    352 		   fflag, p));
    353 
    354 	default:
    355 		panic("spec_ioctl");
    356 		/* NOTREACHED */
    357 	}
    358 }
    359 
    360 /* ARGSUSED */
    361 int
    362 spec_select(vp, which, fflags, cred, p)
    363 	struct vnode *vp;
    364 	int which, fflags;
    365 	struct ucred *cred;
    366 	struct proc *p;
    367 {
    368 	register dev_t dev;
    369 
    370 	switch (vp->v_type) {
    371 
    372 	default:
    373 		return (1);		/* XXX */
    374 
    375 	case VCHR:
    376 		dev = vp->v_rdev;
    377 		return (*cdevsw[major(dev)].d_select)(dev, which, p);
    378 	}
    379 }
    380 
    381 /*
    382  * Just call the device strategy routine
    383  */
    384 int
    385 spec_strategy(bp)
    386 	register struct buf *bp;
    387 {
    388 
    389 	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
    390 	return (0);
    391 }
    392 
    393 /*
    394  * This is a noop, simply returning what one has been given.
    395  */
    396 int
    397 spec_bmap(vp, bn, vpp, bnp)
    398 	struct vnode *vp;
    399 	daddr_t bn;
    400 	struct vnode **vpp;
    401 	daddr_t *bnp;
    402 {
    403 
    404 	if (vpp != NULL)
    405 		*vpp = vp;
    406 	if (bnp != NULL)
    407 		*bnp = bn;
    408 	return (0);
    409 }
    410 
/*
 * At the moment we do not do any locking.
 * Lock stub for special files: always succeeds without taking a lock.
 */
/* ARGSUSED */
int
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}
    422 
/*
 * Unlock stub for special files: always succeeds (see spec_lock —
 * no locking is done at the moment).
 */
/* ARGSUSED */
int
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}
    431 
/*
 * Device close routine.
 *
 * Picks the driver close entry (character or block) and calls it, but
 * only on the true last close of the device — unless the vnode is
 * being forcibly revoked (VXLOCK), in which case it closes regardless.
 */
/* ARGSUSED */
int
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		/* Skip the driver close if invalidation did not complete. */
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, flag, mode, p));
}
    491 
    492 /*
    493  * Print out the contents of a special device vnode.
    494  */
    495 void
    496 spec_print(vp)
    497 	struct vnode *vp;
    498 {
    499 
    500 	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
    501 		minor(vp->v_rdev));
    502 }
    503 
/*
 * Special device advisory byte-level locks.
 * Not supported on devices: always fails with EOPNOTSUPP.
 */
/* ARGSUSED */
int
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}
    519 
/*
 * Special device failed operation: stub wired into the operations
 * table for calls that always fail with EBADF on devices.
 */
int
spec_ebadf()
{

	return (EBADF);
}
    529 
/*
 * Special device bad operation: stub for table slots that must never
 * be reached — panics if called.
 */
int
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
    540