/*	$NetBSD: vfs_vnops.c,v 1.36.4.1 1999/06/07 04:25:31 chs Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include "fs_union.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>

#include <vm/vm.h>

#include <uvm/uvm_extern.h>

#ifdef UNION
#include <miscfs/union/union.h>
#endif

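/*
 * Generic file table operations for open files backed by vnodes; this
 * table is installed in struct file when a vnode is opened.
 */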
struct fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_poll, vn_closefile };

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(ndp, fmode, cmode)
	register struct nameidata *ndp;
	int fmode, cmode;
{
	register struct vnode *vp;
	register struct proc *p = ndp->ni_cnd.cn_proc;
	register struct ucred *cred = p->p_ucred;
	struct vattr va;
	int error;

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, &va);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
		}
	}
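	/*
	 * Implement O_TRUNC by setting the size to zero, dropping and
	 * retaking the vnode lock around the lease notification.
	 */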
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0);			/* XXX */
		VOP_LEASE(vp, p, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;
	if (vp->v_type == VREG &&
	    uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
		error = EIO;
		goto bad;
	}
	if (fmode & FWRITE)
		vp->v_writecount++;

	return (0);
bad:
	vput(vp);
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(vp, flags, cred, p)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred, p);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
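/*
 * Illustrative use only (buffer, offset and resid names are the caller's
 * own): read "len" bytes of kernel memory at "offset" with the vnode
 * already locked by the caller:
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, len, offset, UIO_SYSSPACE,
 *	    IO_NODELOCKED, cred, &resid, p);
 */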
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	size_t *aresid;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);
	return (error);
}

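/*
 * Common code for reading directory entries through a file table entry.
 * On union and union-mounted directories, continue reading in the
 * directory below when the upper layer returns no entries.
 */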
int
vn_readdir(fp, buf, segflg, count, done, p, cookies, ncookies)
	struct file *fp;
	char *buf;
	int segflg, *done, *ncookies;
	u_int count;
	struct proc *p;
	off_t **cookies;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = segflg;
	auio.uio_procp = p;
	auio.uio_resid = count;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
		    ncookies);
	fp->f_offset = auio.uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

#ifdef UNION
{
	extern int (**union_vnodeop_p) __P((void *));
	extern struct vnode *union_dircache __P((struct vnode *));

	if (count == auio.uio_resid && (vp->v_op == union_vnodeop_p)) {
		struct vnode *lvp;

		lvp = union_dircache(vp);
		if (lvp != NULLVP) {
			struct vattr va;

			/*
			 * If the directory is opaque,
			 * then don't show lower entries
			 */
			error = VOP_GETATTR(vp, &va, fp->f_cred, p);
			if (va.va_flags & OPAQUE) {
				vput(lvp);
				lvp = NULL;
			}
		}

		if (lvp != NULLVP) {
			error = VOP_OPEN(lvp, FREAD, fp->f_cred, p);
			if (error) {
				vput(lvp);
				return (error);
			}
			VOP_UNLOCK(lvp, 0);
			fp->f_data = (caddr_t) lvp;
			fp->f_offset = 0;
			error = vn_close(vp, FREAD, fp->f_cred, p);
			if (error)
				return (error);
			vp = lvp;
			goto unionread;
		}
	}
}
#endif /* UNION */

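	/*
	 * If nothing was transferred and this directory is the root of a
	 * union-mounted file system, retry the read on the directory that
	 * the mount covers.
	 */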
	if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		VREF(vp);
		fp->f_data = (caddr_t) vp;
		fp->f_offset = 0;
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
int
vn_read(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag = 0;

	VOP_LEASE(vp, uio->uio_procp, cred, LEASE_READ);
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode write routine.
 */
int
vn_write(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag = IO_UNIT;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fp->f_flag & FDSYNC)
		ioflag |= IO_DSYNC;
	VOP_LEASE(vp, uio->uio_procp, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND)
			*offset = uio->uio_offset;
		else
			*offset += count - uio->uio_resid;
	}
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode stat routine.
 */
int
vn_stat(vp, sb, p)
	struct vnode *vp;
	register struct stat *sb;
	struct proc *p;
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
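	/* Fold the vnode type into the file-type bits of st_mode. */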
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
int
vn_ioctl(fp, com, data, p)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct proc *p;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
		return (ENOTTY);

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
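		/*
		 * If the ioctl made this the controlling terminal, record
		 * its vnode in the session, replacing any previous one.
		 */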
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
int
vn_poll(fp, events, p)
	struct file *fp;
	int events;
	struct proc *p;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
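		/*
		 * A vnode being cleaned out cannot be locked; sleep until
		 * the cleaning finishes and report the vnode as gone
		 * (unless LK_RETRY makes us try again).
		 */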
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp, flags | LK_INTERLOCK);
			if (error == 0 || error == EDEADLK)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
int
vn_closefile(fp, p)
	struct file *fp;
	struct proc *p;
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, p));
}