/*	$NetBSD: vfs_vnops.c,v 1.128 2006/11/01 22:45:14 elad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.128 2006/11/01 22:45:14 elad Exp $");

#include "fs_union.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
#endif

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
            kauth_cred_t cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
            kauth_cred_t cred, int flags);
static int vn_closefile(struct file *fp, struct lwp *l);
static int vn_poll(struct file *fp, int events, struct lwp *l);
static int vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l);
static int vn_statfile(struct file *fp, struct stat *sb, struct lwp *l);
static int vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l);

const struct fileops vnops = {
        vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
        vn_statfile, vn_closefile, vn_kqfilter
};
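
/*
 * Illustrative note (an addition to this listing, not part of the
 * original source): a file opened through the vnode layer is wired to
 * the table above, so that e.g. read(2) on it ends up in vn_read()
 * below.  A sketch of the hookup assumed to be done by the open path:
 *
 *	fp->f_ops = &vnops;
 *	fp->f_data = vp;
 */
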
93
94 /*
95 * Common code for vnode open operations.
96 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
97 */
98 int
99 vn_open(struct nameidata *ndp, int fmode, int cmode)
100 {
101 struct vnode *vp;
102 struct mount *mp = NULL; /* XXX: GCC */
103 struct lwp *l = ndp->ni_cnd.cn_lwp;
104 kauth_cred_t cred = l->l_cred;
105 struct vattr va;
106 int error;
107 #if NVERIEXEC > 0
108 struct veriexec_file_entry *vfe = NULL;
109 const char *pathbuf;
110 char *tmppathbuf;
111 #endif /* NVERIEXEC > 0 */
112
113 #if NVERIEXEC > 0
114 if (ndp->ni_segflg == UIO_USERSPACE) {
115 tmppathbuf = PNBUF_GET();
116 error = copyinstr(ndp->ni_dirp, tmppathbuf, MAXPATHLEN,
117 NULL);
118 if (error) {
119 if (veriexec_verbose >= 1)
120 log(LOG_NOTICE, "Veriexec: Can't copy path."
121 " (error=%d)\n", error);
122 goto bad2;
123 }
124 pathbuf = tmppathbuf;
125 } else {
126 tmppathbuf = NULL;
127 pathbuf = ndp->ni_dirp;
128 }
129 #endif /* NVERIEXEC > 0 */
130
131 restart:
132 if (fmode & O_CREAT) {
133 ndp->ni_cnd.cn_nameiop = CREATE;
134 ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
135 if ((fmode & O_EXCL) == 0 &&
136 ((fmode & O_NOFOLLOW) == 0))
137 ndp->ni_cnd.cn_flags |= FOLLOW;
138 if ((error = namei(ndp)) != 0)
139 goto bad2;
140 if (ndp->ni_vp == NULL) {
141 #if NVERIEXEC > 0
142 /* Lockdown mode: Prevent creation of new files. */
143 if (veriexec_strict >= VERIEXEC_LOCKDOWN) {
144 VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
145
146 log(LOG_ALERT, "Veriexec: Preventing "
147 "new file creation in %s.\n", pathbuf);
148
149 vp = ndp->ni_dvp;
150 error = EPERM;
151 goto bad;
152 }
153 #endif /* NVERIEXEC > 0 */
154
155 VATTR_NULL(&va);
156 va.va_type = VREG;
157 va.va_mode = cmode;
158 if (fmode & O_EXCL)
159 va.va_vaflags |= VA_EXCLUSIVE;
160 if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
161 VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
162 vput(ndp->ni_dvp);
163 if ((error = vn_start_write(NULL, &mp,
164 V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
165 goto bad2;
166 goto restart;
167 }
168 VOP_LEASE(ndp->ni_dvp, l, cred, LEASE_WRITE);
169 error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
170 &ndp->ni_cnd, &va);
171 vn_finished_write(mp, 0);
172 if (error)
173 goto bad2;
174 fmode &= ~O_TRUNC;
175 vp = ndp->ni_vp;
176 } else {
177 VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
178 if (ndp->ni_dvp == ndp->ni_vp)
179 vrele(ndp->ni_dvp);
180 else
181 vput(ndp->ni_dvp);
182 ndp->ni_dvp = NULL;
183 vp = ndp->ni_vp;
184 if (fmode & O_EXCL) {
185 error = EEXIST;
186 goto bad;
187 }
188 fmode &= ~O_CREAT;
189 }
190 } else {
191 ndp->ni_cnd.cn_nameiop = LOOKUP;
192 ndp->ni_cnd.cn_flags = LOCKLEAF;
193 if ((fmode & O_NOFOLLOW) == 0)
194 ndp->ni_cnd.cn_flags |= FOLLOW;
195 if ((error = namei(ndp)) != 0)
196 goto bad2;
197 vp = ndp->ni_vp;
198 }
199 if (vp->v_type == VSOCK) {
200 error = EOPNOTSUPP;
201 goto bad;
202 }
203 if (ndp->ni_vp->v_type == VLNK) {
204 error = EFTYPE;
205 goto bad;
206 }
207
208 if ((fmode & O_CREAT) == 0) {
209 #if NVERIEXEC > 0
210 if ((error = veriexec_verify(l, vp, pathbuf, VERIEXEC_FILE,
211 &vfe)) != 0)
212 goto bad;
213 #endif /* NVERIEXEC > 0 */
214
215 if (fmode & FREAD) {
216 if ((error = VOP_ACCESS(vp, VREAD, cred, l)) != 0)
217 goto bad;
218 }
219
220 if (fmode & (FWRITE | O_TRUNC)) {
221 if (vp->v_type == VDIR) {
222 error = EISDIR;
223 goto bad;
224 }
225 if ((error = vn_writechk(vp)) != 0 ||
226 (error = VOP_ACCESS(vp, VWRITE, cred, l)) != 0)
227 goto bad;
228 #if NVERIEXEC > 0
229 if (vfe != NULL) {
230 veriexec_report("Write access request.",
231 pathbuf, l, REPORT_ALWAYS|REPORT_ALARM);
232
233 /* IPS mode: Deny writing to monitored files. */
234 if (veriexec_strict >= VERIEXEC_IPS) {
235 error = EPERM;
236 goto bad;
237 } else {
238 veriexec_purge(vfe);
239 }
240 }
241 #endif /* NVERIEXEC > 0 */
242 }
243 }
244
245 if (fmode & O_TRUNC) {
246 #if NVERIEXEC > 0
247 if ((error = veriexec_verify(l, vp, pathbuf, VERIEXEC_FILE,
248 &vfe)) != 0) {
249 /*VOP_UNLOCK(vp, 0);*/
250 goto bad;
251 }
252
253 if (vfe != NULL) {
254 veriexec_report("truncate access request.",
255 pathbuf, l,
256 REPORT_VERBOSE | REPORT_ALARM);
257
258 /* IPS mode: Deny truncating monitored files. */
                        if (veriexec_strict >= VERIEXEC_IPS) {
                                error = EPERM;
                                goto bad;
                        } else {
                                veriexec_purge(vfe);
                        }
                }
#endif /* NVERIEXEC > 0 */

                VOP_UNLOCK(vp, 0);                      /* XXX */

                if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0) {
                        vrele(vp);
                        goto bad2;
                }
                VOP_LEASE(vp, l, cred, LEASE_WRITE);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);   /* XXX */
                VATTR_NULL(&va);
                va.va_size = 0;
                error = VOP_SETATTR(vp, &va, cred, l);
                vn_finished_write(mp, 0);
                if (error != 0)
                        goto bad;
        }
        if ((error = VOP_OPEN(vp, fmode, cred, l)) != 0)
                goto bad;
        if (vp->v_type == VREG &&
            uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
                error = EIO;
                goto bad;
        }
        if (fmode & FWRITE)
                vp->v_writecount++;

bad:
        if (error)
                vput(vp);

bad2:
#if NVERIEXEC > 0
        if (tmppathbuf != NULL)
                PNBUF_PUT(tmppathbuf);
#endif /* NVERIEXEC > 0 */

        return (error);
}
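
/*
 * Usage sketch (an illustrative addition, not from the original
 * source): opening a file by pathname from kernel context.  vn_open()
 * returns with the vnode locked; the pathname and open mode here are
 * hypothetical.
 *
 *	struct nameidata nd;
 *	struct vnode *vp;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path", l);
 *	if ((error = vn_open(&nd, FREAD, 0)) != 0)
 *		return (error);
 *	vp = nd.ni_vp;
 *	VOP_UNLOCK(vp, 0);
 *	...
 *	error = vn_close(vp, FREAD, l->l_cred, l);
 */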

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

        /*
         * If the vnode is in use as a process's text,
         * we can't allow writing.
         */
        if (vp->v_flag & VTEXT)
                return (ETXTBSY);
        return (0);
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{
        if ((vp->v_flag & VEXECMAP) == 0) {
                uvmexp.filepages -= vp->v_uobj.uo_npages;
                uvmexp.execpages += vp->v_uobj.uo_npages;
        }
        vp->v_flag |= VEXECMAP;
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

        if (vp->v_writecount != 0) {
                KASSERT((vp->v_flag & VTEXT) == 0);
                return (ETXTBSY);
        }
        vp->v_flag |= VTEXT;
        vn_markexec(vp);
        return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked vnode.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l)
{
        int error;

        if (flags & FWRITE)
                vp->v_writecount--;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_CLOSE(vp, flags, cred, l);
        vput(vp);
        return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        struct mount *mp = NULL;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                if (rw == UIO_READ) {
                        vn_lock(vp, LK_SHARED | LK_RETRY);
                } else /* UIO_WRITE */ {
                        if (vp->v_type != VCHR &&
                            (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH))
                            != 0)
                                return (error);
                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                }
        }
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_rw = rw;
        if (segflg == UIO_SYSSPACE) {
                UIO_SETUP_SYSSPACE(&auio);
        } else {
                auio.uio_vmspace = l->l_proc->p_vmspace;
        }
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0) {
                if (rw == UIO_WRITE)
                        vn_finished_write(mp, 0);
                VOP_UNLOCK(vp, 0);
        }
        return (error);
}
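
/*
 * Usage sketch (an illustrative addition, not from the original
 * source): reading the start of a file into a kernel buffer.  With
 * IO_NODELOCKED clear, vn_rdwr() handles the locking itself; the
 * buffer and length here are hypothetical.
 *
 *	char buf[512];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, l->l_cred, &resid, l);
 */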

int
vn_readdir(struct file *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        struct iovec aiov;
        struct uio auio;
        int error, eofflag;

        /* Limit the size of any kernel buffers used by VOP_READDIR */
        count = min(MAXBSIZE, count);

unionread:
        if (vp->v_type != VDIR)
                return (EINVAL);
        aiov.iov_base = bf;
        aiov.iov_len = count;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        if (segflg == UIO_SYSSPACE) {
                UIO_SETUP_SYSSPACE(&auio);
        } else {
                KASSERT(l == curlwp);
                auio.uio_vmspace = l->l_proc->p_vmspace;
        }
        auio.uio_resid = count;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        auio.uio_offset = fp->f_offset;
        error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
            ncookies);
        fp->f_offset = auio.uio_offset;
        VOP_UNLOCK(vp, 0);
        if (error)
                return (error);

#if defined(UNION) || defined(LKM)
        if (count == auio.uio_resid && vn_union_readdir_hook) {
                struct vnode *ovp = vp;

                error = (*vn_union_readdir_hook)(&vp, fp, l);
                if (error)
                        return (error);
                if (vp != ovp)
                        goto unionread;
        }
#endif /* UNION || LKM */

        if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
            (vp->v_mount->mnt_flag & MNT_UNION)) {
                struct vnode *tvp = vp;
                vp = vp->v_mount->mnt_vnodecovered;
                VREF(vp);
                fp->f_data = vp;
                fp->f_offset = 0;
                vrele(tvp);
                goto unionread;
        }
        *done = count - auio.uio_resid;
        return error;
}
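
/*
 * Usage sketch (an illustrative addition, not from the original
 * source): pulling one buffer's worth of directory entries through
 * vn_readdir() into a kernel buffer; the cookie pointers may be NULL
 * when the caller has no use for them.  The buffer size is
 * hypothetical.
 *
 *	char dbuf[1024];
 *	int done, error;
 *
 *	error = vn_readdir(fp, dbuf, UIO_SYSSPACE, sizeof(dbuf),
 *	    &done, l, NULL, NULL);
 */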

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        int count, error, ioflag;
        struct lwp *l = curlwp;

        VOP_LEASE(vp, l, cred, LEASE_READ);
        ioflag = IO_ADV_ENCODE(fp->f_advice);
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
                ioflag |= IO_SYNC;
        if (fp->f_flag & FALTIO)
                ioflag |= IO_ALTSEMANTICS;
        if (fp->f_flag & FDIRECT)
                ioflag |= IO_DIRECT;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        uio->uio_offset = *offset;
        count = uio->uio_resid;
        error = VOP_READ(vp, uio, ioflag, cred);
        if (flags & FOF_UPDATE_OFFSET)
                *offset += count - uio->uio_resid;
        VOP_UNLOCK(vp, 0);
        return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        struct mount *mp;
        int count, error, ioflag = IO_UNIT;
        struct lwp *l = curlwp;

        if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
                ioflag |= IO_APPEND;
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if (fp->f_flag & FFSYNC ||
            (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
                ioflag |= IO_SYNC;
        else if (fp->f_flag & FDSYNC)
                ioflag |= IO_DSYNC;
        if (fp->f_flag & FALTIO)
                ioflag |= IO_ALTSEMANTICS;
        if (fp->f_flag & FDIRECT)
                ioflag |= IO_DIRECT;
        mp = NULL;
        if (vp->v_type != VCHR &&
            (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
                return (error);
        VOP_LEASE(vp, l, cred, LEASE_WRITE);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        uio->uio_offset = *offset;
        count = uio->uio_resid;
        error = VOP_WRITE(vp, uio, ioflag, cred);
        if (flags & FOF_UPDATE_OFFSET) {
                if (ioflag & IO_APPEND)
                        *offset = uio->uio_offset;
                else
                        *offset += count - uio->uio_resid;
        }
        VOP_UNLOCK(vp, 0);
        vn_finished_write(mp, 0);
        return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct lwp *l)
{
        struct vnode *vp = (struct vnode *)fp->f_data;

        return vn_stat(vp, sb, l);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct lwp *l)
{
        struct vattr va;
        int error;
        mode_t mode;

        error = VOP_GETATTR(vp, &va, l->l_cred, l);
        if (error)
                return (error);
        /*
         * Copy from vattr table
         */
        sb->st_dev = va.va_fsid;
        sb->st_ino = va.va_fileid;
        mode = va.va_mode;
        switch (vp->v_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        sb->st_nlink = va.va_nlink;
        sb->st_uid = va.va_uid;
        sb->st_gid = va.va_gid;
        sb->st_rdev = va.va_rdev;
        sb->st_size = va.va_size;
        sb->st_atimespec = va.va_atime;
        sb->st_mtimespec = va.va_mtime;
        sb->st_ctimespec = va.va_ctime;
        sb->st_birthtimespec = va.va_birthtime;
        sb->st_blksize = va.va_blocksize;
        sb->st_flags = va.va_flags;
        sb->st_gen = 0;
        sb->st_blocks = va.va_bytes / S_BLKSIZE;
        return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l)
{
        struct vnode *vp = ((struct vnode *)fp->f_data);
        int error;

        error = VOP_FCNTL(vp, com, data, fp->f_flag, l->l_cred, l);
        return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
{
        struct vnode *vp = ((struct vnode *)fp->f_data);
        struct proc *p = l->l_proc;
        struct vattr vattr;
        int error;

        switch (vp->v_type) {

        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        error = VOP_GETATTR(vp, &vattr, l->l_cred, l);
                        if (error)
                                return (error);
                        *(int *)data = vattr.va_size - fp->f_offset;
                        return (0);
                }
                if ((com == FIONWRITE) || (com == FIONSPACE)) {
                        /*
                         * Files don't have send queues, so there never
                         * are any bytes in them, nor is there any
                         * open space in them.
                         */
                        *(int *)data = 0;
                        return (0);
                }
                if (com == FIOGETBMAP) {
                        daddr_t *block;

                        if (*(daddr_t *)data < 0)
                                return (EINVAL);
                        block = (daddr_t *)data;
                        return (VOP_BMAP(vp, *block, NULL, block, NULL));
                }
                if (com == OFIOGETBMAP) {
                        daddr_t ibn, obn;

                        if (*(int32_t *)data < 0)
                                return (EINVAL);
                        ibn = (daddr_t)*(int32_t *)data;
                        error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
                        *(int32_t *)data = (int32_t)obn;
                        return error;
                }
                if (com == FIONBIO || com == FIOASYNC)  /* XXX */
                        return (0);                     /* XXX */
                /* fall into ... */
        case VFIFO:
        case VCHR:
        case VBLK:
                error = VOP_IOCTL(vp, com, data, fp->f_flag,
                    l->l_cred, l);
                if (error == 0 && com == TIOCSCTTY) {
                        if (p->p_session->s_ttyvp)
                                vrele(p->p_session->s_ttyvp);
                        p->p_session->s_ttyvp = vp;
                        VREF(vp);
                }
                return (error);

        default:
                return (EPASSTHROUGH);
        }
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct lwp *l)
{

        return (VOP_POLL(((struct vnode *)fp->f_data), events, l));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(struct file *fp, struct knote *kn)
{

        return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
        int error;

#if 0
        KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
            || (vp->v_flag & VONWORKLST) != 0);
#endif
        KASSERT((flags &
            ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_DRAIN|LK_NOWAIT|LK_RETRY|
            LK_SETRECURSE|LK_CANRECURSE))
            == 0);

        do {
                if ((flags & LK_INTERLOCK) == 0)
                        simple_lock(&vp->v_interlock);
                if (vp->v_flag & VXLOCK) {
                        if (flags & LK_NOWAIT) {
                                simple_unlock(&vp->v_interlock);
                                return EBUSY;
                        }
                        vp->v_flag |= VXWANT;
                        ltsleep(vp, PINOD | PNORELOCK,
                            "vn_lock", 0, &vp->v_interlock);
                        error = ENOENT;
                } else {
                        error = VOP_LOCK(vp,
                            (flags & ~LK_RETRY) | LK_INTERLOCK);
                        if (error == 0 || error == EDEADLK || error == EBUSY)
                                return (error);
                }
                flags &= ~LK_INTERLOCK;
        } while (flags & LK_RETRY);
        return (error);
}
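
/*
 * Usage sketch (an illustrative addition, not from the original
 * source): the usual lock/operate/unlock bracket around a vnode
 * operation.  With LK_RETRY the lock attempt is retried rather than
 * failed, so callers conventionally ignore the return value in this
 * form:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_GETATTR(vp, &va, cred, l);
 *	VOP_UNLOCK(vp, 0);
 */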

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct lwp *l)
{

        return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
            fp->f_cred, l));
}

/*
 * Enable LK_CANRECURSE on lock. Return prior status.
 */
u_int
vn_setrecurse(struct vnode *vp)
{
        struct lock *lkp = &vp->v_lock;
        u_int retval = lkp->lk_flags & LK_CANRECURSE;

        lkp->lk_flags |= LK_CANRECURSE;
        return retval;
}

/*
 * Called when done with vn_setrecurse(); restores the saved recursion state.
 */
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
        struct lock *lkp = &vp->v_lock;

        lkp->lk_flags &= ~LK_CANRECURSE;
        lkp->lk_flags |= flags;
}

int
vn_cow_establish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
        int s;
        struct spec_cow_entry *e;

        MALLOC(e, struct spec_cow_entry *, sizeof(struct spec_cow_entry),
            M_DEVBUF, M_WAITOK);
        e->ce_func = func;
        e->ce_cookie = cookie;

        SPEC_COW_LOCK(vp->v_specinfo, s);
        vp->v_spec_cow_req++;
        while (vp->v_spec_cow_count > 0)
                ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
                    &vp->v_spec_cow_slock);

        SLIST_INSERT_HEAD(&vp->v_spec_cow_head, e, ce_list);

        vp->v_spec_cow_req--;
        if (vp->v_spec_cow_req == 0)
                wakeup(&vp->v_spec_cow_req);
        SPEC_COW_UNLOCK(vp->v_specinfo, s);

        return 0;
}

int
vn_cow_disestablish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
        int s;
        struct spec_cow_entry *e;

        SPEC_COW_LOCK(vp->v_specinfo, s);
        vp->v_spec_cow_req++;
        while (vp->v_spec_cow_count > 0)
                ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
                    &vp->v_spec_cow_slock);

        SLIST_FOREACH(e, &vp->v_spec_cow_head, ce_list)
                if (e->ce_func == func && e->ce_cookie == cookie) {
                        SLIST_REMOVE(&vp->v_spec_cow_head, e,
                            spec_cow_entry, ce_list);
                        FREE(e, M_DEVBUF);
                        break;
                }

        vp->v_spec_cow_req--;
        if (vp->v_spec_cow_req == 0)
                wakeup(&vp->v_spec_cow_req);
        SPEC_COW_UNLOCK(vp->v_specinfo, s);

        return e ? 0 : EINVAL;
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * All of these calls pass in a NULL credential, authorizing a "kernel"
 * access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        aiov.iov_len = *buflen;
        aiov.iov_base = bf;

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        auio.uio_offset = 0;
        auio.uio_resid = *buflen;
        UIO_SETUP_SYSSPACE(&auio);

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
            l);

        if ((ioflg & IO_NODELOCKED) == 0)
                VOP_UNLOCK(vp, 0);

        if (error == 0)
                *buflen = *buflen - auio.uio_resid;

        return (error);
}
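
/*
 * Usage sketch (an illustrative addition, not from the original
 * source): fetching an extended attribute into a fixed kernel buffer
 * with the vnode not yet locked.  The namespace constant, attribute
 * name, and buffer size are hypothetical; on success alen holds the
 * number of bytes actually read.
 *
 *	char abuf[64];
 *	size_t alen = sizeof(abuf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "attrname", &alen, abuf, l);
 */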

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        struct mount *mp = NULL;        /* XXX: GCC */
        int error;

        aiov.iov_len = buflen;
        aiov.iov_base = __UNCONST(bf);  /* XXXUNCONST kills const */

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_WRITE;
        auio.uio_offset = 0;
        auio.uio_resid = buflen;
        UIO_SETUP_SYSSPACE(&auio);

        if ((ioflg & IO_NODELOCKED) == 0) {
                if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
                        return (error);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, l);

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_finished_write(mp, 0);
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
        struct mount *mp = NULL;        /* XXX: GCC */
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
                        return (error);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, l);
        if (error == EOPNOTSUPP)
                error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
                    NULL, l);

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_finished_write(mp, 0);
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}

/*
 * Preparing to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 * V_PCATCH adds PCATCH to the tsleep flags.
 * V_WAIT waits until suspension is over; otherwise EWOULDBLOCK is returned.
 * V_SLEEPONLY waits, but does not bump the operations count.
 * V_LOWER marks a lower-level operation: no further vnodes should be
 *	locked.  Otherwise it is an upper-level operation: no vnodes
 *	should be locked.
 */
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
        struct mount *mp;
        int error, mask, prio;

        /*
         * If a vnode is provided, get and return the mount point
         * to which it will write.
         */
        if (vp != NULL) {
                *mpp = vp->v_mount;
        }
        if ((mp = *mpp) == NULL)
                return (0);
        mp = mp->mnt_leaf;
        /*
         * Check on status of suspension.
         */
        prio = PUSER - 1;
        if (flags & V_PCATCH)
                prio |= PCATCH;

        if ((flags & V_LOWER) == 0)
                mask = IMNT_SUSPEND;
        else
                mask = IMNT_SUSPENDLOW;

        while ((mp->mnt_iflag & mask) != 0) {
                if ((flags & V_WAIT) == 0)
                        return (EWOULDBLOCK);
                error = tsleep(&mp->mnt_flag, prio, "suspfs", 0);
                if (error)
                        return (error);
        }
        if (flags & V_SLEEPONLY)
                return (0);
        simple_lock(&mp->mnt_slock);
        if ((flags & V_LOWER) == 0)
                mp->mnt_writeopcountupper++;
        else
                mp->mnt_writeopcountlower++;
        simple_unlock(&mp->mnt_slock);
        return (0);
}

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(struct mount *mp, int flags)
{
        if (mp == NULL)
                return;
        mp = mp->mnt_leaf;
        simple_lock(&mp->mnt_slock);
        if ((flags & V_LOWER) == 0) {
                mp->mnt_writeopcountupper--;
                if (mp->mnt_writeopcountupper < 0)
                        printf("vn_finished_write: neg cnt upper=%d\n",
                            mp->mnt_writeopcountupper);
                if ((mp->mnt_iflag & IMNT_SUSPEND) != 0 &&
                    mp->mnt_writeopcountupper <= 0)
                        wakeup(&mp->mnt_writeopcountupper);
        } else {
                mp->mnt_writeopcountlower--;
                if (mp->mnt_writeopcountlower < 0)
                        printf("vn_finished_write: neg cnt lower=%d\n",
                            mp->mnt_writeopcountlower);
                if ((mp->mnt_iflag & IMNT_SUSPENDLOW) != 0 &&
                    mp->mnt_writeopcountlower <= 0)
                        wakeup(&mp->mnt_writeopcountlower);
        }
        simple_unlock(&mp->mnt_slock);
}
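
/*
 * Usage sketch (an illustrative addition, not from the original
 * source): the write-gating bracket that the file table routines
 * above place around filesystem-modifying operations:
 *
 *	struct mount *mp;
 *	int error;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_SETATTR(vp, &va, cred, l);
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp, 0);
 */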

void
vn_ra_allocctx(struct vnode *vp)
{
        struct uvm_ractx *ra = NULL;

        if (vp->v_type != VREG) {
                return;
        }
        if (vp->v_ractx != NULL) {
                return;
        }
        simple_lock(&vp->v_interlock);
        if (vp->v_ractx == NULL) {
                simple_unlock(&vp->v_interlock);
                ra = uvm_ra_allocctx();
                simple_lock(&vp->v_interlock);
                if (ra != NULL && vp->v_ractx == NULL) {
                        vp->v_ractx = ra;
                        ra = NULL;
                }
        }
        simple_unlock(&vp->v_interlock);
        if (ra != NULL) {
                uvm_ra_freectx(ra);
        }
}