/*	$NetBSD: vfs_vnops.c,v 1.126 2006/10/27 20:16:10 elad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.126 2006/10/27 20:16:10 elad Exp $");

#include "fs_union.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
#endif

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_closefile(struct file *fp, struct lwp *l);
static int vn_poll(struct file *fp, int events, struct lwp *l);
static int vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l);
static int vn_statfile(struct file *fp, struct stat *sb, struct lwp *l);
static int vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l);

const struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
	vn_statfile, vn_closefile, vn_kqfilter
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct mount *mp = NULL;	/* XXX: GCC */
	struct lwp *l = ndp->ni_cnd.cn_lwp;
	kauth_cred_t cred = l->l_cred;
	struct vattr va;
	int error;
#if NVERIEXEC > 0
	struct veriexec_file_entry *vfe = NULL;
	const char *pathbuf;
#endif /* NVERIEXEC > 0 */

#if NVERIEXEC > 0
	if (ndp->ni_segflg == UIO_USERSPACE) {
		pathbuf = PNBUF_GET();
		error = copyinstr(ndp->ni_dirp, __UNCONST(pathbuf), MAXPATHLEN,
		    NULL);
		if (error) {
			if (veriexec_verbose >= 1)
				printf("Veriexec: Can't copy path. (err=%d)\n",
				    error);
			goto bad2;
		}
	} else
		pathbuf = ndp->ni_dirp;
#endif /* NVERIEXEC > 0 */

restart:
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			goto bad2;
		if (ndp->ni_vp == NULL) {
#if NVERIEXEC > 0
			/* Lockdown mode: Prevent creation of new files. */
			if (veriexec_strict >= VERIEXEC_LOCKDOWN) {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);

				printf("Veriexec: vn_open: Preventing "
				    "new file creation in %s.\n",
				    pathbuf);

				vp = ndp->ni_dvp;
				error = EPERM;
				goto bad;
			}
#endif /* NVERIEXEC > 0 */

			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
					goto bad2;
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, l, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, &va);
			vn_finished_write(mp, 0);
			if (error)
				goto bad2;
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			goto bad2;
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (ndp->ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

	if ((fmode & O_CREAT) == 0) {
#if NVERIEXEC > 0
		if ((error = veriexec_verify(l, vp, pathbuf, VERIEXEC_FILE,
		    &vfe)) != 0)
			goto bad;
#endif /* NVERIEXEC > 0 */

		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, l)) != 0)
				goto bad;
		}

		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, l)) != 0)
				goto bad;
#if NVERIEXEC > 0
			if (vfe != NULL) {
				veriexec_report("Write access request.",
				    pathbuf, l, REPORT_ALWAYS|REPORT_ALARM);

				/* IPS mode: Deny writing to monitored files. */
				if (veriexec_strict >= VERIEXEC_IPS) {
					error = EPERM;
					goto bad;
				} else {
					veriexec_purge(vfe);
				}
			}
#endif /* NVERIEXEC > 0 */
		}
	}

	if (fmode & O_TRUNC) {
#if NVERIEXEC > 0
		if ((error = veriexec_verify(l, vp, pathbuf, VERIEXEC_FILE,
		    &vfe)) != 0) {
			/*VOP_UNLOCK(vp, 0);*/
			goto bad;
		}

		if (vfe != NULL) {
			veriexec_report("truncate access request.",
			    pathbuf, l,
			    REPORT_VERBOSE | REPORT_ALARM);

			/* IPS mode: Deny truncating monitored files. */
			if (veriexec_strict >= VERIEXEC_IPS) {
				error = EPERM;
				goto bad;
			} else {
				veriexec_purge(vfe);
			}
		}
#endif /* NVERIEXEC > 0 */

		VOP_UNLOCK(vp, 0);			/* XXX */

		if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0) {
			vrele(vp);
			goto bad2;
		}
		VOP_LEASE(vp, l, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred, l);
		vn_finished_write(mp, 0);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, l)) != 0)
		goto bad;
	if (vp->v_type == VREG &&
	    uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
		error = EIO;
		goto bad;
	}
	if (fmode & FWRITE)
		vp->v_writecount++;

	return (0);
bad:
	vput(vp);
bad2:
#if NVERIEXEC > 0
	if (ndp->ni_segflg == UIO_USERSPACE)
		PNBUF_PUT(__UNCONST(pathbuf));
#endif /* NVERIEXEC > 0 */

	return (error);
}
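
/*
 * Example (editor's sketch, not from the original source): a typical
 * in-kernel caller fills in a nameidata and opens by path roughly as
 * below; "path", "flags" and "l" are illustrative names only.  On
 * success the vnode is returned locked and referenced, so the caller
 * normally unlocks it once it has been recorded:
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, path, l);
 *	if ((error = vn_open(&nd, flags, 0644)) == 0) {
 *		... use nd.ni_vp ...
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *	}
 */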

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{
	if ((vp->v_flag & VEXECMAP) == 0) {
		uvmexp.filepages -= vp->v_uobj.uo_npages;
		uvmexp.execpages += vp->v_uobj.uo_npages;
	}
	vp->v_flag |= VEXECMAP;
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if (vp->v_writecount != 0) {
		KASSERT((vp->v_flag & VTEXT) == 0);
		return (ETXTBSY);
	}
	vp->v_flag |= VTEXT;
	vn_markexec(vp);
	return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred, l);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp = NULL;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}
	return (error);
}
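
/*
 * Example (editor's sketch, not from the original source): reading the
 * first block of a file into a kernel buffer with vn_rdwr(); "buf",
 * "resid" and "l" are illustrative names, and the vnode is assumed to
 * be unlocked by the caller (IO_NODELOCKED not set):
 *
 *	char buf[512];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *	    0, l->l_cred, &resid, l);
 *	if (error == 0)
 *		... sizeof(buf) - resid bytes were read into buf ...
 */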

int
vn_readdir(struct file *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size on any kernel buffers used by VOP_READDIR */
	count = min(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
	    ncookies);
	fp->f_offset = auio.uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

#if defined(UNION) || defined(LKM)
	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}
#endif /* UNION || LKM */

	if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		VREF(vp);
		fp->f_data = vp;
		fp->f_offset = 0;
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag;
	struct lwp *l = curlwp;

	VOP_LEASE(vp, l, cred, LEASE_READ);
	ioflag = IO_ADV_ENCODE(fp->f_advice);
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fp->f_flag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fp->f_flag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct mount *mp;
	int count, error, ioflag = IO_UNIT;
	struct lwp *l = curlwp;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fp->f_flag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fp->f_flag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fp->f_flag & FDIRECT)
		ioflag |= IO_DIRECT;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
		return (error);
	VOP_LEASE(vp, l, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND)
			*offset = uio->uio_offset;
		else
			*offset += count - uio->uio_resid;
	}
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct lwp *l)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, l);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct lwp *l)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, l->l_cred, l);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	int error;

	error = VOP_FCNTL(vp, com, data, fp->f_flag, l->l_cred, l);
	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct proc *p = l->l_proc;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, l->l_cred, l);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			return (VOP_BMAP(vp, *block, NULL, block, NULL));
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    l->l_cred, l);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct lwp *l)
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, l));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

#if 0
	KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
	    || (vp->v_flag & VONWORKLST) != 0);
#endif
	KASSERT((flags &
	    ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_DRAIN|LK_NOWAIT|LK_RETRY|
	    LK_SETRECURSE|LK_CANRECURSE))
	    == 0);

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if (vp->v_flag & VXLOCK) {
			if (flags & LK_NOWAIT) {
				simple_unlock(&vp->v_interlock);
				return EBUSY;
			}
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD | PNORELOCK,
			    "vn_lock", 0, &vp->v_interlock);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp,
			    (flags & ~LK_RETRY) | LK_INTERLOCK);
			if (error == 0 || error == EDEADLK || error == EBUSY)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}
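
/*
 * Example (editor's sketch, not from the original source): the usual
 * pattern is to take the vnode lock, perform vnode operations, then
 * release it with VOP_UNLOCK().  With LK_RETRY the call keeps retrying
 * across reclamation; without it the caller must be prepared for
 * ENOENT (and EBUSY when LK_NOWAIT is given):
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		... VOP_GETATTR(vp, &va, cred, l), etc. ...
 *		VOP_UNLOCK(vp, 0);
 *	}
 */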

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct lwp *l)
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
	    fp->f_cred, l));
}

/*
 * Enable LK_CANRECURSE on lock. Return prior status.
 */
u_int
vn_setrecurse(struct vnode *vp)
{
	struct lock *lkp = &vp->v_lock;
	u_int retval = lkp->lk_flags & LK_CANRECURSE;

	lkp->lk_flags |= LK_CANRECURSE;
	return retval;
}

/*
 * Called when done with locksetrecurse.
 */
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
	struct lock *lkp = &vp->v_lock;

	lkp->lk_flags &= ~LK_CANRECURSE;
	lkp->lk_flags |= flags;
}
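
/*
 * Example (editor's sketch, not from the original source): code that may
 * need to lock a vnode it already holds locked typically brackets the
 * operation with these two calls, so the recursion is permitted and the
 * previous setting is restored afterwards; "lkflags" is illustrative:
 *
 *	u_int lkflags = vn_setrecurse(vp);
 *	... operations that may lock vp again ...
 *	vn_restorerecurse(vp, lkflags);
 */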

int
vn_cow_establish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
	int s;
	struct spec_cow_entry *e;

	MALLOC(e, struct spec_cow_entry *, sizeof(struct spec_cow_entry),
	    M_DEVBUF, M_WAITOK);
	e->ce_func = func;
	e->ce_cookie = cookie;

	SPEC_COW_LOCK(vp->v_specinfo, s);
	vp->v_spec_cow_req++;
	while (vp->v_spec_cow_count > 0)
		ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
		    &vp->v_spec_cow_slock);

	SLIST_INSERT_HEAD(&vp->v_spec_cow_head, e, ce_list);

	vp->v_spec_cow_req--;
	if (vp->v_spec_cow_req == 0)
		wakeup(&vp->v_spec_cow_req);
	SPEC_COW_UNLOCK(vp->v_specinfo, s);

	return 0;
}

int
vn_cow_disestablish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
	int s;
	struct spec_cow_entry *e;

	SPEC_COW_LOCK(vp->v_specinfo, s);
	vp->v_spec_cow_req++;
	while (vp->v_spec_cow_count > 0)
		ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
		    &vp->v_spec_cow_slock);

	SLIST_FOREACH(e, &vp->v_spec_cow_head, ce_list)
		if (e->ce_func == func && e->ce_cookie == cookie) {
			SLIST_REMOVE(&vp->v_spec_cow_head, e,
			    spec_cow_entry, ce_list);
			FREE(e, M_DEVBUF);
			break;
		}

	vp->v_spec_cow_req--;
	if (vp->v_spec_cow_req == 0)
		wakeup(&vp->v_spec_cow_req);
	SPEC_COW_UNLOCK(vp->v_specinfo, s);

	return e ? 0 : EINVAL;
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    l);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}
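
/*
 * Example (editor's sketch, not from the original source): fetching a
 * system-namespace attribute on an already locked vnode; "myattr",
 * "buf" and "buflen" are illustrative names.  On success *buflen is
 * reduced to the number of bytes actually returned:
 *
 *	char buf[64];
 *	size_t buflen = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "myattr", &buflen, buf, l);
 */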

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp = NULL;	/* XXX: GCC */
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);	/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, l);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	struct mount *mp = NULL;	/* XXX: GCC */
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, l);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, l);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

/*
 * Preparing to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 * V_PCATCH    adds PCATCH to the tsleep flags.
 * V_WAIT      waits until suspension is over. Otherwise returns EWOULDBLOCK.
 * V_SLEEPONLY wait, but do not bump the operations count.
 * V_LOWER     this is a lower level operation. No further vnodes should be
 *             locked. Otherwise it is an upper level operation. No vnodes
 *             should be locked.
 */
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error, mask, prio;

	/*
	 * If a vnode is provided, get and return the mount point to which
	 * it will write.
	 */
	if (vp != NULL) {
		*mpp = vp->v_mount;
	}
	if ((mp = *mpp) == NULL)
		return (0);
	mp = mp->mnt_leaf;
	/*
	 * Check on status of suspension.
	 */
	prio = PUSER - 1;
	if (flags & V_PCATCH)
		prio |= PCATCH;

	if ((flags & V_LOWER) == 0)
		mask = IMNT_SUSPEND;
	else
		mask = IMNT_SUSPENDLOW;

	while ((mp->mnt_iflag & mask) != 0) {
		if ((flags & V_WAIT) == 0)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, prio, "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_SLEEPONLY)
		return (0);
	simple_lock(&mp->mnt_slock);
	if ((flags & V_LOWER) == 0)
		mp->mnt_writeopcountupper++;
	else
		mp->mnt_writeopcountlower++;
	simple_unlock(&mp->mnt_slock);
	return (0);
}
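
/*
 * Example (editor's sketch, not from the original source): the usual
 * bracket around a metadata update, mirroring the pattern used by
 * vn_write() and the O_TRUNC path of vn_open() above; "va", "cred" and
 * "l" are illustrative:
 *
 *	struct mount *mp;
 *	int error;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
 *		return error;
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_SETATTR(vp, &va, cred, l);
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp, 0);
 */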

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(struct mount *mp, int flags)
{
	if (mp == NULL)
		return;
	mp = mp->mnt_leaf;
	simple_lock(&mp->mnt_slock);
	if ((flags & V_LOWER) == 0) {
		mp->mnt_writeopcountupper--;
		if (mp->mnt_writeopcountupper < 0)
			printf("vn_finished_write: neg cnt upper=%d\n",
			    mp->mnt_writeopcountupper);
		if ((mp->mnt_iflag & IMNT_SUSPEND) != 0 &&
		    mp->mnt_writeopcountupper <= 0)
			wakeup(&mp->mnt_writeopcountupper);
	} else {
		mp->mnt_writeopcountlower--;
		if (mp->mnt_writeopcountlower < 0)
			printf("vn_finished_write: neg cnt lower=%d\n",
			    mp->mnt_writeopcountlower);
		if ((mp->mnt_iflag & IMNT_SUSPENDLOW) != 0 &&
		    mp->mnt_writeopcountlower <= 0)
			wakeup(&mp->mnt_writeopcountlower);
	}
	simple_unlock(&mp->mnt_slock);
}

void
vn_ra_allocctx(struct vnode *vp)
{
	struct uvm_ractx *ra = NULL;

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	simple_lock(&vp->v_interlock);
	if (vp->v_ractx == NULL) {
		simple_unlock(&vp->v_interlock);
		ra = uvm_ra_allocctx();
		simple_lock(&vp->v_interlock);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	simple_unlock(&vp->v_interlock);
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}