/*	$NetBSD: vfs_vnops.c,v 1.158.2.1 2008/06/10 14:51:22 simonb Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.158.2.1 2008/06/10 14:51:22 simonb Exp $");

#include "fs_union.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/wapbl.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook) (struct vnode **, struct file *, struct lwp *);
#endif

#include <sys/verified_exec.h>

static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
            kauth_cred_t cred, int flags);
static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
            kauth_cred_t cred, int flags);
static int vn_closefile(file_t *fp);
static int vn_poll(file_t *fp, int events);
static int vn_fcntl(file_t *fp, u_int com, void *data);
static int vn_statfile(file_t *fp, struct stat *sb);
static int vn_ioctl(file_t *fp, u_long com, void *data);

const struct fileops vnops = {
        vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
        vn_statfile, vn_closefile, vn_kqfilter
};

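/*
 * Illustrative sketch (assumed caller behavior, not code from this
 * file): open(2) and friends bind an opened vnode to a file table
 * entry through this operations vector, roughly:
 *
 *	fp->f_type = DTYPE_VNODE;
 *	fp->f_ops = &vnops;
 *	fp->f_data = vp;
 *
 * after which generic descriptor I/O dispatches to the vn_* routines
 * below.
 */
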
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
        struct vnode *vp;
        struct lwp *l = curlwp;
        kauth_cred_t cred = l->l_cred;
        struct vattr va;
        int error;
        char *path;

        ndp->ni_cnd.cn_flags &= TRYEMULROOT;	/* keep only this flag */

        if (fmode & O_CREAT) {
                ndp->ni_cnd.cn_nameiop = CREATE;
                ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
                if ((fmode & O_EXCL) == 0 &&
                    ((fmode & O_NOFOLLOW) == 0))
                        ndp->ni_cnd.cn_flags |= FOLLOW;
        } else {
                ndp->ni_cnd.cn_nameiop = LOOKUP;
                ndp->ni_cnd.cn_flags |= LOCKLEAF;
                if ((fmode & O_NOFOLLOW) == 0)
                        ndp->ni_cnd.cn_flags |= FOLLOW;
        }

        VERIEXEC_PATH_GET(ndp->ni_dirp, ndp->ni_segflg, ndp->ni_dirp, path);

        error = namei(ndp);
        if (error)
                goto out;

        vp = ndp->ni_vp;

#if NVERIEXEC > 0
        error = veriexec_openchk(l, ndp->ni_vp, ndp->ni_dirp, fmode);
        if (error)
                goto bad;
#endif /* NVERIEXEC > 0 */

        if (fmode & O_CREAT) {
                if (ndp->ni_vp == NULL) {
                        VATTR_NULL(&va);
                        va.va_type = VREG;
                        va.va_mode = cmode;
                        if (fmode & O_EXCL)
                                va.va_vaflags |= VA_EXCLUSIVE;
                        error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
                            &ndp->ni_cnd, &va);
                        if (error)
                                goto out;
                        fmode &= ~O_TRUNC;
                        vp = ndp->ni_vp;
                } else {
                        VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
                        if (ndp->ni_dvp == ndp->ni_vp)
                                vrele(ndp->ni_dvp);
                        else
                                vput(ndp->ni_dvp);
                        ndp->ni_dvp = NULL;
                        vp = ndp->ni_vp;
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                                goto bad;
                        }
                        fmode &= ~O_CREAT;
                }
        } else {
                vp = ndp->ni_vp;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad;
        }
        if (ndp->ni_vp->v_type == VLNK) {
                error = EFTYPE;
                goto bad;
        }

        if ((fmode & O_CREAT) == 0) {
                error = vn_openchk(vp, cred, fmode);
                if (error != 0)
                        goto bad;
        }

        if (fmode & O_TRUNC) {
                VOP_UNLOCK(vp, 0);			/* XXX */

                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
                VATTR_NULL(&va);
                va.va_size = 0;
                error = VOP_SETATTR(vp, &va, cred);
                if (error != 0)
                        goto bad;
        }
        if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
                goto bad;
        if (fmode & FWRITE) {
                mutex_enter(&vp->v_interlock);
                vp->v_writecount++;
                mutex_exit(&vp->v_interlock);
        }

bad:
        if (error)
                vput(vp);
out:
        VERIEXEC_PATH_PUT(path);
        return (error);
}
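
/*
 * Illustrative caller sketch (hypothetical, hedged): a typical
 * in-kernel open/use/close sequence.  vn_open() returns ni_vp locked;
 * vn_close() expects it unlocked and consumes the reference.
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path");
 *	error = vn_open(&nd, FREAD, 0);
 *	if (error == 0) {
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *		... I/O, e.g. via vn_rdwr() ...
 *		vn_close(nd.ni_vp, FREAD, curlwp->l_cred);
 *	}
 */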

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

        /*
         * If the vnode is in use as a process's text,
         * we can't allow writing.
         */
        if (vp->v_iflag & VI_TEXT)
                return (ETXTBSY);
        return (0);
}

int
vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
{
        int permbits = 0;
        int error;

        if ((fflags & FREAD) != 0) {
                permbits = VREAD;
        }
        if ((fflags & (FWRITE | O_TRUNC)) != 0) {
                permbits |= VWRITE;
                if (vp->v_type == VDIR) {
                        error = EISDIR;
                        goto bad;
                }
                error = vn_writechk(vp);
                if (error != 0)
                        goto bad;
        }
        error = VOP_ACCESS(vp, permbits, cred);
bad:
        return error;
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{

        if ((vp->v_iflag & VI_EXECMAP) != 0) {
                /* Safe unlocked, as long as caller holds a reference. */
                return;
        }

        mutex_enter(&vp->v_interlock);
        if ((vp->v_iflag & VI_EXECMAP) == 0) {
                /* Re-check held; move the pages from the file to the
                 * exec page count. */
                atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
                atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
                vp->v_iflag |= VI_EXECMAP;
        }
        mutex_exit(&vp->v_interlock);
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

        if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
                /* Safe unlocked, as long as caller holds a reference. */
                return (0);
        }

        mutex_enter(&vp->v_interlock);
        if (vp->v_writecount != 0) {
                KASSERT((vp->v_iflag & VI_TEXT) == 0);
                mutex_exit(&vp->v_interlock);
                return (ETXTBSY);
        }
        if ((vp->v_iflag & VI_EXECMAP) == 0) {
                atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
                atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
        }
        vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
        mutex_exit(&vp->v_interlock);
        return (0);
}
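
/*
 * Note: vn_writechk() and vn_marktext() jointly enforce mutual
 * exclusion between writers and process text.  A vnode with active
 * writers (v_writecount != 0) cannot become text, and a vnode marked
 * VI_TEXT cannot be opened for writing; both violations fail with
 * ETXTBSY.
 */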

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
{
        int error;

        if (flags & FWRITE) {
                mutex_enter(&vp->v_interlock);
                vp->v_writecount--;
                mutex_exit(&vp->v_interlock);
        }
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_CLOSE(vp, flags, cred);
        vput(vp);
        return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                if (rw == UIO_READ) {
                        vn_lock(vp, LK_SHARED | LK_RETRY);
                } else /* UIO_WRITE */ {
                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                }
        }
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_rw = rw;
        if (segflg == UIO_SYSSPACE) {
                UIO_SETUP_SYSSPACE(&auio);
        } else {
                auio.uio_vmspace = l->l_proc->p_vmspace;
        }
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
        }
        return (error);
}
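
/*
 * Illustrative use of vn_rdwr() (hypothetical caller): read the first
 * 512 bytes of an unlocked vnode into a kernel buffer.
 *
 *	char buf[512];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *	    0, curlwp->l_cred, &resid, curlwp);
 *
 * On success, sizeof(buf) - resid bytes were read.  Pass IO_NODELOCKED
 * in ioflg to skip the internal vn_lock()/VOP_UNLOCK() pair when the
 * vnode is already locked.
 */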

int
vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        struct iovec aiov;
        struct uio auio;
        int error, eofflag;

        /* Limit the size on any kernel buffers used by VOP_READDIR */
        count = min(MAXBSIZE, count);

unionread:
        if (vp->v_type != VDIR)
                return (EINVAL);
        aiov.iov_base = bf;
        aiov.iov_len = count;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        if (segflg == UIO_SYSSPACE) {
                UIO_SETUP_SYSSPACE(&auio);
        } else {
                KASSERT(l == curlwp);
                auio.uio_vmspace = l->l_proc->p_vmspace;
        }
        auio.uio_resid = count;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        auio.uio_offset = fp->f_offset;
        error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
            ncookies);
        FILE_LOCK(fp);
        fp->f_offset = auio.uio_offset;
        FILE_UNLOCK(fp);
        VOP_UNLOCK(vp, 0);
        if (error)
                return (error);

#if defined(UNION) || defined(LKM)
        /*
         * If nothing was read and a union hook is registered, let it
         * redirect us to the lower layer and try again.
         */
        if (count == auio.uio_resid && vn_union_readdir_hook) {
                struct vnode *ovp = vp;

                error = (*vn_union_readdir_hook)(&vp, fp, l);
                if (error)
                        return (error);
                if (vp != ovp)
                        goto unionread;
        }
#endif /* UNION || LKM */

        /*
         * Directory on a union mount: when the upper layer is exhausted,
         * switch to the covered vnode and continue reading there.
         */
        if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
            (vp->v_mount->mnt_flag & MNT_UNION)) {
                struct vnode *tvp = vp;
                vp = vp->v_mount->mnt_vnodecovered;
                VREF(vp);
                FILE_LOCK(fp);
                fp->f_data = vp;
                fp->f_offset = 0;
                FILE_UNLOCK(fp);
                vrele(tvp);
                goto unionread;
        }
        *done = count - auio.uio_resid;
        return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        int count, error, ioflag, fflag;

        ioflag = IO_ADV_ENCODE(fp->f_advice);
        fflag = fp->f_flag;
        if (fflag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
                ioflag |= IO_SYNC;
        if (fflag & FALTIO)
                ioflag |= IO_ALTSEMANTICS;
        if (fflag & FDIRECT)
                ioflag |= IO_DIRECT;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        uio->uio_offset = *offset;
        count = uio->uio_resid;
        error = VOP_READ(vp, uio, ioflag, cred);
        if (flags & FOF_UPDATE_OFFSET)
                *offset += count - uio->uio_resid;
        VOP_UNLOCK(vp, 0);
        return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        int count, error, ioflag, fflag;

        ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
        fflag = fp->f_flag;
        if (vp->v_type == VREG && (fflag & O_APPEND))
                ioflag |= IO_APPEND;
        if (fflag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if (fflag & FFSYNC ||
            (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
                ioflag |= IO_SYNC;
        else if (fflag & FDSYNC)
                ioflag |= IO_DSYNC;
        if (fflag & FALTIO)
                ioflag |= IO_ALTSEMANTICS;
        if (fflag & FDIRECT)
                ioflag |= IO_DIRECT;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        uio->uio_offset = *offset;
        count = uio->uio_resid;
        error = VOP_WRITE(vp, uio, ioflag, cred);
        if (flags & FOF_UPDATE_OFFSET) {
                if (ioflag & IO_APPEND)
                        *offset = uio->uio_offset;
                else
                        *offset += count - uio->uio_resid;
        }
        VOP_UNLOCK(vp, 0);
        return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(file_t *fp, struct stat *sb)
{
        struct vnode *vp = (struct vnode *)fp->f_data;

        return vn_stat(vp, sb);
}

int
vn_stat(struct vnode *vp, struct stat *sb)
{
        struct vattr va;
        int error;
        mode_t mode;

        error = VOP_GETATTR(vp, &va, kauth_cred_get());
        if (error)
                return (error);
        /*
         * Copy from vattr table
         */
        sb->st_dev = va.va_fsid;
        sb->st_ino = va.va_fileid;
        mode = va.va_mode;
        switch (vp->v_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        sb->st_nlink = va.va_nlink;
        sb->st_uid = va.va_uid;
        sb->st_gid = va.va_gid;
        sb->st_rdev = va.va_rdev;
        sb->st_size = va.va_size;
        sb->st_atimespec = va.va_atime;
        sb->st_mtimespec = va.va_mtime;
        sb->st_ctimespec = va.va_ctime;
        sb->st_birthtimespec = va.va_birthtime;
        sb->st_blksize = va.va_blocksize;
        sb->st_flags = va.va_flags;
        sb->st_gen = 0;
        sb->st_blocks = va.va_bytes / S_BLKSIZE;
        return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(file_t *fp, u_int com, void *data)
{
        struct vnode *vp = fp->f_data;
        int error;

        error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
        return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(file_t *fp, u_long com, void *data)
{
        struct vnode *vp = fp->f_data, *ovp;
        struct vattr vattr;
        int error;

        switch (vp->v_type) {

        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        error = VOP_GETATTR(vp, &vattr,
                            kauth_cred_get());
                        if (error)
                                return (error);
                        *(int *)data = vattr.va_size - fp->f_offset;
                        return (0);
                }
                if ((com == FIONWRITE) || (com == FIONSPACE)) {
                        /*
                         * Files don't have send queues, so there never
                         * are any bytes in them, nor is there any
                         * open space in them.
                         */
                        *(int *)data = 0;
                        return (0);
                }
                if (com == FIOGETBMAP) {
                        daddr_t *block;

                        if (*(daddr_t *)data < 0)
                                return (EINVAL);
                        block = (daddr_t *)data;
                        return (VOP_BMAP(vp, *block, NULL, block, NULL));
                }
                if (com == OFIOGETBMAP) {
                        daddr_t ibn, obn;

                        if (*(int32_t *)data < 0)
                                return (EINVAL);
                        ibn = (daddr_t)*(int32_t *)data;
                        error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
                        *(int32_t *)data = (int32_t)obn;
                        return error;
                }
                if (com == FIONBIO || com == FIOASYNC)	/* XXX */
                        return (0);			/* XXX */
                /* fall into ... */
        case VFIFO:
        case VCHR:
        case VBLK:
                error = VOP_IOCTL(vp, com, data, fp->f_flag,
                    kauth_cred_get());
                if (error == 0 && com == TIOCSCTTY) {
                        VREF(vp);
                        mutex_enter(proc_lock);
                        ovp = curproc->p_session->s_ttyvp;
                        curproc->p_session->s_ttyvp = vp;
                        mutex_exit(proc_lock);
                        if (ovp != NULL)
                                vrele(ovp);
                }
                return (error);

        default:
                return (EPASSTHROUGH);
        }
}
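
/*
 * Illustrative FIOGETBMAP use (hypothetical caller): translate logical
 * block 0 of a regular file to its on-disk address in place.
 *
 *	daddr_t bn = 0;
 *	error = (*fp->f_ops->fo_ioctl)(fp, FIOGETBMAP, &bn);
 *
 * On return bn holds the physical block number; unallocated blocks
 * map to (daddr_t)-1 by the usual VOP_BMAP() convention.
 */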

/*
 * File table vnode poll routine.
 */
static int
vn_poll(file_t *fp, int events)
{

        return (VOP_POLL(fp->f_data, events));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(file_t *fp, struct knote *kn)
{

        return (VOP_KQFILTER(fp->f_data, kn));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
        int error;

#if 0
        KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
            || (vp->v_iflag & VI_ONWORKLST) != 0);
#endif
        KASSERT((flags &
            ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
            LK_CANRECURSE))
            == 0);

#ifdef DIAGNOSTIC
        if (wapbl_vphaswapbl(vp))
                WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
#endif

        do {
                /*
                 * XXX PR 37706: forced unmount of file systems is unsafe.
                 * The race between vclean() and this code is the
                 * remaining problem.
                 */
                if (vp->v_iflag & VI_XLOCK) {
                        if ((flags & LK_INTERLOCK) == 0) {
                                mutex_enter(&vp->v_interlock);
                        }
                        flags &= ~LK_INTERLOCK;
                        if (flags & LK_NOWAIT) {
                                mutex_exit(&vp->v_interlock);
                                return EBUSY;
                        }
                        vwait(vp, VI_XLOCK);
                        mutex_exit(&vp->v_interlock);
                        error = ENOENT;
                } else {
                        if ((flags & LK_INTERLOCK) != 0) {
                                mutex_exit(&vp->v_interlock);
                        }
                        flags &= ~LK_INTERLOCK;
                        error = VOP_LOCK(vp, (flags & ~LK_RETRY));
                        if (error == 0 || error == EDEADLK || error == EBUSY)
                                return (error);
                }
        } while (flags & LK_RETRY);
        return (error);
}
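
/*
 * Illustrative use (hypothetical caller): take and release a shared
 * lock, retrying across pending vnode reclamation.
 *
 *	error = vn_lock(vp, LK_SHARED | LK_RETRY);
 *	if (error == 0) {
 *		... locked section ...
 *		VOP_UNLOCK(vp, 0);
 *	}
 *
 * Without LK_RETRY the call fails with ENOENT if the vnode is being
 * cleaned (VI_XLOCK); with LK_NOWAIT it returns EBUSY instead of
 * sleeping.
 */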

/*
 * File table vnode close routine.
 */
static int
vn_closefile(file_t *fp)
{

        return vn_close(fp->f_data, fp->f_flag, fp->f_cred);
}

/*
 * Enable LK_CANRECURSE on lock.  Return prior status.
 */
u_int
vn_setrecurse(struct vnode *vp)
{
        struct vnlock *lkp;

        lkp = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
        atomic_inc_uint(&lkp->vl_canrecurse);

        /*
         * Recursion is tracked by a counter, so there is no prior
         * status to hand back.
         */
        return 0;
}

/*
 * Called when done with locksetrecurse.
 */
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
        struct vnlock *lkp;

        lkp = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
        /* The saved flags are unused; just drop the recursion count. */
        atomic_dec_uint(&lkp->vl_canrecurse);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        aiov.iov_len = *buflen;
        aiov.iov_base = bf;

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        auio.uio_offset = 0;
        auio.uio_resid = *buflen;
        UIO_SETUP_SYSSPACE(&auio);

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL);

        if ((ioflg & IO_NODELOCKED) == 0)
                VOP_UNLOCK(vp, 0);

        if (error == 0)
                *buflen = *buflen - auio.uio_resid;

        return (error);
}
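
/*
 * Illustrative use (hypothetical caller and attribute name): fetch a
 * system-namespace attribute into a fixed-size kernel buffer.
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "some.attribute", &len, buf, curlwp);
 *
 * On success len is updated to the number of bytes actually read.
 */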

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        aiov.iov_len = buflen;
        aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_WRITE;
        auio.uio_offset = 0;
        auio.uio_resid = buflen;
        UIO_SETUP_SYSSPACE(&auio);

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL);

        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL);
        if (error == EOPNOTSUPP) {
                /*
                 * The file system has no delete operation; fall back to
                 * setting the attribute with a NULL uio to clear it.
                 */
                error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
                    NULL);
        }

        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}

void
vn_ra_allocctx(struct vnode *vp)
{
        struct uvm_ractx *ra = NULL;

        KASSERT(mutex_owned(&vp->v_interlock));

        if (vp->v_type != VREG) {
                return;
        }
        if (vp->v_ractx != NULL) {
                return;
        }
        if (vp->v_ractx == NULL) {
                /*
                 * Drop the interlock to allocate, then re-check: another
                 * thread may have installed a context in the meantime.
                 */
                mutex_exit(&vp->v_interlock);
                ra = uvm_ra_allocctx();
                mutex_enter(&vp->v_interlock);
                if (ra != NULL && vp->v_ractx == NULL) {
                        vp->v_ractx = ra;
                        ra = NULL;
                }
        }
        if (ra != NULL) {
                /* Lost the race; free the unused context. */
                uvm_ra_freectx(ra);
        }
}