/*	$NetBSD: vfs_vnops.c,v 1.135.2.3 2007/04/10 13:26:43 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.135.2.3 2007/04/10 13:26:43 ad Exp $");

#include "fs_union.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
#endif

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
        kauth_cred_t cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
        kauth_cred_t cred, int flags);
static int vn_closefile(struct file *fp, struct lwp *l);
static int vn_poll(struct file *fp, int events, struct lwp *l);
static int vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l);
static int vn_statfile(struct file *fp, struct stat *sb, struct lwp *l);
static int vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l);
/*
 * Operations dispatched for DTYPE_VNODE file descriptors.  The
 * initializers are positional: fo_read, fo_write, fo_ioctl, fo_fcntl,
 * fo_poll, fo_stat, fo_close, fo_kqfilter.
 */
const struct fileops vnops = {
        vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
        vn_statfile, vn_closefile, vn_kqfilter
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
        struct vnode *vp;
        struct lwp *l = ndp->ni_cnd.cn_lwp;
        kauth_cred_t cred = l->l_cred;
        struct vattr va;
        int error;
        pathname_t pn = NULL;

        if (fmode & O_CREAT) {
                ndp->ni_cnd.cn_nameiop = CREATE;
                ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
                if ((fmode & O_EXCL) == 0 &&
                    ((fmode & O_NOFOLLOW) == 0))
                        ndp->ni_cnd.cn_flags |= FOLLOW;
        } else {
                ndp->ni_cnd.cn_nameiop = LOOKUP;
                ndp->ni_cnd.cn_flags = LOCKLEAF;
                if ((fmode & O_NOFOLLOW) == 0)
                        ndp->ni_cnd.cn_flags |= FOLLOW;
        }
#if NVERIEXEC > 0
        error = pathname_get(ndp->ni_dirp, ndp->ni_segflg, &pn);
        if (error)
                goto bad2;
        ndp->ni_dirp = pathname_path(pn);
        ndp->ni_segflg = UIO_SYSSPACE;
#endif /* NVERIEXEC > 0 */
        error = namei(ndp);
        if (error)
                goto bad2;

        vp = ndp->ni_vp;

#if NVERIEXEC > 0
        error = veriexec_openchk(l, ndp->ni_vp, ndp->ni_dirp, fmode);
        if (error)
                goto bad;
#endif /* NVERIEXEC > 0 */

        if (fmode & O_CREAT) {
                if (ndp->ni_vp == NULL) {
                        VATTR_NULL(&va);
                        va.va_type = VREG;
                        va.va_mode = cmode;
                        if (fmode & O_EXCL)
                                va.va_vaflags |= VA_EXCLUSIVE;
                        VOP_LEASE(ndp->ni_dvp, l, cred, LEASE_WRITE);
                        error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
                            &ndp->ni_cnd, &va);
                        if (error)
                                goto bad2;
                        fmode &= ~O_TRUNC;
                        vp = ndp->ni_vp;
                } else {
                        VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
                        if (ndp->ni_dvp == ndp->ni_vp)
                                vrele(ndp->ni_dvp);
                        else
                                vput(ndp->ni_dvp);
                        ndp->ni_dvp = NULL;
                        vp = ndp->ni_vp;
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                                goto bad;
                        }
                        fmode &= ~O_CREAT;
                }
        } else {
                vp = ndp->ni_vp;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad;
        }
        if (ndp->ni_vp->v_type == VLNK) {
                error = EFTYPE;
                goto bad;
        }

        if ((fmode & O_CREAT) == 0) {
                if (fmode & FREAD) {
                        if ((error = VOP_ACCESS(vp, VREAD, cred, l)) != 0)
                                goto bad;
                }

                if (fmode & (FWRITE | O_TRUNC)) {
                        if (vp->v_type == VDIR) {
                                error = EISDIR;
                                goto bad;
                        }
                        if ((error = vn_writechk(vp)) != 0 ||
                            (error = VOP_ACCESS(vp, VWRITE, cred, l)) != 0)
                                goto bad;
                }
        }

        if (fmode & O_TRUNC) {
                VOP_UNLOCK(vp, 0);                      /* XXX */

                VOP_LEASE(vp, l, cred, LEASE_WRITE);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);   /* XXX */
                VATTR_NULL(&va);
                va.va_size = 0;
                error = VOP_SETATTR(vp, &va, cred, l);
                if (error != 0)
                        goto bad;
        }
        if ((error = VOP_OPEN(vp, fmode, cred, l)) != 0)
                goto bad;
        if (vp->v_type == VREG &&
            uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
                error = EIO;
                goto bad;
        }
        if (fmode & FWRITE)
                vp->v_writecount++;

bad:
        if (error)
                vput(vp);

bad2:
        pathname_put(pn);

        return (error);
}
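
/*
 * Usage sketch (illustrative, not part of this file): an in-kernel
 * caller initializes a nameidata with NDINIT() and, on success,
 * receives ndp->ni_vp locked and referenced; vn_open() overrides the
 * nameiop and lock flags itself.  The path here is a made-up example:
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path", l);
 *	error = vn_open(&nd, FREAD, 0);
 *	if (error == 0) {
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *		...use nd.ni_vp...
 *		vn_close(nd.ni_vp, FREAD, l->l_cred, l);
 *	}
 */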

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

        /*
         * If the vnode is in use as a process's text,
         * we can't allow writing.
         */
        if (vp->v_flag & VTEXT)
                return (ETXTBSY);
        return (0);
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{
        if ((vp->v_flag & VEXECMAP) == 0) {
                uvmexp.filepages -= vp->v_uobj.uo_npages;
                uvmexp.execpages += vp->v_uobj.uo_npages;
        }
        vp->v_flag |= VEXECMAP;
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

        if (vp->v_writecount != 0) {
                KASSERT((vp->v_flag & VTEXT) == 0);
                return (ETXTBSY);
        }
        vp->v_flag |= VTEXT;
        vn_markexec(vp);
        return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l)
{
        int error;

        if (flags & FWRITE)
                vp->v_writecount--;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_CLOSE(vp, flags, cred, l);
        vput(vp);
        return (error);
}
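
/*
 * Note for callers (a sketch of the contract as implemented above, not
 * new mechanism): vn_close() consumes the caller's reference via
 * vput(), so the vnode must not be vrele()'d again afterwards, and
 * `flags' should match the FREAD/FWRITE mode the vnode was opened
 * with so that v_writecount stays balanced:
 *
 *	error = vn_close(vp, FWRITE, cred, l);
 *	vp = NULL;	...reference is gone...
 */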

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                if (rw == UIO_READ) {
                        vn_lock(vp, LK_SHARED | LK_RETRY);
                } else /* UIO_WRITE */ {
                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                }
        }
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_rw = rw;
        if (segflg == UIO_SYSSPACE) {
                UIO_SETUP_SYSSPACE(&auio);
        } else {
                auio.uio_vmspace = l->l_proc->p_vmspace;
        }
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
        }
        return (error);
}
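
/*
 * Usage sketch (illustrative): read the first 512 bytes of an
 * already-referenced vnode into a kernel buffer.  Passing a NULL
 * `aresid' turns a short transfer into EIO, per the code above:
 *
 *	char buf[512];
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, l->l_cred, NULL, l);
 */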

int
vn_readdir(struct file *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        struct iovec aiov;
        struct uio auio;
        int error, eofflag;

        /* Limit the size on any kernel buffers used by VOP_READDIR */
        count = min(MAXBSIZE, count);

unionread:
        if (vp->v_type != VDIR)
                return (EINVAL);
        aiov.iov_base = bf;
        aiov.iov_len = count;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        if (segflg == UIO_SYSSPACE) {
                UIO_SETUP_SYSSPACE(&auio);
        } else {
                KASSERT(l == curlwp);
                auio.uio_vmspace = l->l_proc->p_vmspace;
        }
        auio.uio_resid = count;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        auio.uio_offset = fp->f_offset;
        error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
            ncookies);
        mutex_enter(&fp->f_lock);
        fp->f_offset = auio.uio_offset;
        mutex_exit(&fp->f_lock);
        VOP_UNLOCK(vp, 0);
        if (error)
                return (error);

#if defined(UNION) || defined(LKM)
        if (count == auio.uio_resid && vn_union_readdir_hook) {
                struct vnode *ovp = vp;

                error = (*vn_union_readdir_hook)(&vp, fp, l);
                if (error)
                        return (error);
                if (vp != ovp)
                        goto unionread;
        }
#endif /* UNION || LKM */

        if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
            (vp->v_mount->mnt_flag & MNT_UNION)) {
                struct vnode *tvp = vp;
                vp = vp->v_mount->mnt_vnodecovered;
                VREF(vp);
                fp->f_data = vp;
                mutex_enter(&fp->f_lock);
                fp->f_offset = 0;
                mutex_exit(&fp->f_lock);
                vrele(tvp);
                goto unionread;
        }
        *done = count - auio.uio_resid;
        return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        int count, error, ioflag;
        struct lwp *l = curlwp;

        VOP_LEASE(vp, l, cred, LEASE_READ);
        ioflag = IO_ADV_ENCODE(fp->f_advice);
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
                ioflag |= IO_SYNC;
        if (fp->f_flag & FALTIO)
                ioflag |= IO_ALTSEMANTICS;
        if (fp->f_flag & FDIRECT)
                ioflag |= IO_DIRECT;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        uio->uio_offset = *offset;
        count = uio->uio_resid;
        error = VOP_READ(vp, uio, ioflag, cred);
        if (flags & FOF_UPDATE_OFFSET)
                *offset += count - uio->uio_resid;
        VOP_UNLOCK(vp, 0);
        return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
        struct vnode *vp = (struct vnode *)fp->f_data;
        int count, error, ioflag = IO_UNIT;
        struct lwp *l = curlwp;

        if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
                ioflag |= IO_APPEND;
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if (fp->f_flag & FFSYNC ||
            (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
                ioflag |= IO_SYNC;
        else if (fp->f_flag & FDSYNC)
                ioflag |= IO_DSYNC;
        if (fp->f_flag & FALTIO)
                ioflag |= IO_ALTSEMANTICS;
        if (fp->f_flag & FDIRECT)
                ioflag |= IO_DIRECT;
        VOP_LEASE(vp, l, cred, LEASE_WRITE);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        uio->uio_offset = *offset;
        count = uio->uio_resid;
        error = VOP_WRITE(vp, uio, ioflag, cred);
        if (flags & FOF_UPDATE_OFFSET) {
                if (ioflag & IO_APPEND)
                        *offset = uio->uio_offset;
                else
                        *offset += count - uio->uio_resid;
        }
        VOP_UNLOCK(vp, 0);
        return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct lwp *l)
{
        struct vnode *vp = (struct vnode *)fp->f_data;

        return vn_stat(vp, sb, l);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct lwp *l)
{
        struct vattr va;
        int error;
        mode_t mode;

        error = VOP_GETATTR(vp, &va, l->l_cred, l);
        if (error)
                return (error);
        /*
         * Copy from vattr table
         */
        sb->st_dev = va.va_fsid;
        sb->st_ino = va.va_fileid;
        mode = va.va_mode;
        switch (vp->v_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        sb->st_nlink = va.va_nlink;
        sb->st_uid = va.va_uid;
        sb->st_gid = va.va_gid;
        sb->st_rdev = va.va_rdev;
        sb->st_size = va.va_size;
        sb->st_atimespec = va.va_atime;
        sb->st_mtimespec = va.va_mtime;
        sb->st_ctimespec = va.va_ctime;
        sb->st_birthtimespec = va.va_birthtime;
        sb->st_blksize = va.va_blocksize;
        sb->st_flags = va.va_flags;
        sb->st_gen = 0;
        sb->st_blocks = va.va_bytes / S_BLKSIZE;
        return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l)
{
        struct vnode *vp = ((struct vnode *)fp->f_data);
        int error;

        error = VOP_FCNTL(vp, com, data, fp->f_flag, l->l_cred, l);
        return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
{
        struct vnode *vp = ((struct vnode *)fp->f_data), *ovp;
        struct proc *p = l->l_proc;
        struct vattr vattr;
        int error;

        switch (vp->v_type) {

        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        error = VOP_GETATTR(vp, &vattr, l->l_cred, l);
                        if (error)
                                return (error);
                        *(int *)data = vattr.va_size - fp->f_offset;
                        return (0);
                }
                if ((com == FIONWRITE) || (com == FIONSPACE)) {
                        /*
                         * Files don't have send queues, so there never
                         * are any bytes in them, nor is there any
                         * open space in them.
                         */
                        *(int *)data = 0;
                        return (0);
                }
                if (com == FIOGETBMAP) {
                        daddr_t *block;

                        if (*(daddr_t *)data < 0)
                                return (EINVAL);
                        block = (daddr_t *)data;
                        return (VOP_BMAP(vp, *block, NULL, block, NULL));
                }
                if (com == OFIOGETBMAP) {
                        daddr_t ibn, obn;

                        if (*(int32_t *)data < 0)
                                return (EINVAL);
                        ibn = (daddr_t)*(int32_t *)data;
                        error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
                        *(int32_t *)data = (int32_t)obn;
                        return error;
                }
                if (com == FIONBIO || com == FIOASYNC)  /* XXX */
                        return (0);                     /* XXX */
                /* fall into ... */
        case VFIFO:
        case VCHR:
        case VBLK:
                error = VOP_IOCTL(vp, com, data, fp->f_flag,
                    l->l_cred, l);
                if (error == 0 && com == TIOCSCTTY) {
                        VREF(vp);
                        mutex_enter(&proclist_lock);
                        ovp = p->p_session->s_ttyvp;
                        p->p_session->s_ttyvp = vp;
                        mutex_exit(&proclist_lock);
                        if (ovp != NULL)
                                vrele(ovp);
                }
                return (error);

        default:
                return (EPASSTHROUGH);
        }
}
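
/*
 * Usage sketch for FIOGETBMAP (illustrative userland code, not part of
 * this file): the ioctl maps a file-relative logical block number to
 * the underlying filesystem block number in place:
 *
 *	daddr_t blk = 0;	...logical block 0...
 *
 *	if (ioctl(fd, FIOGETBMAP, &blk) == 0)
 *		printf("block 0 lives at fs block %lld\n", (long long)blk);
 */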

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct lwp *l)
{

        return (VOP_POLL(((struct vnode *)fp->f_data), events, l));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(struct file *fp, struct knote *kn)
{

        return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
        int error;

#if 0
        KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
            || (vp->v_flag & VONWORKLST) != 0);
#endif
        KASSERT((flags &
            ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_DRAIN|LK_NOWAIT|LK_RETRY|
            LK_SETRECURSE|LK_CANRECURSE))
            == 0);

        do {
                if ((flags & LK_INTERLOCK) == 0)
                        mutex_enter(&vp->v_interlock);
                if (vp->v_flag & VXLOCK) {
                        /*
                         * The vnode is being cleaned out.  Wait for the
                         * cleaning to finish, then report ENOENT (or go
                         * around again if LK_RETRY was given).
                         */
                        if (flags & LK_NOWAIT) {
                                mutex_exit(&vp->v_interlock);
                                return EBUSY;
                        }
                        vp->v_flag |= VXWANT;
                        cv_wait(&vp->v_cv, &vp->v_interlock);
                        mutex_exit(&vp->v_interlock);
                        error = ENOENT;
                } else {
                        error = VOP_LOCK(vp,
                            (flags & ~LK_RETRY) | LK_INTERLOCK);
                        if (error == 0 || error == EDEADLK || error == EBUSY)
                                return (error);
                }
                flags &= ~LK_INTERLOCK;
        } while (flags & LK_RETRY);
        return (error);
}
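
/*
 * Usage sketch (illustrative): LK_RETRY keeps retrying across vnode
 * reclamation and is used where the caller holds a reference and must
 * end up with the lock; without LK_RETRY a dying vnode is reported as
 * ENOENT, which the caller can check:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE) == 0) {
 *		...locked; vp was not reclaimed...
 *		VOP_UNLOCK(vp, 0);
 *	}
 */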

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct lwp *l)
{

        return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
            fp->f_cred, l));
}

/*
 * Enable LK_CANRECURSE on lock. Return prior status.
 */
u_int
vn_setrecurse(struct vnode *vp)
{
        struct lock *lkp = &vp->v_lock;
        u_int retval = lkp->lk_flags & LK_CANRECURSE;

        lkp->lk_flags |= LK_CANRECURSE;
        return retval;
}

714
715 /*
716 * Called when done with locksetrecurse.
717 */
718 void
719 vn_restorerecurse(struct vnode *vp, u_int flags)
720 {
721 struct lock *lkp = &vp->v_lock;
722
723 lkp->lk_flags &= ~LK_CANRECURSE;
724 lkp->lk_flags |= flags;
725 }
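
/*
 * Usage sketch (illustrative): the two calls bracket a region where
 * the caller may legitimately take the vnode lock recursively, then
 * restore the previous state:
 *
 *	u_int saved;
 *
 *	saved = vn_setrecurse(vp);
 *	...operations that may relock vp...
 *	vn_restorerecurse(vp, saved);
 */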

int
vn_cow_establish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
        struct spec_cow_entry *e;

        MALLOC(e, struct spec_cow_entry *, sizeof(struct spec_cow_entry),
            M_DEVBUF, M_WAITOK);
        e->ce_func = func;
        e->ce_cookie = cookie;

        mutex_enter(&vp->v_spec_cow_lock);
        vp->v_spec_cow_req++;
        while (vp->v_spec_cow_count > 0)
                mtsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
                    &vp->v_spec_cow_lock);

        SLIST_INSERT_HEAD(&vp->v_spec_cow_head, e, ce_list);

        vp->v_spec_cow_req--;
        if (vp->v_spec_cow_req == 0)
                wakeup(&vp->v_spec_cow_req);
        mutex_exit(&vp->v_spec_cow_lock);

        return 0;
}

int
vn_cow_disestablish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
        struct spec_cow_entry *e;

        mutex_enter(&vp->v_spec_cow_lock);
        vp->v_spec_cow_req++;
        while (vp->v_spec_cow_count > 0)
                mtsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
                    &vp->v_spec_cow_lock);

        SLIST_FOREACH(e, &vp->v_spec_cow_head, ce_list)
                if (e->ce_func == func && e->ce_cookie == cookie) {
                        SLIST_REMOVE(&vp->v_spec_cow_head, e,
                            spec_cow_entry, ce_list);
                        FREE(e, M_DEVBUF);
                        break;
                }

        vp->v_spec_cow_req--;
        if (vp->v_spec_cow_req == 0)
                wakeup(&vp->v_spec_cow_req);
        mutex_exit(&vp->v_spec_cow_lock);

        return e ? 0 : EINVAL;
}
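
/*
 * Note (sketch of intended use; the consumer named here is an
 * assumption, and the handler/cookie names are made up): these hooks
 * let snapshot code such as fss(4) intercept writes to a device vnode
 * for copy-on-write processing.  Establish and disestablish with a
 * matching (func, cookie) pair:
 *
 *	vn_cow_establish(devvp, my_cow_handler, sc);
 *	...
 *	vn_cow_disestablish(devvp, my_cow_handler, sc);
 */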

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        aiov.iov_len = *buflen;
        aiov.iov_base = bf;

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        auio.uio_offset = 0;
        auio.uio_resid = *buflen;
        UIO_SETUP_SYSSPACE(&auio);

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
            l);

        if ((ioflg & IO_NODELOCKED) == 0)
                VOP_UNLOCK(vp, 0);

        if (error == 0)
                *buflen = *buflen - auio.uio_resid;

        return (error);
}

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        aiov.iov_len = buflen;
        aiov.iov_base = __UNCONST(bf);          /* XXXUNCONST kills const */

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_WRITE;
        auio.uio_offset = 0;
        auio.uio_resid = buflen;
        UIO_SETUP_SYSSPACE(&auio);

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, l);

        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, l);
        if (error == EOPNOTSUPP)
                error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
                    NULL, l);

        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}
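
/*
 * Usage sketch (illustrative; the attribute name is made up): fetch a
 * system-namespace attribute into a kernel buffer.  On success *buflen
 * is updated to the number of bytes actually read; with ioflg == 0 the
 * wrapper locks and unlocks the vnode itself:
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "myattr", &len, buf, l);
 */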

void
vn_ra_allocctx(struct vnode *vp)
{
        struct uvm_ractx *ra = NULL;

        if (vp->v_type != VREG) {
                return;
        }
        if (vp->v_ractx != NULL) {
                return;
        }
        mutex_enter(&vp->v_interlock);
        if (vp->v_ractx == NULL) {
                /*
                 * Drop the interlock to allocate (the allocation can
                 * sleep), then re-take it and re-check: another thread
                 * may have installed a context in the meantime.
                 */
                mutex_exit(&vp->v_interlock);
                ra = uvm_ra_allocctx();
                mutex_enter(&vp->v_interlock);
                if (ra != NULL && vp->v_ractx == NULL) {
                        vp->v_ractx = ra;
                        ra = NULL;
                }
        }
        mutex_exit(&vp->v_interlock);
        if (ra != NULL) {
                /* Lost the race; free the unused context. */
                uvm_ra_freectx(ra);
        }
}