1 /* $NetBSD: vfs_vnops.c,v 1.241 2023/04/22 13:53:02 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1982, 1986, 1989, 1993
34 * The Regents of the University of California. All rights reserved.
35 * (c) UNIX System Laboratories, Inc.
36 * All or some portions of this file are derived from material licensed
37 * to the University of California by American Telephone and Telegraph
38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39 * the permission of UNIX System Laboratories, Inc.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95
66 */
67
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.241 2023/04/22 13:53:02 riastradh Exp $");
70
71 #include "veriexec.h"
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/file.h>
77 #include <sys/stat.h>
78 #include <sys/buf.h>
79 #include <sys/proc.h>
80 #include <sys/mount.h>
81 #include <sys/namei.h>
82 #include <sys/vnode_impl.h>
83 #include <sys/ioctl.h>
84 #include <sys/tty.h>
85 #include <sys/poll.h>
86 #include <sys/kauth.h>
87 #include <sys/syslog.h>
88 #include <sys/fstrans.h>
89 #include <sys/atomic.h>
90 #include <sys/filedesc.h>
91 #include <sys/wapbl.h>
92 #include <sys/mman.h>
93
94 #include <miscfs/specfs/specdev.h>
95 #include <miscfs/fifofs/fifo.h>
96
97 #include <uvm/uvm_extern.h>
98 #include <uvm/uvm_readahead.h>
99 #include <uvm/uvm_device.h>
100
101 #ifdef UNION
102 #include <fs/union/union.h>
103 #endif
104
105 #ifndef COMPAT_ZERODEV
106 #define COMPAT_ZERODEV(dev) (0)
107 #endif
108
109 int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
110
111 #include <sys/verified_exec.h>
112
113 static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
114 kauth_cred_t cred, int flags);
115 static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
116 kauth_cred_t cred, int flags);
117 static int vn_closefile(file_t *fp);
118 static int vn_poll(file_t *fp, int events);
119 static int vn_fcntl(file_t *fp, u_int com, void *data);
120 static int vn_statfile(file_t *fp, struct stat *sb);
121 static int vn_ioctl(file_t *fp, u_long com, void *data);
122 static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
123 struct uvm_object **, int *);
124 static int vn_seek(struct file *, off_t, int, off_t *, int);
125 static int vn_advlock(struct file *, void *, int, struct flock *, int);
126 static int vn_fpathconf(struct file *, int, register_t *);
127 static int vn_posix_fadvise(struct file *, off_t, off_t, int);
128
129 const struct fileops vnops = {
130 .fo_name = "vn",
131 .fo_read = vn_read,
132 .fo_write = vn_write,
133 .fo_ioctl = vn_ioctl,
134 .fo_fcntl = vn_fcntl,
135 .fo_poll = vn_poll,
136 .fo_stat = vn_statfile,
137 .fo_close = vn_closefile,
138 .fo_kqfilter = vn_kqfilter,
139 .fo_restart = fnullop_restart,
140 .fo_mmap = vn_mmap,
141 .fo_seek = vn_seek,
142 .fo_advlock = vn_advlock,
143 .fo_fpathconf = vn_fpathconf,
144 .fo_posix_fadvise = vn_posix_fadvise,
145 };
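
/*
 * Illustrative note (not part of this file): a file descriptor backed by
 * a vnode points at the vnops table above.  The open path (do_open() and
 * friends in vfs_syscalls.c) wires a new file up roughly as:
 *
 *	fp->f_vnode = vp;
 *	fp->f_ops = &vnops;
 */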
146
147 /*
148 * Common code for vnode open operations.
149 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
150 *
151 * at_dvp is the directory for openat(), if any.
152 * pb is the path.
153 * nmode is additional namei flags, restricted to TRYEMULROOT and NOCHROOT.
154 * fmode is the open flags, converted from O_* to F*.
155 * cmode is the creation file permissions.
156 *
157 * XXX shouldn't cmode be mode_t?
158 *
159 * On success produces either a locked vnode in *ret_vp, or NULL in
160 * *ret_vp and a file descriptor number in *ret_fd.
161 *
162 * The caller may pass NULL for ret_fd (and ret_domove), in which case
163 * EOPNOTSUPP will be produced in the cases that would otherwise return
164 * a file descriptor.
165 *
166 * Note that callers that want no-follow behavior should pass
167 * O_NOFOLLOW in fmode. Neither FOLLOW nor NOFOLLOW in nmode is
168 * honored.
169 */
170 int
171 vn_open(struct vnode *at_dvp, struct pathbuf *pb,
172 int nmode, int fmode, int cmode,
173 struct vnode **ret_vp, bool *ret_domove, int *ret_fd)
174 {
175 struct nameidata nd;
176 struct vnode *vp = NULL;
177 struct lwp *l = curlwp;
178 kauth_cred_t cred = l->l_cred;
179 struct vattr va;
180 int error;
181 const char *pathstring;
182
183 KASSERT((nmode & (TRYEMULROOT | NOCHROOT)) == nmode);
184
185 KASSERT(ret_vp != NULL);
186 KASSERT((ret_domove == NULL) == (ret_fd == NULL));
187
188 if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
189 return EINVAL;
190
191 NDINIT(&nd, LOOKUP, nmode, pb);
192 if (at_dvp != NULL)
193 NDAT(&nd, at_dvp);
194
195 nd.ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;
196
197 if (fmode & O_CREAT) {
198 nd.ni_cnd.cn_nameiop = CREATE;
199 nd.ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
200 if ((fmode & O_EXCL) == 0 &&
201 ((fmode & O_NOFOLLOW) == 0))
202 nd.ni_cnd.cn_flags |= FOLLOW;
203 if ((fmode & O_EXCL) == 0)
204 nd.ni_cnd.cn_flags |= NONEXCLHACK;
205 } else {
206 nd.ni_cnd.cn_nameiop = LOOKUP;
207 nd.ni_cnd.cn_flags |= LOCKLEAF;
208 if ((fmode & O_NOFOLLOW) == 0)
209 nd.ni_cnd.cn_flags |= FOLLOW;
210 }
211
212 pathstring = pathbuf_stringcopy_get(nd.ni_pathbuf);
213 if (pathstring == NULL) {
214 return ENOMEM;
215 }
216
217 /*
218 * When this "interface" was exposed to do_open() it used
219 * to initialize l_dupfd to -newfd-1 (thus passing in the
220 * new file handle number to use)... but nothing in the
221 * kernel uses that value. So just send 0.
222 */
223 l->l_dupfd = 0;
224
225 error = namei(&nd);
226 if (error)
227 goto out;
228
229 vp = nd.ni_vp;
230
231 #if NVERIEXEC > 0
232 error = veriexec_openchk(l, nd.ni_vp, pathstring, fmode);
233 if (error) {
234 /* We have to release the locks ourselves */
235 /*
236 * 20210604 dholland passing NONEXCLHACK means we can
237 * get ni_dvp == NULL back if ni_vp exists, and we should
238 * treat that like the non-O_CREAT case.
239 */
240 if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
241 if (vp == NULL) {
242 vput(nd.ni_dvp);
243 } else {
244 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
245 if (nd.ni_dvp == nd.ni_vp)
246 vrele(nd.ni_dvp);
247 else
248 vput(nd.ni_dvp);
249 nd.ni_dvp = NULL;
250 vput(vp);
251 vp = NULL;
252 }
253 } else {
254 vput(vp);
255 vp = NULL;
256 }
257 goto out;
258 }
259 #endif /* NVERIEXEC > 0 */
260
261 /*
262 * 20210604 dholland ditto
263 */
264 if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
265 if (nd.ni_vp == NULL) {
266 vattr_null(&va);
267 va.va_type = VREG;
268 va.va_mode = cmode;
269 if (fmode & O_EXCL)
270 va.va_vaflags |= VA_EXCLUSIVE;
271 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
272 &nd.ni_cnd, &va);
273 if (error) {
274 vput(nd.ni_dvp);
275 goto out;
276 }
277 fmode &= ~O_TRUNC;
278 vp = nd.ni_vp;
279 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
280 vput(nd.ni_dvp);
281 } else {
282 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
283 if (nd.ni_dvp == nd.ni_vp)
284 vrele(nd.ni_dvp);
285 else
286 vput(nd.ni_dvp);
287 nd.ni_dvp = NULL;
288 vp = nd.ni_vp;
289 if (fmode & O_EXCL) {
290 error = EEXIST;
291 goto bad;
292 }
293 fmode &= ~O_CREAT;
294 }
295 } else if ((fmode & O_CREAT) != 0) {
296 /*
297 * 20210606 dholland passing NONEXCLHACK means this
298 * case exists; it is the same as the following one
299 * but also needs to do things in the second (exists)
300 * half of the following block. (Besides handle
301 * ni_dvp, anyway.)
302 */
303 vp = nd.ni_vp;
304 KASSERT((fmode & O_EXCL) == 0);
305 fmode &= ~O_CREAT;
306 } else {
307 vp = nd.ni_vp;
308 }
309 if (vp->v_type == VSOCK) {
310 error = EOPNOTSUPP;
311 goto bad;
312 }
313 if (nd.ni_vp->v_type == VLNK) {
314 error = EFTYPE;
315 goto bad;
316 }
317
318 if ((fmode & O_CREAT) == 0) {
319 error = vn_openchk(vp, cred, fmode);
320 if (error != 0)
321 goto bad;
322 }
323
324 if (fmode & O_TRUNC) {
325 vattr_null(&va);
326 va.va_size = 0;
327 error = VOP_SETATTR(vp, &va, cred);
328 if (error != 0)
329 goto bad;
330 }
331 if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
332 goto bad;
333 if (fmode & FWRITE) {
334 mutex_enter(vp->v_interlock);
335 vp->v_writecount++;
336 mutex_exit(vp->v_interlock);
337 }
338
339 bad:
340 if (error) {
341 vput(vp);
342 vp = NULL;
343 }
344 out:
345 pathbuf_stringcopy_put(nd.ni_pathbuf, pathstring);
346
347 switch (error) {
348 case EDUPFD:
349 case EMOVEFD:
350 /* if the caller isn't prepared to handle fds, fail for them */
351 if (ret_fd == NULL) {
352 error = EOPNOTSUPP;
353 break;
354 }
355 *ret_vp = NULL;
356 *ret_domove = error == EMOVEFD;
357 *ret_fd = l->l_dupfd;
358 error = 0;
359 break;
360 case 0:
361 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
362 *ret_vp = vp;
363 break;
364 }
365 l->l_dupfd = 0;
366 return error;
367 }
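
/*
 * Illustrative sketch (hypothetical helper, not part of this file): an
 * in-kernel caller that opens a path read-only with vn_open() and closes
 * it again.  The helper name and path handling are assumptions; the
 * calling pattern mirrors vn_bdev_openpath() later in this file.
 */
#if 0
static int
example_open_close(const char *path, struct lwp *l)
{
	struct pathbuf *pb;
	struct vnode *vp;
	int error;

	pb = pathbuf_create(path);
	if (pb == NULL)
		return ENOMEM;
	/* Plain lookup (no O_CREAT), following symlinks by default. */
	error = vn_open(NULL, pb, 0, FREAD, 0, &vp, NULL, NULL);
	pathbuf_destroy(pb);
	if (error != 0)
		return error;
	/* vn_open() returns vp locked; unlock before vn_close(). */
	VOP_UNLOCK(vp);
	return vn_close(vp, FREAD, l->l_cred);
}
#endif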
368
369 /*
370 * Check for write permissions on the specified vnode.
371 * Prototype text segments cannot be written.
372 */
373 int
374 vn_writechk(struct vnode *vp)
375 {
376
377 /*
378 * If the vnode is in use as a process's text,
379 * we can't allow writing.
380 */
381 if (vp->v_iflag & VI_TEXT)
382 return ETXTBSY;
383 return 0;
384 }
385
386 int
387 vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
388 {
389 int permbits = 0;
390 int error;
391
392 if (vp->v_type == VNON || vp->v_type == VBAD)
393 return ENXIO;
394
395 if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
396 return ENOTDIR;
397
398 if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
399 return EFTYPE;
400
401 if ((fflags & FREAD) != 0) {
402 permbits = VREAD;
403 }
404 if ((fflags & FEXEC) != 0) {
405 permbits |= VEXEC;
406 }
407 if ((fflags & (FWRITE | O_TRUNC)) != 0) {
408 permbits |= VWRITE;
409 if (vp->v_type == VDIR) {
410 error = EISDIR;
411 goto bad;
412 }
413 error = vn_writechk(vp);
414 if (error != 0)
415 goto bad;
416 }
417 error = VOP_ACCESS(vp, permbits, cred);
418 bad:
419 return error;
420 }
421
422 /*
423 * Mark a vnode as having executable mappings.
424 */
425 void
426 vn_markexec(struct vnode *vp)
427 {
428
429 if ((vp->v_iflag & VI_EXECMAP) != 0) {
430 /* Safe unlocked, as long as caller holds a reference. */
431 return;
432 }
433
434 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
435 mutex_enter(vp->v_interlock);
436 if ((vp->v_iflag & VI_EXECMAP) == 0) {
437 cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
438 vp->v_iflag |= VI_EXECMAP;
439 }
440 mutex_exit(vp->v_interlock);
441 rw_exit(vp->v_uobj.vmobjlock);
442 }
443
444 /*
445 * Mark a vnode as being the text of a process.
446 * Fail if the vnode is currently writable.
447 */
448 int
449 vn_marktext(struct vnode *vp)
450 {
451
452 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
453 /* Safe unlocked, as long as caller holds a reference. */
454 return 0;
455 }
456
457 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
458 mutex_enter(vp->v_interlock);
459 if (vp->v_writecount != 0) {
460 KASSERT((vp->v_iflag & VI_TEXT) == 0);
461 mutex_exit(vp->v_interlock);
462 rw_exit(vp->v_uobj.vmobjlock);
463 return ETXTBSY;
464 }
465 if ((vp->v_iflag & VI_EXECMAP) == 0) {
466 cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
467 }
468 vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
469 mutex_exit(vp->v_interlock);
470 rw_exit(vp->v_uobj.vmobjlock);
471 return 0;
472 }
473
474 /*
475 * Vnode close call
476 *
477 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
478 */
479 int
480 vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
481 {
482 int error;
483
484 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
485 if (flags & FWRITE) {
486 mutex_enter(vp->v_interlock);
487 KASSERT(vp->v_writecount > 0);
488 vp->v_writecount--;
489 mutex_exit(vp->v_interlock);
490 }
491 error = VOP_CLOSE(vp, flags, cred);
492 vput(vp);
493 return error;
494 }
495
496 static int
497 enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
498 {
499 struct lwp *l = curlwp;
500 off_t testoff;
501
502 if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
503 return 0;
504
505 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
506 if (ioflag & IO_APPEND)
507 testoff = vp->v_size;
508 else
509 testoff = uio->uio_offset;
510
511 if (testoff + uio->uio_resid >
512 l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
513 mutex_enter(&proc_lock);
514 psignal(l->l_proc, SIGXFSZ);
515 mutex_exit(&proc_lock);
516 return EFBIG;
517 }
518
519 return 0;
520 }
521
522 /*
523 * Package up an I/O request on a vnode into a uio and do it.
524 */
525 int
526 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
527 enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
528 struct lwp *l)
529 {
530 struct uio auio;
531 struct iovec aiov;
532 int error;
533
534 if ((ioflg & IO_NODELOCKED) == 0) {
535 if (rw == UIO_READ) {
536 vn_lock(vp, LK_SHARED | LK_RETRY);
537 } else /* UIO_WRITE */ {
538 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
539 }
540 }
541 auio.uio_iov = &aiov;
542 auio.uio_iovcnt = 1;
543 aiov.iov_base = base;
544 aiov.iov_len = len;
545 auio.uio_resid = len;
546 auio.uio_offset = offset;
547 auio.uio_rw = rw;
548 if (segflg == UIO_SYSSPACE) {
549 UIO_SETUP_SYSSPACE(&auio);
550 } else {
551 auio.uio_vmspace = l->l_proc->p_vmspace;
552 }
553
554 if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
555 goto out;
556
557 if (rw == UIO_READ) {
558 error = VOP_READ(vp, &auio, ioflg, cred);
559 } else {
560 error = VOP_WRITE(vp, &auio, ioflg, cred);
561 }
562
563 if (aresid)
564 *aresid = auio.uio_resid;
565 else
566 if (auio.uio_resid && error == 0)
567 error = EIO;
568
569 out:
570 if ((ioflg & IO_NODELOCKED) == 0) {
571 VOP_UNLOCK(vp);
572 }
573 return error;
574 }
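
/*
 * Illustrative sketch (assumed helper, not part of this file): writing a
 * kernel-space buffer to a referenced vnode with vn_rdwr().  Passing
 * ioflg 0 lets vn_rdwr() take and release the vnode lock itself; since a
 * non-NULL aresid suppresses the internal EIO check, the helper does it.
 */
#if 0
static int
example_write_buf(struct vnode *vp, void *buf, int len, off_t off,
    kauth_cred_t cred)
{
	size_t resid;
	int error;

	error = vn_rdwr(UIO_WRITE, vp, buf, len, off, UIO_SYSSPACE,
	    0, cred, &resid, curlwp);
	if (error == 0 && resid != 0)
		error = EIO;	/* short transfer */
	return error;
}
#endif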
575
576 int
577 vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
578 struct lwp *l, off_t **cookies, int *ncookies)
579 {
580 struct vnode *vp = fp->f_vnode;
581 struct iovec aiov;
582 struct uio auio;
583 int error, eofflag;
584
585 /* Limit the size on any kernel buffers used by VOP_READDIR */
586 count = uimin(MAXBSIZE, count);
587
588 unionread:
589 if (vp->v_type != VDIR)
590 return EINVAL;
591 aiov.iov_base = bf;
592 aiov.iov_len = count;
593 auio.uio_iov = &aiov;
594 auio.uio_iovcnt = 1;
595 auio.uio_rw = UIO_READ;
596 if (segflg == UIO_SYSSPACE) {
597 UIO_SETUP_SYSSPACE(&auio);
598 } else {
599 KASSERT(l == curlwp);
600 auio.uio_vmspace = l->l_proc->p_vmspace;
601 }
602 auio.uio_resid = count;
603 vn_lock(vp, LK_SHARED | LK_RETRY);
604 mutex_enter(&fp->f_lock);
605 auio.uio_offset = fp->f_offset;
606 mutex_exit(&fp->f_lock);
607 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
608 ncookies);
609 mutex_enter(&fp->f_lock);
610 fp->f_offset = auio.uio_offset;
611 mutex_exit(&fp->f_lock);
612 VOP_UNLOCK(vp);
613 if (error)
614 return error;
615
616 if (count == auio.uio_resid && vn_union_readdir_hook) {
617 struct vnode *ovp = vp;
618
619 error = (*vn_union_readdir_hook)(&vp, fp, l);
620 if (error)
621 return error;
622 if (vp != ovp)
623 goto unionread;
624 }
625
626 if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
627 (vp->v_mount->mnt_flag & MNT_UNION)) {
628 struct vnode *tvp = vp;
629 vp = vp->v_mount->mnt_vnodecovered;
630 vref(vp);
631 mutex_enter(&fp->f_lock);
632 fp->f_vnode = vp;
633 fp->f_offset = 0;
634 mutex_exit(&fp->f_lock);
635 vrele(tvp);
636 goto unionread;
637 }
638 *done = count - auio.uio_resid;
639 return error;
640 }
641
642 /*
643 * File table vnode read routine.
644 */
645 static int
646 vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
647 int flags)
648 {
649 struct vnode *vp = fp->f_vnode;
650 int error, ioflag, fflag;
651 size_t count;
652
653 ioflag = IO_ADV_ENCODE(fp->f_advice);
654 fflag = fp->f_flag;
655 if (fflag & FNONBLOCK)
656 ioflag |= IO_NDELAY;
657 if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
658 ioflag |= IO_SYNC;
659 if (fflag & FALTIO)
660 ioflag |= IO_ALTSEMANTICS;
661 if (fflag & FDIRECT)
662 ioflag |= IO_DIRECT;
663 if (offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) != 0)
664 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
665 else
666 vn_lock(vp, LK_SHARED | LK_RETRY);
667 if (__predict_false(vp->v_type == VDIR) &&
668 offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) == 0)
669 mutex_enter(&fp->f_lock);
670 uio->uio_offset = *offset;
671 if (__predict_false(vp->v_type == VDIR) &&
672 offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) == 0)
673 mutex_exit(&fp->f_lock);
674 count = uio->uio_resid;
675 error = VOP_READ(vp, uio, ioflag, cred);
676 if (flags & FOF_UPDATE_OFFSET)
677 *offset += count - uio->uio_resid;
678 VOP_UNLOCK(vp);
679 return error;
680 }
681
682 /*
683 * File table vnode write routine.
684 */
685 static int
686 vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
687 int flags)
688 {
689 struct vnode *vp = fp->f_vnode;
690 int error, ioflag, fflag;
691 size_t count;
692
693 ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
694 fflag = fp->f_flag;
695 if (vp->v_type == VREG && (fflag & O_APPEND))
696 ioflag |= IO_APPEND;
697 if (fflag & FNONBLOCK)
698 ioflag |= IO_NDELAY;
699 if (fflag & FFSYNC ||
700 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
701 ioflag |= IO_SYNC;
702 else if (fflag & FDSYNC)
703 ioflag |= IO_DSYNC;
704 if (fflag & FALTIO)
705 ioflag |= IO_ALTSEMANTICS;
706 if (fflag & FDIRECT)
707 ioflag |= IO_DIRECT;
708 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
709 uio->uio_offset = *offset;
710 count = uio->uio_resid;
711
712 if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
713 goto out;
714
715 error = VOP_WRITE(vp, uio, ioflag, cred);
716
717 if (flags & FOF_UPDATE_OFFSET) {
718 if (ioflag & IO_APPEND) {
719 /*
720 * SUSv3 describes behaviour for count = 0 as following:
721 * "Before any action ... is taken, and if nbyte is zero
722 * and the file is a regular file, the write() function
723 * ... in the absence of errors ... shall return zero
724 * and have no other results."
725 */
726 if (count)
727 *offset = uio->uio_offset;
728 } else
729 *offset += count - uio->uio_resid;
730 }
731
732 out:
733 VOP_UNLOCK(vp);
734 return error;
735 }
736
737 /*
738 * File table vnode stat routine.
739 */
740 static int
741 vn_statfile(file_t *fp, struct stat *sb)
742 {
743 struct vnode *vp = fp->f_vnode;
744 int error;
745
746 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
747 error = vn_stat(vp, sb);
748 VOP_UNLOCK(vp);
749 return error;
750 }
751
752 int
753 vn_stat(struct vnode *vp, struct stat *sb)
754 {
755 struct vattr va;
756 int error;
757 mode_t mode;
758
759 memset(&va, 0, sizeof(va));
760 error = VOP_GETATTR(vp, &va, kauth_cred_get());
761 if (error)
762 return error;
763 /*
764 * Copy from vattr table
765 */
766 memset(sb, 0, sizeof(*sb));
767 sb->st_dev = va.va_fsid;
768 sb->st_ino = va.va_fileid;
769 mode = va.va_mode;
770 switch (vp->v_type) {
771 case VREG:
772 mode |= S_IFREG;
773 break;
774 case VDIR:
775 mode |= S_IFDIR;
776 break;
777 case VBLK:
778 mode |= S_IFBLK;
779 break;
780 case VCHR:
781 mode |= S_IFCHR;
782 break;
783 case VLNK:
784 mode |= S_IFLNK;
785 break;
786 case VSOCK:
787 mode |= S_IFSOCK;
788 break;
789 case VFIFO:
790 mode |= S_IFIFO;
791 break;
792 default:
793 return EBADF;
794 }
795 sb->st_mode = mode;
796 sb->st_nlink = va.va_nlink;
797 sb->st_uid = va.va_uid;
798 sb->st_gid = va.va_gid;
799 sb->st_rdev = va.va_rdev;
800 sb->st_size = va.va_size;
801 sb->st_atimespec = va.va_atime;
802 sb->st_mtimespec = va.va_mtime;
803 sb->st_ctimespec = va.va_ctime;
804 sb->st_birthtimespec = va.va_birthtime;
805 sb->st_blksize = va.va_blocksize;
806 sb->st_flags = va.va_flags;
807 sb->st_gen = 0;
808 sb->st_blocks = va.va_bytes / S_BLKSIZE;
809 return 0;
810 }
811
812 /*
813 * File table vnode fcntl routine.
814 */
815 static int
816 vn_fcntl(file_t *fp, u_int com, void *data)
817 {
818 struct vnode *vp = fp->f_vnode;
819 int error;
820
821 error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
822 return error;
823 }
824
825 /*
826 * File table vnode ioctl routine.
827 */
828 static int
829 vn_ioctl(file_t *fp, u_long com, void *data)
830 {
831 struct vnode *vp = fp->f_vnode, *ovp;
832 struct vattr vattr;
833 int error;
834
835 switch (vp->v_type) {
836
837 case VREG:
838 case VDIR:
839 if (com == FIONREAD) {
840 vn_lock(vp, LK_SHARED | LK_RETRY);
841 error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
842 if (error == 0) {
843 if (vp->v_type == VDIR)
844 mutex_enter(&fp->f_lock);
845 *(int *)data = vattr.va_size - fp->f_offset;
846 if (vp->v_type == VDIR)
847 mutex_exit(&fp->f_lock);
848 }
849 VOP_UNLOCK(vp);
850 if (error)
851 return error;
852 return 0;
853 }
854 if ((com == FIONWRITE) || (com == FIONSPACE)) {
855 /*
856 * Files don't have send queues, so there are never
857 * any bytes queued in them, nor is there any
858 * open space in them.
859 */
860 *(int *)data = 0;
861 return 0;
862 }
863 if (com == FIOGETBMAP) {
864 daddr_t *block;
865
866 if (*(daddr_t *)data < 0)
867 return EINVAL;
868 block = (daddr_t *)data;
869 vn_lock(vp, LK_SHARED | LK_RETRY);
870 error = VOP_BMAP(vp, *block, NULL, block, NULL);
871 VOP_UNLOCK(vp);
872 return error;
873 }
874 if (com == OFIOGETBMAP) {
875 daddr_t ibn, obn;
876
877 if (*(int32_t *)data < 0)
878 return EINVAL;
879 ibn = (daddr_t)*(int32_t *)data;
880 vn_lock(vp, LK_SHARED | LK_RETRY);
881 error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
882 VOP_UNLOCK(vp);
883 *(int32_t *)data = (int32_t)obn;
884 return error;
885 }
886 if (com == FIONBIO || com == FIOASYNC) /* XXX */
887 return 0; /* XXX */
888 /* FALLTHROUGH */
889 case VFIFO:
890 case VCHR:
891 case VBLK:
892 error = VOP_IOCTL(vp, com, data, fp->f_flag,
893 kauth_cred_get());
894 if (error == 0 && com == TIOCSCTTY) {
895 vref(vp);
896 mutex_enter(&proc_lock);
897 ovp = curproc->p_session->s_ttyvp;
898 curproc->p_session->s_ttyvp = vp;
899 mutex_exit(&proc_lock);
900 if (ovp != NULL)
901 vrele(ovp);
902 }
903 return error;
904
905 default:
906 return EPASSTHROUGH;
907 }
908 }
909
910 /*
911 * File table vnode poll routine.
912 */
913 static int
914 vn_poll(file_t *fp, int events)
915 {
916
917 return VOP_POLL(fp->f_vnode, events);
918 }
919
920 /*
921 * File table vnode kqfilter routine.
922 */
923 int
924 vn_kqfilter(file_t *fp, struct knote *kn)
925 {
926
927 return VOP_KQFILTER(fp->f_vnode, kn);
928 }
929
930 static int
931 vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
932 int *advicep, struct uvm_object **uobjp, int *maxprotp)
933 {
934 struct uvm_object *uobj;
935 struct vnode *vp;
936 struct vattr va;
937 struct lwp *l;
938 vm_prot_t maxprot;
939 off_t off;
940 int error, flags;
941 bool needwritemap;
942
943 l = curlwp;
944
945 off = *offp;
946 flags = *flagsp;
947 maxprot = VM_PROT_EXECUTE;
948
949 KASSERT(size > 0);
950
951 vp = fp->f_vnode;
952 if (vp->v_type != VREG && vp->v_type != VCHR &&
953 vp->v_type != VBLK) {
954 /* only REG/CHR/BLK support mmap */
955 return ENODEV;
956 }
957 if (vp->v_type != VCHR && off < 0) {
958 return EINVAL;
959 }
960 #if SIZE_MAX > UINT32_MAX /* XXX -Wtype-limits */
961 if (vp->v_type != VCHR && size > __type_max(off_t)) {
962 return EOVERFLOW;
963 }
964 #endif
965 if (vp->v_type != VCHR && off > __type_max(off_t) - size) {
966 /* no offset wrapping */
967 return EOVERFLOW;
968 }
969
970 /* special case: catch SunOS style /dev/zero */
971 if (vp->v_type == VCHR &&
972 (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
973 *uobjp = NULL;
974 *maxprotp = VM_PROT_ALL;
975 return 0;
976 }
977
978 /*
979 * Old programs may not select a specific sharing type, so
980 * default to an appropriate one.
981 *
982 * XXX: how does MAP_ANON fit in the picture?
983 */
984 if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
985 #if defined(DEBUG)
986 struct proc *p = l->l_proc;
987 printf("WARNING: defaulted mmap() share type to "
988 "%s (pid %d command %s)\n", vp->v_type == VCHR ?
989 "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
990 p->p_comm);
991 #endif
992 if (vp->v_type == VCHR)
993 flags |= MAP_SHARED; /* for a device */
994 else
995 flags |= MAP_PRIVATE; /* for a file */
996 }
997
998 /*
999 * MAP_PRIVATE device mappings don't make sense (and aren't
1000 * supported anyway). However, some programs rely on this,
1001 * so just change it to MAP_SHARED.
1002 */
1003 if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
1004 flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
1005 }
1006
1007 /*
1008 * now check protection
1009 */
1010
1011 /* check read access */
1012 if (fp->f_flag & FREAD)
1013 maxprot |= VM_PROT_READ;
1014 else if (prot & PROT_READ) {
1015 return EACCES;
1016 }
1017
1018 /* check write access, shared case first */
1019 if (flags & MAP_SHARED) {
1020 /*
1021 * if the file is writable, only add PROT_WRITE to
1022 * maxprot if the file is not immutable, append-only.
1023 * otherwise, if we have asked for PROT_WRITE, return
1024 * EPERM.
1025 */
1026 if (fp->f_flag & FWRITE) {
1027 vn_lock(vp, LK_SHARED | LK_RETRY);
1028 error = VOP_GETATTR(vp, &va, l->l_cred);
1029 VOP_UNLOCK(vp);
1030 if (error) {
1031 return error;
1032 }
1033 if ((va.va_flags &
1034 (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
1035 maxprot |= VM_PROT_WRITE;
1036 else if (prot & PROT_WRITE) {
1037 return EPERM;
1038 }
1039 } else if (prot & PROT_WRITE) {
1040 return EACCES;
1041 }
1042 } else {
1043 /* MAP_PRIVATE mappings can always be written to */
1044 maxprot |= VM_PROT_WRITE;
1045 }
1046
1047 /*
1048 * Don't allow mmap for EXEC if the file system
1049 * is mounted NOEXEC.
1050 */
1051 if ((prot & PROT_EXEC) != 0 &&
1052 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
1053 return EACCES;
1054 }
1055
1056 if (vp->v_type != VCHR) {
1057 error = VOP_MMAP(vp, prot, curlwp->l_cred);
1058 if (error) {
1059 return error;
1060 }
1061 vref(vp);
1062 uobj = &vp->v_uobj;
1063
1064 /*
1065 * If the vnode is being mapped with PROT_EXEC,
1066 * then mark it as text.
1067 */
1068 if (prot & PROT_EXEC) {
1069 vn_markexec(vp);
1070 }
1071 } else {
1072 int i = maxprot;
1073
1074 /*
1075 * XXX Some devices don't like to be mapped with
1076 * XXX PROT_EXEC or PROT_WRITE, but we don't really
1077 * XXX have a better way of handling this, right now
1078 */
1079 do {
1080 uobj = udv_attach(vp->v_rdev,
1081 (flags & MAP_SHARED) ? i :
1082 (i & ~VM_PROT_WRITE), off, size);
1083 i--;
1084 } while ((uobj == NULL) && (i > 0));
1085 if (uobj == NULL) {
1086 return EINVAL;
1087 }
1088 *advicep = UVM_ADV_RANDOM;
1089 }
1090
1091 /*
1092 * Set vnode flags to indicate the new kinds of mapping.
1093 * We take the vnode lock in exclusive mode here to serialize
1094 * with direct I/O.
1095 *
1096 * Safe to check for these flag values without a lock, as
1097 * long as a reference to the vnode is held.
1098 */
1099 needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
1100 (flags & MAP_SHARED) != 0 &&
1101 (maxprot & VM_PROT_WRITE) != 0;
1102 if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
1103 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1104 vp->v_vflag |= VV_MAPPED;
1105 if (needwritemap) {
1106 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1107 mutex_enter(vp->v_interlock);
1108 vp->v_iflag |= VI_WRMAP;
1109 mutex_exit(vp->v_interlock);
1110 rw_exit(vp->v_uobj.vmobjlock);
1111 }
1112 VOP_UNLOCK(vp);
1113 }
1114
1115 #if NVERIEXEC > 0
1116
1117 /*
1118 * Check if the file can be executed indirectly.
1119 *
1120 * XXX: This gives false warnings about "Incorrect access type"
1121 * XXX: if the mapping is not executable. Harmless, but will be
1122 * XXX: fixed as part of other changes.
1123 */
1124 if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
1125 NULL)) {
1126
1127 /*
1128 * Don't allow executable mappings if we can't
1129 * indirectly execute the file.
1130 */
1131 if (prot & VM_PROT_EXECUTE) {
1132 return EPERM;
1133 }
1134
1135 /*
1136 * Strip the executable bit from 'maxprot' to make sure
1137 * it can't be made executable later.
1138 */
1139 maxprot &= ~VM_PROT_EXECUTE;
1140 }
1141 #endif /* NVERIEXEC > 0 */
1142
1143 *uobjp = uobj;
1144 *maxprotp = maxprot;
1145 *flagsp = flags;
1146
1147 return 0;
1148 }
1149
1150 static int
1151 vn_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
1152 int flags)
1153 {
1154 const off_t OFF_MIN = __type_min(off_t);
1155 const off_t OFF_MAX = __type_max(off_t);
1156 kauth_cred_t cred = fp->f_cred;
1157 off_t oldoff, newoff;
1158 struct vnode *vp = fp->f_vnode;
1159 struct vattr vattr;
1160 int error;
1161
1162 if (vp->v_type == VFIFO)
1163 return ESPIPE;
1164
1165 if (flags & FOF_UPDATE_OFFSET)
1166 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1167 else
1168 vn_lock(vp, LK_SHARED | LK_RETRY);
1169
1170 /* Compute the old and new offsets. */
1171 if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
1172 mutex_enter(&fp->f_lock);
1173 oldoff = fp->f_offset;
1174 if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
1175 mutex_exit(&fp->f_lock);
1176 switch (whence) {
1177 case SEEK_CUR:
1178 if (delta > 0) {
1179 if (oldoff > 0 && delta > OFF_MAX - oldoff) {
1180 newoff = OFF_MAX;
1181 break;
1182 }
1183 } else {
1184 if (oldoff < 0 && delta < OFF_MIN - oldoff) {
1185 newoff = OFF_MIN;
1186 break;
1187 }
1188 }
1189 newoff = oldoff + delta;
1190 break;
1191 case SEEK_END:
1192 error = VOP_GETATTR(vp, &vattr, cred);
1193 if (error)
1194 goto out;
1195 if (vattr.va_size > OFF_MAX ||
1196 delta > OFF_MAX - (off_t)vattr.va_size) {
1197 newoff = OFF_MAX;
1198 break;
1199 }
1200 newoff = delta + vattr.va_size;
1201 break;
1202 case SEEK_SET:
1203 newoff = delta;
1204 break;
1205 default:
1206 error = EINVAL;
1207 goto out;
1208 }
1209
1210 /* Pass the proposed change to the file system to audit. */
1211 error = VOP_SEEK(vp, oldoff, newoff, cred);
1212 if (error)
1213 goto out;
1214
1215 /* Success! */
1216 if (newoffp)
1217 *newoffp = newoff;
1218 if (flags & FOF_UPDATE_OFFSET)
1219 fp->f_offset = newoff;
1220 error = 0;
1221
1222 out: VOP_UNLOCK(vp);
1223 return error;
1224 }
1225
1226 static int
1227 vn_advlock(struct file *fp, void *id, int op, struct flock *fl,
1228 int flags)
1229 {
1230 struct vnode *const vp = fp->f_vnode;
1231
1232 if (fl->l_whence == SEEK_CUR) {
1233 vn_lock(vp, LK_SHARED | LK_RETRY);
1234 fl->l_start += fp->f_offset;
1235 VOP_UNLOCK(vp);
1236 }
1237
1238 return VOP_ADVLOCK(vp, id, op, fl, flags);
1239 }
1240
1241 static int
1242 vn_fpathconf(struct file *fp, int name, register_t *retval)
1243 {
1244 struct vnode *const vp = fp->f_vnode;
1245 int error;
1246
1247 vn_lock(vp, LK_SHARED | LK_RETRY);
1248 error = VOP_PATHCONF(vp, name, retval);
1249 VOP_UNLOCK(vp);
1250
1251 return error;
1252 }
1253
1254 static int
1255 vn_posix_fadvise(struct file *fp, off_t offset, off_t len, int advice)
1256 {
1257 const off_t OFF_MAX = __type_max(off_t);
1258 struct vnode *vp = fp->f_vnode;
1259 off_t endoffset;
1260 int error;
1261
1262 if (offset < 0) {
1263 return EINVAL;
1264 }
1265 if (len == 0) {
1266 endoffset = OFF_MAX;
1267 } else if (len > 0 && (OFF_MAX - offset) >= len) {
1268 endoffset = offset + len;
1269 } else {
1270 return EINVAL;
1271 }
1272
1273 CTASSERT(POSIX_FADV_NORMAL == UVM_ADV_NORMAL);
1274 CTASSERT(POSIX_FADV_RANDOM == UVM_ADV_RANDOM);
1275 CTASSERT(POSIX_FADV_SEQUENTIAL == UVM_ADV_SEQUENTIAL);
1276
1277 switch (advice) {
1278 case POSIX_FADV_WILLNEED:
1279 case POSIX_FADV_DONTNEED:
1280 if (vp->v_type != VREG && vp->v_type != VBLK)
1281 return 0;
1282 break;
1283 }
1284
1285 switch (advice) {
1286 case POSIX_FADV_NORMAL:
1287 case POSIX_FADV_RANDOM:
1288 case POSIX_FADV_SEQUENTIAL:
1289 /*
1290 * We ignore offset and size. Must lock the file to
1291 * do this, as f_advice is sub-word sized.
1292 */
1293 mutex_enter(&fp->f_lock);
1294 fp->f_advice = (u_char)advice;
1295 mutex_exit(&fp->f_lock);
1296 error = 0;
1297 break;
1298
1299 case POSIX_FADV_WILLNEED:
1300 error = uvm_readahead(&vp->v_uobj, offset, endoffset - offset);
1301 break;
1302
1303 case POSIX_FADV_DONTNEED:
1304 /*
1305 * Align the region to page boundaries as VOP_PUTPAGES expects
1306 * by shrinking it. We shrink instead of expand because we
1307 * do not want to deactivate cache outside of the requested
1308 * region. It means that if the specified region is smaller
1309 * than PAGE_SIZE, we do nothing.
1310 */
1311 if (offset <= trunc_page(OFF_MAX) &&
1312 round_page(offset) < trunc_page(endoffset)) {
1313 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1314 error = VOP_PUTPAGES(vp,
1315 round_page(offset), trunc_page(endoffset),
1316 PGO_DEACTIVATE | PGO_CLEANIT);
1317 } else {
1318 error = 0;
1319 }
1320 break;
1321
1322 case POSIX_FADV_NOREUSE:
1323 /* Not implemented yet. */
1324 error = 0;
1325 break;
1326 default:
1327 error = EINVAL;
1328 break;
1329 }
1330
1331 return error;
1332 }
1333
1334 /*
1335 * Check that the vnode is still valid, and if so
1336 * acquire requested lock.
1337 */
1338 int
1339 vn_lock(struct vnode *vp, int flags)
1340 {
1341 struct lwp *l;
1342 int error;
1343
1344 KASSERT(vrefcnt(vp) > 0);
1345 KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
1346 LK_UPGRADE|LK_DOWNGRADE)) == 0);
1347 KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));
1348
1349 #ifdef DIAGNOSTIC
1350 if (wapbl_vphaswapbl(vp))
1351 WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
1352 #endif
1353
1354 /* Get a more useful report for lockstat. */
1355 l = curlwp;
1356 KASSERT(l->l_rwcallsite == 0);
1357 l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);
1358
1359 error = VOP_LOCK(vp, flags);
1360
1361 l->l_rwcallsite = 0;
1362
1363 switch (flags & (LK_RETRY | LK_NOWAIT)) {
1364 case 0:
1365 KASSERT(error == 0 || error == ENOENT);
1366 break;
1367 case LK_RETRY:
1368 KASSERT(error == 0);
1369 break;
1370 case LK_NOWAIT:
1371 KASSERT(error == 0 || error == EBUSY || error == ENOENT);
1372 break;
1373 case LK_RETRY | LK_NOWAIT:
1374 KASSERT(error == 0 || error == EBUSY);
1375 break;
1376 }
1377
1378 return error;
1379 }
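
/*
 * Illustrative sketch (assumed helper): the usual calling pattern for
 * vn_lock() seen throughout this file -- take a shared lock, perform a
 * vnode operation, then VOP_UNLOCK().  With LK_RETRY the lock attempt
 * cannot fail (see the KASSERTs above), so no error check is needed.
 */
#if 0
static int
example_getattr(struct vnode *vp, struct vattr *va, kauth_cred_t cred)
{
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, va, cred);
	VOP_UNLOCK(vp);
	return error;
}
#endif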
1380
1381 /*
1382 * File table vnode close routine.
1383 */
1384 static int
1385 vn_closefile(file_t *fp)
1386 {
1387
1388 return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
1389 }
1390
1391 /*
1392 * Simplified in-kernel wrapper calls for extended attribute access.
1393 * These calls pass in a NULL credential, authorizing a "kernel" access.
1394 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1395 */
1396 int
1397 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1398 const char *attrname, size_t *buflen, void *bf, struct lwp *l)
1399 {
1400 struct uio auio;
1401 struct iovec aiov;
1402 int error;
1403
1404 aiov.iov_len = *buflen;
1405 aiov.iov_base = bf;
1406
1407 auio.uio_iov = &aiov;
1408 auio.uio_iovcnt = 1;
1409 auio.uio_rw = UIO_READ;
1410 auio.uio_offset = 0;
1411 auio.uio_resid = *buflen;
1412 UIO_SETUP_SYSSPACE(&auio);
1413
1414 if ((ioflg & IO_NODELOCKED) == 0)
1415 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1416
1417 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
1418 NOCRED);
1419
1420 if ((ioflg & IO_NODELOCKED) == 0)
1421 VOP_UNLOCK(vp);
1422
1423 if (error == 0)
1424 *buflen = *buflen - auio.uio_resid;
1425
1426 return error;
1427 }
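
/*
 * Illustrative sketch (assumed helper and attribute name): reading a
 * small extended attribute into a stack buffer with vn_extattr_get().
 * EXTATTR_NAMESPACE_SYSTEM comes from <sys/extattr.h>, which this file
 * does not include; buflen is the buffer size on entry and the number of
 * bytes actually read on success.
 */
#if 0
static int
example_read_attr(struct vnode *vp, struct lwp *l)
{
	char buf[64];
	size_t buflen = sizeof(buf);
	int error;

	/* ioflg 0: let vn_extattr_get() lock and unlock the vnode. */
	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "example.attr", &buflen, buf, l);
	if (error == 0)
		printf("read %zu bytes of example.attr\n", buflen);
	return error;
}
#endif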
1428
1429 /*
1430 * XXX Failure mode if partially written?
1431 */
1432 int
1433 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1434 const char *attrname, size_t buflen, const void *bf, struct lwp *l)
1435 {
1436 struct uio auio;
1437 struct iovec aiov;
1438 int error;
1439
1440 aiov.iov_len = buflen;
1441 aiov.iov_base = __UNCONST(bf); /* XXXUNCONST kills const */
1442
1443 auio.uio_iov = &aiov;
1444 auio.uio_iovcnt = 1;
1445 auio.uio_rw = UIO_WRITE;
1446 auio.uio_offset = 0;
1447 auio.uio_resid = buflen;
1448 UIO_SETUP_SYSSPACE(&auio);
1449
1450 if ((ioflg & IO_NODELOCKED) == 0) {
1451 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1452 }
1453
1454 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);
1455
1456 if ((ioflg & IO_NODELOCKED) == 0) {
1457 VOP_UNLOCK(vp);
1458 }
1459
1460 return error;
1461 }
1462
1463 int
1464 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1465 const char *attrname, struct lwp *l)
1466 {
1467 int error;
1468
1469 if ((ioflg & IO_NODELOCKED) == 0) {
1470 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1471 }
1472
1473 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
1474 if (error == EOPNOTSUPP)
1475 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1476 NOCRED);
1477
1478 if ((ioflg & IO_NODELOCKED) == 0) {
1479 VOP_UNLOCK(vp);
1480 }
1481
1482 return error;
1483 }
1484
1485 int
1486 vn_fifo_bypass(void *v)
1487 {
1488 struct vop_generic_args *ap = v;
1489
1490 return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
1491 }
1492
1493 /*
1494 * Open block device by device number
1495 */
1496 int
1497 vn_bdev_open(dev_t dev, struct vnode **vpp, struct lwp *l)
1498 {
1499 int error;
1500
1501 if ((error = bdevvp(dev, vpp)) != 0)
1502 return error;
1503
1504 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
1505 if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
1506 vput(*vpp);
1507 return error;
1508 }
1509 mutex_enter((*vpp)->v_interlock);
1510 (*vpp)->v_writecount++;
1511 mutex_exit((*vpp)->v_interlock);
1512 VOP_UNLOCK(*vpp);
1513
1514 return 0;
1515 }
1516
1517 /*
1518 * Lookup the provided name in the filesystem. If the file exists,
1519 * is a valid block device, and isn't being used by anyone else,
1520 * set *vpp to the file's vnode.
1521 */
1522 int
1523 vn_bdev_openpath(struct pathbuf *pb, struct vnode **vpp, struct lwp *l)
1524 {
1525 struct vnode *vp;
1526 dev_t dev;
1527 enum vtype vt;
1528 int error;
1529
1530 error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
1531 if (error != 0)
1532 return error;
1533
1534 dev = vp->v_rdev;
1535 vt = vp->v_type;
1536
1537 VOP_UNLOCK(vp);
1538 (void) vn_close(vp, FREAD | FWRITE, l->l_cred);
1539
1540 if (vt != VBLK)
1541 return ENOTBLK;
1542
1543 return vn_bdev_open(dev, vpp, l);
1544 }
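
/*
 * Illustrative sketch (assumed helper and device path): opening a block
 * device for kernel use by path name with vn_bdev_openpath().  The
 * pathbuf is owned by the caller and destroyed afterwards; on success
 * *vpp refers to an opened, unlocked block-device vnode that is later
 * released with vn_close(vp, FREAD | FWRITE, cred).
 */
#if 0
static int
example_open_disk(struct vnode **vpp, struct lwp *l)
{
	struct pathbuf *pb;
	int error;

	pb = pathbuf_create("/dev/exampledisk");
	if (pb == NULL)
		return ENOMEM;
	error = vn_bdev_openpath(pb, vpp, l);
	pathbuf_destroy(pb);
	return error;
}
#endif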
1545
1546 static long
1547 vn_knote_to_interest(const struct knote *kn)
1548 {
1549 switch (kn->kn_filter) {
1550 case EVFILT_READ:
1551 /*
1552 * Writing to the file or changing its attributes can
1553 * set the file size, which impacts the readability
1554 * filter.
1555 *
1556 * (No need to set NOTE_EXTEND here; it's only ever
1557 * sent with other hints; see vnode_if.c.)
1558 */
1559 return NOTE_WRITE | NOTE_ATTRIB;
1560
1561 case EVFILT_VNODE:
1562 return kn->kn_sfflags;
1563
1564 case EVFILT_WRITE:
1565 default:
1566 return 0;
1567 }
1568 }
1569
1570 void
1571 vn_knote_attach(struct vnode *vp, struct knote *kn)
1572 {
1573 struct vnode_klist *vk = vp->v_klist;
1574 long interest = 0;
1575
1576 /*
1577 * In the case of layered / stacked file systems, knotes
1578 * should only ever be associated with the base vnode.
1579 */
1580 KASSERT(kn->kn_hook == vp);
1581 KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
1582
1583 /*
1584 * We maintain a bitmask of the kevents that there is interest in,
1585 * to minimize the impact of having watchers. It's silly to have
1586 * to traverse vn_klist every time a read or write happens simply
1587 * because there is someone interested in knowing when the file
1588 * is deleted, for example.
1589 */
1590
1591 mutex_enter(vp->v_interlock);
1592 SLIST_INSERT_HEAD(&vk->vk_klist, kn, kn_selnext);
1593 SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
1594 interest |= vn_knote_to_interest(kn);
1595 }
1596 vk->vk_interest = interest;
1597 mutex_exit(vp->v_interlock);
1598 }
1599
1600 void
1601 vn_knote_detach(struct vnode *vp, struct knote *kn)
1602 {
1603 struct vnode_klist *vk = vp->v_klist;
1604 long interest = 0;
1605
1606 /* See above. */
1607 KASSERT(kn->kn_hook == vp);
1608 KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
1609
1610 /*
1611 * We special case removing the head of the list, because:
1612 *
1613 * 1. It's extremely likely that we're detaching the only
1614 * knote.
1615 *
1616 * 2. We're already traversing the whole list, so we don't
1617 * want to use the generic SLIST_REMOVE() which would
1618 * traverse it *again*.
1619 */
1620
1621 mutex_enter(vp->v_interlock);
1622 if (__predict_true(kn == SLIST_FIRST(&vk->vk_klist))) {
1623 SLIST_REMOVE_HEAD(&vk->vk_klist, kn_selnext);
1624 SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
1625 interest |= vn_knote_to_interest(kn);
1626 }
1627 vk->vk_interest = interest;
1628 } else {
1629 struct knote *thiskn, *nextkn, *prevkn = NULL;
1630
1631 SLIST_FOREACH_SAFE(thiskn, &vk->vk_klist, kn_selnext, nextkn) {
1632 if (thiskn == kn) {
1633 KASSERT(kn != NULL);
1634 KASSERT(prevkn != NULL);
1635 SLIST_REMOVE_AFTER(prevkn, kn_selnext);
1636 kn = NULL;
1637 } else {
1638 interest |= vn_knote_to_interest(thiskn);
1639 prevkn = thiskn;
1640 }
1641 }
1642 vk->vk_interest = interest;
1643 }
1644 mutex_exit(vp->v_interlock);
1645 }
1646