/*	$NetBSD: vfs_vnops.c,v 1.193.2.1 2017/03/20 06:57:48 pgoyette Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.193.2.1 2017/03/20 06:57:48 pgoyette Exp $");

#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/wapbl.h>
#include <sys/mman.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_device.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

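/*
 * Hook for union file systems: vn_readdir() calls through this, when
 * set, to continue a read in the directory underlying a union mount.
 */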
int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);

#include <sys/verified_exec.h>

static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_closefile(file_t *fp);
static int vn_poll(file_t *fp, int events);
static int vn_fcntl(file_t *fp, u_int com, void *data);
static int vn_statfile(file_t *fp, struct stat *sb);
static int vn_ioctl(file_t *fp, u_long com, void *data);
static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
	    struct uvm_object **, int *);

const struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_fcntl = vn_fcntl,
	.fo_poll = vn_poll,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_kqfilter = vn_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = vn_mmap,
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct lwp *l = curlwp;
	kauth_cred_t cred = l->l_cred;
	struct vattr va;
	int error;
	const char *pathstring;

	if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
		return EINVAL;

	ndp->ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			ndp->ni_cnd.cn_flags |= FOLLOW;
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags |= LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
	}

	pathstring = pathbuf_stringcopy_get(ndp->ni_pathbuf);
	if (pathstring == NULL) {
		return ENOMEM;
	}

	error = namei(ndp);
	if (error)
		goto out;

	vp = ndp->ni_vp;

#if NVERIEXEC > 0
	error = veriexec_openchk(l, ndp->ni_vp, pathstring, fmode);
	if (error) {
		/* We have to release the locks ourselves */
		if (fmode & O_CREAT) {
			if (vp == NULL) {
				vput(ndp->ni_dvp);
			} else {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
				if (ndp->ni_dvp == ndp->ni_vp)
					vrele(ndp->ni_dvp);
				else
					vput(ndp->ni_dvp);
				ndp->ni_dvp = NULL;
				vput(vp);
			}
		} else {
			vput(vp);
		}
		goto out;
	}
#endif /* NVERIEXEC > 0 */

	if (fmode & O_CREAT) {
		if (ndp->ni_vp == NULL) {
			vattr_null(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, &va);
			if (error) {
				vput(ndp->ni_dvp);
				goto out;
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vput(ndp->ni_dvp);
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (ndp->ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

	if ((fmode & O_CREAT) == 0) {
		error = vn_openchk(vp, cred, fmode);
		if (error != 0)
			goto bad;
	}

	if (fmode & O_TRUNC) {
		vattr_null(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
		goto bad;
	if (fmode & FWRITE) {
		mutex_enter(vp->v_interlock);
		vp->v_writecount++;
		mutex_exit(vp->v_interlock);
	}

bad:
	if (error)
		vput(vp);
out:
	pathbuf_stringcopy_put(ndp->ni_pathbuf, pathstring);
	return (error);
}
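
/*
 * Illustrative sketch (not part of this file) of a typical vn_open()
 * call sequence; "path" and "cred" are assumed to be supplied by the
 * caller.  On success the vnode is returned locked and referenced.
 *
 *	struct pathbuf *pb;
 *	struct nameidata nd;
 *	int error;
 *
 *	pb = pathbuf_create(path);
 *	if (pb == NULL)
 *		return ENOMEM;
 *	NDINIT(&nd, LOOKUP, FOLLOW, pb);
 *	error = vn_open(&nd, FREAD, 0);
 *	if (error == 0) {
 *		VOP_UNLOCK(nd.ni_vp);
 *		... use nd.ni_vp ...
 *		error = vn_close(nd.ni_vp, FREAD, cred);
 *	}
 *	pathbuf_destroy(pb);
 */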

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_iflag & VI_TEXT)
		return (ETXTBSY);
	return (0);
}

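/*
 * Check whether a vnode may be opened with the given open flags:
 * disallow O_DIRECTORY on non-directories and writes to directories
 * or active text images, then let the file system decide via
 * VOP_ACCESS().
 */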
int
vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
{
	int permbits = 0;
	int error;

	if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
		return ENOTDIR;

	if ((fflags & FREAD) != 0) {
		permbits = VREAD;
	}
	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
		permbits |= VWRITE;
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp);
		if (error != 0)
			goto bad;
	}
	error = VOP_ACCESS(vp, permbits, cred);
bad:
	return error;
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{

	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		/* Safe unlocked, as long as caller holds a reference. */
		return;
	}

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
		vp->v_iflag |= VI_EXECMAP;
	}
	mutex_exit(vp->v_interlock);
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
		/* Safe unlocked, as long as caller holds a reference. */
		return (0);
	}

	mutex_enter(vp->v_interlock);
	if (vp->v_writecount != 0) {
		KASSERT((vp->v_iflag & VI_TEXT) == 0);
		mutex_exit(vp->v_interlock);
		return (ETXTBSY);
	}
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
	mutex_exit(vp->v_interlock);
	return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
{
	int error;

	if (flags & FWRITE) {
		mutex_enter(vp->v_interlock);
		KASSERT(vp->v_writecount > 0);
		vp->v_writecount--;
		mutex_exit(vp->v_interlock);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred);
	vput(vp);
	return (error);
}

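/*
 * Enforce the RLIMIT_FSIZE resource limit on a pending write: if the
 * write would take a regular file past the soft limit, post SIGXFSZ
 * to the process and fail with EFBIG.  Reads and non-regular files
 * are not subject to the limit.
 */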
static int
enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct lwp *l = curlwp;
	off_t testoff;

	if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
		return 0;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (ioflag & IO_APPEND)
		testoff = vp->v_size;
	else
		testoff = uio->uio_offset;

	if (testoff + uio->uio_resid >
	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		mutex_enter(proc_lock);
		psignal(l->l_proc, SIGXFSZ);
		mutex_exit(proc_lock);
		return EFBIG;
	}

	return 0;
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}

	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
		goto out;

	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}

	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;

out:
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}
	return (error);
}
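
/*
 * Illustrative sketch (not part of this file): reading the first 512
 * bytes of an already-referenced, unlocked vnode into a kernel buffer.
 * "vp" and "cred" are assumed to come from the caller.
 *
 *	char buf[512];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, cred, &resid, curlwp);
 *	if (error == 0)
 *		... sizeof(buf) - resid bytes are now valid in buf ...
 */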
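/*
 * File table vnode readdir routine.  Reads directory entries into the
 * caller's buffer; on union mounts it continues reading in the
 * underlying (covered) directory once the upper layer is exhausted.
 */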
int
vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = fp->f_vnode;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size on any kernel buffers used by VOP_READDIR */
	count = min(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
	    ncookies);
	mutex_enter(&fp->f_lock);
	fp->f_offset = auio.uio_offset;
	mutex_exit(&fp->f_lock);
	VOP_UNLOCK(vp);
	if (error)
		return (error);

	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}

	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		vref(vp);
		mutex_enter(&fp->f_lock);
		fp->f_vnode = vp;
		fp->f_offset = 0;
		mutex_exit(&fp->f_lock);
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = fp->f_vnode;
	int error, ioflag, fflag;
	size_t count;

	ioflag = IO_ADV_ENCODE(fp->f_advice);
	fflag = fp->f_flag;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = fp->f_vnode;
	int error, ioflag, fflag;
	size_t count;

	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
	fflag = fp->f_flag;
	if (vp->v_type == VREG && (fflag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fflag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fflag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;

	if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
		goto out;

	error = VOP_WRITE(vp, uio, ioflag, cred);

	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND) {
			/*
			 * SUSv3 describes the behaviour for count = 0 as
			 * follows: "Before any action ... is taken, and if
			 * nbyte is zero and the file is a regular file, the
			 * write() function ... in the absence of errors ...
			 * shall return zero and have no other results."
			 */
			if (count)
				*offset = uio->uio_offset;
		} else
			*offset += count - uio->uio_resid;
	}

out:
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(file_t *fp, struct stat *sb)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = vn_stat(vp, sb);
	VOP_UNLOCK(vp);
	return error;
}

int
vn_stat(struct vnode *vp, struct stat *sb)
{
	struct vattr va;
	int error;
	mode_t mode;

	memset(&va, 0, sizeof(va));
	error = VOP_GETATTR(vp, &va, kauth_cred_get());
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	memset(sb, 0, sizeof(*sb));
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(file_t *fp, u_int com, void *data)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(file_t *fp, u_long com, void *data)
{
	struct vnode *vp = fp->f_vnode, *ovp;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
			VOP_UNLOCK(vp);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			return (VOP_BMAP(vp, *block, NULL, block, NULL));
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    kauth_cred_get());
		if (error == 0 && com == TIOCSCTTY) {
			vref(vp);
			mutex_enter(proc_lock);
			ovp = curproc->p_session->s_ttyvp;
			curproc->p_session->s_ttyvp = vp;
			mutex_exit(proc_lock);
			if (ovp != NULL)
				vrele(ovp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(file_t *fp, int events)
{

	return (VOP_POLL(fp->f_vnode, events));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(file_t *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

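/*
 * File table vnode mmap routine: validate the mapping request, derive
 * the maximum protection from the open mode and the file's attributes,
 * and hand back a referenced uvm_object (plus flags and advice) for
 * the mapping.
 */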
static int
vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct vattr va;
	struct lwp *l;
	vm_prot_t maxprot;
	off_t off;
	int error, flags;
	bool needwritemap;

	l = curlwp;

	off = *offp;
	flags = *flagsp;
	maxprot = VM_PROT_EXECUTE;

	vp = fp->f_vnode;
	if (vp->v_type != VREG && vp->v_type != VCHR &&
	    vp->v_type != VBLK) {
		/* only REG/CHR/BLK support mmap */
		return ENODEV;
	}
	if (vp->v_type != VCHR && off < 0) {
		return EINVAL;
	}
	if (vp->v_type != VCHR && (off_t)(off + size) < off) {
		/* no offset wrapping */
		return EOVERFLOW;
	}

	/* special case: catch SunOS style /dev/zero */
	if (vp->v_type == VCHR &&
	    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
		*uobjp = NULL;
		*maxprotp = VM_PROT_ALL;
		return 0;
	}

	/*
	 * Old programs may not select a specific sharing type, so
	 * default to an appropriate one.
	 *
	 * XXX: how does MAP_ANON fit in the picture?
	 */
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
		struct proc *p = l->l_proc;
		printf("WARNING: defaulted mmap() share type to "
		    "%s (pid %d command %s)\n", vp->v_type == VCHR ?
		    "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
		    p->p_comm);
#endif
		if (vp->v_type == VCHR)
			flags |= MAP_SHARED;	/* for a device */
		else
			flags |= MAP_PRIVATE;	/* for a file */
	}

	/*
	 * MAP_PRIVATE device mappings don't make sense (and aren't
	 * supported anyway).  However, some programs rely on this,
	 * so just change it to MAP_SHARED.
	 */
	if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
		flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
	}

	/*
	 * now check protection
	 */

	/* check read access */
	if (fp->f_flag & FREAD)
		maxprot |= VM_PROT_READ;
	else if (prot & PROT_READ) {
		return EACCES;
	}

	/* check write access, shared case first */
	if (flags & MAP_SHARED) {
		/*
		 * If the file is writable, add PROT_WRITE to maxprot
		 * only when the file is neither immutable nor
		 * append-only.  Otherwise, if PROT_WRITE was asked
		 * for, return EPERM.
		 */
		if (fp->f_flag & FWRITE) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &va, l->l_cred);
			VOP_UNLOCK(vp);
			if (error) {
				return error;
			}
			if ((va.va_flags &
			    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
				maxprot |= VM_PROT_WRITE;
			else if (prot & PROT_WRITE) {
				return EPERM;
			}
		} else if (prot & PROT_WRITE) {
			return EACCES;
		}
	} else {
		/* MAP_PRIVATE mappings can always write: to a private copy */
		maxprot |= VM_PROT_WRITE;
	}

	/*
	 * Don't allow mmap for EXEC if the file system
	 * is mounted NOEXEC.
	 */
	if ((prot & PROT_EXEC) != 0 &&
	    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
		return EACCES;
	}

	if (vp->v_type != VCHR) {
		error = VOP_MMAP(vp, prot, curlwp->l_cred);
		if (error) {
			return error;
		}
		vref(vp);
		uobj = &vp->v_uobj;

		/*
		 * If the vnode is being mapped with PROT_EXEC,
		 * then mark it as text.
		 */
		if (prot & PROT_EXEC) {
			vn_markexec(vp);
		}
	} else {
		int i = maxprot;

		/*
		 * XXX Some devices don't like to be mapped with
		 * XXX PROT_EXEC or PROT_WRITE, but we don't really
		 * XXX have a better way of handling this, right now
		 */
		do {
			uobj = udv_attach(vp->v_rdev,
			    (flags & MAP_SHARED) ? i :
			    (i & ~VM_PROT_WRITE), off, size);
			i--;
		} while ((uobj == NULL) && (i > 0));
		if (uobj == NULL) {
			return EINVAL;
		}
		*advicep = UVM_ADV_RANDOM;
	}

	/*
	 * Set vnode flags to indicate the new kinds of mapping.
	 * We take the vnode lock in exclusive mode here to serialize
	 * with direct I/O.
	 *
	 * Safe to check for these flag values without a lock, as
	 * long as a reference to the vnode is held.
	 */
	needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
	    (flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) != 0;
	if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		if (needwritemap) {
			mutex_enter(vp->v_interlock);
			vp->v_iflag |= VI_WRMAP;
			mutex_exit(vp->v_interlock);
		}
		VOP_UNLOCK(vp);
	}

#if NVERIEXEC > 0

	/*
	 * Check if the file can be executed indirectly.
	 *
	 * XXX: This gives false warnings about "Incorrect access type"
	 * XXX: if the mapping is not executable.  Harmless, but will be
	 * XXX: fixed as part of other changes.
	 */
	if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
	    NULL)) {

		/*
		 * Don't allow executable mappings if we can't
		 * indirectly execute the file.
		 */
		if (prot & VM_PROT_EXECUTE) {
			return EPERM;
		}

		/*
		 * Strip the executable bit from 'maxprot' to make sure
		 * it can't be made executable later.
		 */
		maxprot &= ~VM_PROT_EXECUTE;
	}
#endif /* NVERIEXEC > 0 */

	*uobjp = uobj;
	*maxprotp = maxprot;
	*flagsp = flags;

	return 0;
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

#if 0
	KASSERT(vp->v_usecount > 0 || (vp->v_iflag & VI_ONWORKLST) != 0);
#endif
	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY)) == 0);
	KASSERT(!mutex_owned(vp->v_interlock));

#ifdef DIAGNOSTIC
	if (wapbl_vphaswapbl(vp))
		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
#endif

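	/*
	 * If the vnode was reclaimed while we were waiting, the first
	 * VOP_LOCK() fails with ENOENT; with LK_RETRY we try once more,
	 * which dispatches to the vnode's replacement (dead) operations
	 * vector.
	 */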
	error = VOP_LOCK(vp, flags);
	if ((flags & LK_RETRY) != 0 && error == ENOENT)
		error = VOP_LOCK(vp, flags);

	KASSERT((flags & LK_RETRY) == 0 || (flags & LK_NOWAIT) != 0 ||
	    error == 0);

	return error;
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(file_t *fp)
{

	return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}
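
/*
 * Illustrative sketch (not part of this file): fetching a system-
 * namespace attribute into a fixed-size kernel buffer; "myattr" is a
 * made-up attribute name.
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "myattr", &len, buf, curlwp);
 *	if (error == 0)
 *		... len now holds the number of bytes returned ...
 */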

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return (error);
}

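/*
 * Lazily allocate a read-ahead context for a regular file.  The
 * interlock must be dropped across the allocation, so v_ractx is
 * re-checked after reacquiring it; if another thread won the race,
 * the freshly allocated context is freed again.
 */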
void
vn_ra_allocctx(struct vnode *vp)
{
	struct uvm_ractx *ra = NULL;

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	if (vp->v_ractx == NULL) {
		mutex_exit(vp->v_interlock);
		ra = uvm_ra_allocctx();
		mutex_enter(vp->v_interlock);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}

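/*
 * Pass a vnode operation through to the fifofs implementation; file
 * systems use this to delegate operations on FIFO vnodes to the
 * generic FIFO code.
 */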
int
vn_fifo_bypass(void *v)
{
	struct vop_generic_args *ap = v;

	return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
}