/*	$NetBSD: vfs_vnops.c,v 1.229 2022/07/06 01:12:46 riastradh Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.229 2022/07/06 01:12:46 riastradh Exp $");

#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/wapbl.h>
#include <sys/mman.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_device.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);

#include <sys/verified_exec.h>

static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_closefile(file_t *fp);
static int vn_poll(file_t *fp, int events);
static int vn_fcntl(file_t *fp, u_int com, void *data);
static int vn_statfile(file_t *fp, struct stat *sb);
static int vn_ioctl(file_t *fp, u_long com, void *data);
static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
	    struct uvm_object **, int *);
static int vn_seek(struct file *, off_t, int, off_t *, int);

const struct fileops vnops = {
	.fo_name = "vn",
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_fcntl = vn_fcntl,
	.fo_poll = vn_poll,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_kqfilter = vn_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = vn_mmap,
	.fo_seek = vn_seek,
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * at_dvp is the directory for openat(), if any.
 * pb is the path.
 * nmode is additional namei flags, restricted to TRYEMULROOT and NOCHROOT.
 * fmode is the open flags, converted from O_* to F*
 * cmode is the creation file permissions.
 *
 * XXX shouldn't cmode be mode_t?
 *
 * On success produces either a vnode in *ret_vp, or if that is NULL,
 * a file descriptor number in ret_fd.
 *
 * The caller may pass NULL for ret_fd (and ret_domove), in which case
 * EOPNOTSUPP will be produced in the cases that would otherwise return
 * a file descriptor.
 *
 * Note that callers that want no-follow behavior should pass
 * O_NOFOLLOW in fmode. Neither FOLLOW nor NOFOLLOW in nmode is
 * honored.
 */
int
vn_open(struct vnode *at_dvp, struct pathbuf *pb,
    int nmode, int fmode, int cmode,
    struct vnode **ret_vp, bool *ret_domove, int *ret_fd)
{
	struct nameidata nd;
	struct vnode *vp = NULL;
	struct lwp *l = curlwp;
	kauth_cred_t cred = l->l_cred;
	struct vattr va;
	int error;
	const char *pathstring;

	KASSERT((nmode & (TRYEMULROOT | NOCHROOT)) == nmode);

	KASSERT(ret_vp != NULL);
	KASSERT((ret_domove == NULL) == (ret_fd == NULL));

	if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
		return EINVAL;

	NDINIT(&nd, LOOKUP, nmode, pb);
	if (at_dvp != NULL)
		NDAT(&nd, at_dvp);

	nd.ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;

	if (fmode & O_CREAT) {
		nd.ni_cnd.cn_nameiop = CREATE;
		nd.ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			nd.ni_cnd.cn_flags |= FOLLOW;
		if ((fmode & O_EXCL) == 0)
			nd.ni_cnd.cn_flags |= NONEXCLHACK;
	} else {
		nd.ni_cnd.cn_nameiop = LOOKUP;
		nd.ni_cnd.cn_flags |= LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			nd.ni_cnd.cn_flags |= FOLLOW;
	}

	pathstring = pathbuf_stringcopy_get(nd.ni_pathbuf);
	if (pathstring == NULL) {
		return ENOMEM;
	}

	/*
	 * When this "interface" was exposed to do_open() it used
	 * to initialize l_dupfd to -newfd-1 (thus passing in the
	 * new file handle number to use)... but nothing in the
	 * kernel uses that value. So just send 0.
	 */
	l->l_dupfd = 0;

	error = namei(&nd);
	if (error)
		goto out;

	vp = nd.ni_vp;

#if NVERIEXEC > 0
	error = veriexec_openchk(l, nd.ni_vp, pathstring, fmode);
	if (error) {
		/* We have to release the locks ourselves */
		/*
		 * 20210604 dholland passing NONEXCLHACK means we can
		 * get ni_dvp == NULL back if ni_vp exists, and we should
		 * treat that like the non-O_CREAT case.
		 */
		if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
			if (vp == NULL) {
				vput(nd.ni_dvp);
			} else {
				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
				if (nd.ni_dvp == nd.ni_vp)
					vrele(nd.ni_dvp);
				else
					vput(nd.ni_dvp);
				nd.ni_dvp = NULL;
				vput(vp);
			}
		} else {
			vput(vp);
		}
		goto out;
	}
#endif /* NVERIEXEC > 0 */

	/*
	 * 20210604 dholland ditto
	 */
	if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
		if (nd.ni_vp == NULL) {
			vattr_null(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
			    &nd.ni_cnd, &va);
			if (error) {
				vput(nd.ni_dvp);
				goto out;
			}
			fmode &= ~O_TRUNC;
			vp = nd.ni_vp;
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vput(nd.ni_dvp);
		} else {
			VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
			if (nd.ni_dvp == nd.ni_vp)
				vrele(nd.ni_dvp);
			else
				vput(nd.ni_dvp);
			nd.ni_dvp = NULL;
			vp = nd.ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else if ((fmode & O_CREAT) != 0) {
		/*
		 * 20210606 dholland passing NONEXCLHACK means this
		 * case exists; it is the same as the following one
		 * but also needs to do things in the second (exists)
		 * half of the following block. (Besides handle
		 * ni_dvp, anyway.)
		 */
		vp = nd.ni_vp;
		KASSERT((fmode & O_EXCL) == 0);
		fmode &= ~O_CREAT;
	} else {
		vp = nd.ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (nd.ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

	if ((fmode & O_CREAT) == 0) {
		error = vn_openchk(vp, cred, fmode);
		if (error != 0)
			goto bad;
	}

	if (fmode & O_TRUNC) {
		vattr_null(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
		goto bad;
	if (fmode & FWRITE) {
		mutex_enter(vp->v_interlock);
		vp->v_writecount++;
		mutex_exit(vp->v_interlock);
	}

bad:
	if (error)
		vput(vp);
out:
	pathbuf_stringcopy_put(nd.ni_pathbuf, pathstring);

	switch (error) {
	case EDUPFD:
	case EMOVEFD:
		/* if the caller isn't prepared to handle fds, fail for them */
		if (ret_fd == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		*ret_vp = NULL;
		*ret_domove = error == EMOVEFD;
		*ret_fd = l->l_dupfd;
		error = 0;
		break;
	case 0:
		*ret_vp = vp;
		break;
	}
	l->l_dupfd = 0;
	return error;
}
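
/*
 * A minimal vn_open() usage sketch (illustrative only; the path and
 * the error handling are assumptions, not taken from this file).  On
 * success with ret_fd == NULL the vnode is returned locked, so unlock
 * it before handing it to vn_close(), which locks it itself:
 *
 *	struct pathbuf *pb = pathbuf_create("/some/path");
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vn_open(NULL, pb, 0, FREAD, 0, &vp, NULL, NULL);
 *	pathbuf_destroy(pb);
 *	if (error == 0) {
 *		... use the locked vnode ...
 *		VOP_UNLOCK(vp);
 *		vn_close(vp, FREAD, curlwp->l_cred);
 *	}
 */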

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_iflag & VI_TEXT)
		return (ETXTBSY);
	return (0);
}

int
vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
{
	int permbits = 0;
	int error;

	if (vp->v_type == VNON || vp->v_type == VBAD)
		return ENXIO;

	if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
		return ENOTDIR;

	if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
		return EFTYPE;

	if ((fflags & FREAD) != 0) {
		permbits = VREAD;
	}
	if ((fflags & FEXEC) != 0) {
		permbits |= VEXEC;
	}
	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
		permbits |= VWRITE;
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp);
		if (error != 0)
			goto bad;
	}
	error = VOP_ACCESS(vp, permbits, cred);
bad:
	return error;
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{

	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		/* Safe unlocked, as long as caller holds a reference. */
		return;
	}

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
		vp->v_iflag |= VI_EXECMAP;
	}
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
		/* Safe unlocked, as long as caller holds a reference. */
		return (0);
	}

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if (vp->v_writecount != 0) {
		KASSERT((vp->v_iflag & VI_TEXT) == 0);
		mutex_exit(vp->v_interlock);
		rw_exit(vp->v_uobj.vmobjlock);
		return (ETXTBSY);
	}
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
	}
	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
	return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
{
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (flags & FWRITE) {
		mutex_enter(vp->v_interlock);
		KASSERT(vp->v_writecount > 0);
		vp->v_writecount--;
		mutex_exit(vp->v_interlock);
	}
	error = VOP_CLOSE(vp, flags, cred);
	vput(vp);
	return (error);
}

static int
enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct lwp *l = curlwp;
	off_t testoff;

	if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
		return 0;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (ioflag & IO_APPEND)
		testoff = vp->v_size;
	else
		testoff = uio->uio_offset;

	if (testoff + uio->uio_resid >
	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		mutex_enter(&proc_lock);
		psignal(l->l_proc, SIGXFSZ);
		mutex_exit(&proc_lock);
		return EFBIG;
	}

	return 0;
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}

	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
		goto out;

	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}

	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;

out:
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}
	return (error);
}
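
/*
 * A minimal vn_rdwr() usage sketch (illustrative only; the buffer and
 * offset are assumptions): read 512 bytes at offset 0 from an unlocked
 * vnode into a kernel buffer.  Passing aresid == NULL turns a short
 * read into EIO, per the code above:
 *
 *	char buf[512];
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, curlwp->l_cred, NULL, curlwp);
 */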

int
vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = fp->f_vnode;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size on any kernel buffers used by VOP_READDIR */
	count = uimin(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
	    ncookies);
	mutex_enter(&fp->f_lock);
	fp->f_offset = auio.uio_offset;
	mutex_exit(&fp->f_lock);
	VOP_UNLOCK(vp);
	if (error)
		return (error);

	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}

	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		vref(vp);
		mutex_enter(&fp->f_lock);
		fp->f_vnode = vp;
		fp->f_offset = 0;
		mutex_exit(&fp->f_lock);
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = fp->f_vnode;
	int error, ioflag, fflag;
	size_t count;

	ioflag = IO_ADV_ENCODE(fp->f_advice);
	fflag = fp->f_flag;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = fp->f_vnode;
	int error, ioflag, fflag;
	size_t count;

	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
	fflag = fp->f_flag;
	if (vp->v_type == VREG && (fflag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fflag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fflag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;

	if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
		goto out;

	error = VOP_WRITE(vp, uio, ioflag, cred);

	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND) {
			/*
			 * SUSv3 describes the behaviour for count = 0 as
			 * follows: "Before any action ... is taken, and if
			 * nbyte is zero and the file is a regular file, the
			 * write() function ... in the absence of errors ...
			 * shall return zero and have no other results."
			 */
			if (count)
				*offset = uio->uio_offset;
		} else
			*offset += count - uio->uio_resid;
	}

out:
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(file_t *fp, struct stat *sb)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = vn_stat(vp, sb);
	VOP_UNLOCK(vp);
	return error;
}

int
vn_stat(struct vnode *vp, struct stat *sb)
{
	struct vattr va;
	int error;
	mode_t mode;

	memset(&va, 0, sizeof(va));
	error = VOP_GETATTR(vp, &va, kauth_cred_get());
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	memset(sb, 0, sizeof(*sb));
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(file_t *fp, u_int com, void *data)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(file_t *fp, u_long com, void *data)
{
	struct vnode *vp = fp->f_vnode, *ovp;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			VOP_UNLOCK(vp);
			if (error)
				return (error);
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_BMAP(vp, *block, NULL, block, NULL);
			VOP_UNLOCK(vp);
			return error;
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			VOP_UNLOCK(vp);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    kauth_cred_get());
		if (error == 0 && com == TIOCSCTTY) {
			vref(vp);
			mutex_enter(&proc_lock);
			ovp = curproc->p_session->s_ttyvp;
			curproc->p_session->s_ttyvp = vp;
			mutex_exit(&proc_lock);
			if (ovp != NULL)
				vrele(ovp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(file_t *fp, int events)
{

	return (VOP_POLL(fp->f_vnode, events));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(file_t *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

static int
vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct vattr va;
	struct lwp *l;
	vm_prot_t maxprot;
	off_t off;
	int error, flags;
	bool needwritemap;

	l = curlwp;

	off = *offp;
	flags = *flagsp;
	maxprot = VM_PROT_EXECUTE;

	KASSERT(size > 0);

	vp = fp->f_vnode;
	if (vp->v_type != VREG && vp->v_type != VCHR &&
	    vp->v_type != VBLK) {
		/* only REG/CHR/BLK support mmap */
		return ENODEV;
	}
	if (vp->v_type != VCHR && off < 0) {
		return EINVAL;
	}
	if (vp->v_type != VCHR && (off_t)(off + size) < off) {
		/* no offset wrapping */
		return EOVERFLOW;
	}

	/* special case: catch SunOS style /dev/zero */
	if (vp->v_type == VCHR &&
	    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
		*uobjp = NULL;
		*maxprotp = VM_PROT_ALL;
		return 0;
	}

	/*
	 * Old programs may not select a specific sharing type, so
	 * default to an appropriate one.
	 *
	 * XXX: how does MAP_ANON fit in the picture?
	 */
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
		struct proc *p = l->l_proc;
		printf("WARNING: defaulted mmap() share type to "
		    "%s (pid %d command %s)\n", vp->v_type == VCHR ?
		    "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
		    p->p_comm);
#endif
		if (vp->v_type == VCHR)
			flags |= MAP_SHARED;	/* for a device */
		else
			flags |= MAP_PRIVATE;	/* for a file */
	}

	/*
	 * MAP_PRIVATE device mappings don't make sense (and aren't
	 * supported anyway).  However, some programs rely on this,
	 * so just change it to MAP_SHARED.
	 */
	if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
		flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
	}

	/*
	 * now check protection
	 */

	/* check read access */
	if (fp->f_flag & FREAD)
		maxprot |= VM_PROT_READ;
	else if (prot & PROT_READ) {
		return EACCES;
	}

	/* check write access, shared case first */
	if (flags & MAP_SHARED) {
		/*
		 * If the file is writable, add VM_PROT_WRITE to
		 * maxprot only if the file is not immutable or
		 * append-only; otherwise, if PROT_WRITE was asked
		 * for, return EPERM.
		 */
		if (fp->f_flag & FWRITE) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &va, l->l_cred);
			VOP_UNLOCK(vp);
			if (error) {
				return error;
			}
			if ((va.va_flags &
			    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
				maxprot |= VM_PROT_WRITE;
			else if (prot & PROT_WRITE) {
				return EPERM;
			}
		} else if (prot & PROT_WRITE) {
			return EACCES;
		}
	} else {
		/* MAP_PRIVATE mappings can always be written to */
		maxprot |= VM_PROT_WRITE;
	}

	/*
	 * Don't allow mmap for EXEC if the file system
	 * is mounted NOEXEC.
	 */
	if ((prot & PROT_EXEC) != 0 &&
	    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
		return EACCES;
	}

	if (vp->v_type != VCHR) {
		error = VOP_MMAP(vp, prot, curlwp->l_cred);
		if (error) {
			return error;
		}
		vref(vp);
		uobj = &vp->v_uobj;

		/*
		 * If the vnode is being mapped with PROT_EXEC,
		 * then mark it as text.
		 */
		if (prot & PROT_EXEC) {
			vn_markexec(vp);
		}
	} else {
		int i = maxprot;

		/*
		 * XXX Some devices don't like to be mapped with
		 * XXX PROT_EXEC or PROT_WRITE, but we don't really
		 * XXX have a better way of handling this, right now
		 */
		do {
			uobj = udv_attach(vp->v_rdev,
			    (flags & MAP_SHARED) ? i :
			    (i & ~VM_PROT_WRITE), off, size);
			i--;
		} while ((uobj == NULL) && (i > 0));
		if (uobj == NULL) {
			return EINVAL;
		}
		*advicep = UVM_ADV_RANDOM;
	}

	/*
	 * Set vnode flags to indicate the new kinds of mapping.
	 * We take the vnode lock in exclusive mode here to serialize
	 * with direct I/O.
	 *
	 * Safe to check for these flag values without a lock, as
	 * long as a reference to the vnode is held.
	 */
	needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
	    (flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) != 0;
	if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		if (needwritemap) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			mutex_enter(vp->v_interlock);
			vp->v_iflag |= VI_WRMAP;
			mutex_exit(vp->v_interlock);
			rw_exit(vp->v_uobj.vmobjlock);
		}
		VOP_UNLOCK(vp);
	}

#if NVERIEXEC > 0

	/*
	 * Check if the file can be executed indirectly.
	 *
	 * XXX: This gives false warnings about "Incorrect access type"
	 * XXX: if the mapping is not executable. Harmless, but will be
	 * XXX: fixed as part of other changes.
	 */
	if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
	    NULL)) {

		/*
		 * Don't allow executable mappings if we can't
		 * indirectly execute the file.
		 */
		if (prot & VM_PROT_EXECUTE) {
			return EPERM;
		}

		/*
		 * Strip the executable bit from 'maxprot' to make sure
		 * it can't be made executable later.
		 */
		maxprot &= ~VM_PROT_EXECUTE;
	}
#endif /* NVERIEXEC > 0 */

	*uobjp = uobj;
	*maxprotp = maxprot;
	*flagsp = flags;

	return 0;
}

static int
vn_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
    int flags)
{
	const off_t OFF_MIN = __type_min(off_t);
	const off_t OFF_MAX = __type_max(off_t);
	kauth_cred_t cred = fp->f_cred;
	off_t oldoff, newoff;
	struct vnode *vp = fp->f_vnode;
	struct vattr vattr;
	int error;

	if (vp->v_type == VFIFO)
		return ESPIPE;

	vn_lock(vp, LK_SHARED | LK_RETRY);

	/* Compute the old and new offsets. */
	oldoff = fp->f_offset;
	switch (whence) {
	case SEEK_CUR:
		if (delta > 0) {
			if (oldoff > 0 && delta > OFF_MAX - oldoff) {
				newoff = OFF_MAX;
				break;
			}
		} else {
			if (oldoff < 0 && delta < OFF_MIN - oldoff) {
				newoff = OFF_MIN;
				break;
			}
		}
		newoff = oldoff + delta;
		break;
	case SEEK_END:
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		if (vattr.va_size > OFF_MAX ||
		    delta > OFF_MAX - (off_t)vattr.va_size) {
			newoff = OFF_MAX;
			break;
		}
		newoff = delta + vattr.va_size;
		break;
	case SEEK_SET:
		newoff = delta;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Pass the proposed change to the file system to audit. */
	error = VOP_SEEK(vp, oldoff, newoff, cred);
	if (error)
		goto out;

	/* Success! */
	if (newoffp)
		*newoffp = newoff;
	if (flags & FOF_UPDATE_OFFSET)
		fp->f_offset = newoff;
	error = 0;

out:	VOP_UNLOCK(vp);
	return error;
}
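
/*
 * Worked example of the SEEK_CUR clamping above (an illustrative
 * sketch, not part of the original logic): with oldoff == OFF_MAX - 1
 * and delta == 10, plain off_t addition would overflow; instead newoff
 * is pinned to OFF_MAX, and VOP_SEEK() still gets a chance to reject
 * the result.
 */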

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	struct lwp *l;
	int error;

	KASSERT(vrefcnt(vp) > 0);
	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
	    LK_UPGRADE|LK_DOWNGRADE)) == 0);
	KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));

#ifdef DIAGNOSTIC
	if (wapbl_vphaswapbl(vp))
		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
#endif

	/* Get a more useful report for lockstat. */
	l = curlwp;
	KASSERT(l->l_rwcallsite == 0);
	l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);

	error = VOP_LOCK(vp, flags);

	l->l_rwcallsite = 0;

	switch (flags & (LK_RETRY | LK_NOWAIT)) {
	case 0:
		KASSERT(error == 0 || error == ENOENT);
		break;
	case LK_RETRY:
		KASSERT(error == 0);
		break;
	case LK_NOWAIT:
		KASSERT(error == 0 || error == EBUSY || error == ENOENT);
		break;
	case LK_RETRY | LK_NOWAIT:
		KASSERT(error == 0 || error == EBUSY);
		break;
	}

	return error;
}
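
/*
 * A minimal vn_lock() usage sketch (illustrative only; "va" and "cred"
 * are assumed to be in scope).  With LK_RETRY the lock always succeeds
 * per the assertions above, so the return value may be ignored; without
 * LK_RETRY the caller must be prepared for ENOENT from a vnode that is
 * being reclaimed:
 *
 *	vn_lock(vp, LK_SHARED | LK_RETRY);
 *	error = VOP_GETATTR(vp, &va, cred);
 *	VOP_UNLOCK(vp);
 */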

/*
 * File table vnode close routine.
 */
static int
vn_closefile(file_t *fp)
{

	return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
	    NOCRED);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}
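
/*
 * A minimal vn_extattr_get() usage sketch (illustrative only; the
 * attribute name and buffer size are assumptions).  Passing ioflg == 0
 * lets the wrapper lock the vnode itself; on success, buflen is updated
 * to the number of bytes actually read:
 *
 *	char buf[64];
 *	size_t buflen = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &buflen, buf, curlwp);
 */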

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);	/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NOCRED);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return (error);
}

int
vn_fifo_bypass(void *v)
{
	struct vop_generic_args *ap = v;

	return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
}

/*
 * Open block device by device number
 */
int
vn_bdev_open(dev_t dev, struct vnode **vpp, struct lwp *l)
{
	int error;

	if ((error = bdevvp(dev, vpp)) != 0)
		return error;

	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vput(*vpp);
		return error;
	}
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);
	VOP_UNLOCK(*vpp);

	return 0;
}

/*
 * Lookup the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 */
int
vn_bdev_openpath(struct pathbuf *pb, struct vnode **vpp, struct lwp *l)
{
	struct vnode *vp;
	dev_t dev;
	enum vtype vt;
	int error;

	error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
	if (error != 0)
		return error;

	dev = vp->v_rdev;
	vt = vp->v_type;

	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);

	if (vt != VBLK)
		return ENOTBLK;

	return vn_bdev_open(dev, vpp, l);
}

static long
vn_knote_to_interest(const struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
		/*
		 * Writing to the file or changing its attributes can
		 * set the file size, which impacts the readability
		 * filter.
		 *
		 * (No need to set NOTE_EXTEND here; it's only ever
		 * sent with other hints; see vnode_if.c.)
		 */
		return NOTE_WRITE | NOTE_ATTRIB;

	case EVFILT_VNODE:
		return kn->kn_sfflags;

	case EVFILT_WRITE:
	default:
		return 0;
	}
}

void
vn_knote_attach(struct vnode *vp, struct knote *kn)
{
	long interest = 0;

	/*
	 * We maintain a bitmask of the kevents that there is interest in,
	 * to minimize the impact of having watchers.  It's silly to have
	 * to traverse vn_klist every time a read or write happens simply
	 * because there is someone interested in knowing when the file
	 * is deleted, for example.
	 */

	mutex_enter(vp->v_interlock);
	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
	SLIST_FOREACH(kn, &vp->v_klist, kn_selnext) {
		interest |= vn_knote_to_interest(kn);
	}
	vp->v_klist_interest = interest;
	mutex_exit(vp->v_interlock);
}

void
vn_knote_detach(struct vnode *vp, struct knote *kn)
{
	int interest = 0;

	/*
	 * We special case removing the head of the list, because:
	 *
	 * 1. It's extremely likely that we're detaching the only
	 *    knote.
	 *
	 * 2. We're already traversing the whole list, so we don't
	 *    want to use the generic SLIST_REMOVE() which would
	 *    traverse it *again*.
	 */

	mutex_enter(vp->v_interlock);
	if (__predict_true(kn == SLIST_FIRST(&vp->v_klist))) {
		SLIST_REMOVE_HEAD(&vp->v_klist, kn_selnext);
		SLIST_FOREACH(kn, &vp->v_klist, kn_selnext) {
			interest |= vn_knote_to_interest(kn);
		}
		vp->v_klist_interest = interest;
	} else {
		struct knote *thiskn, *nextkn, *prevkn = NULL;

		SLIST_FOREACH_SAFE(thiskn, &vp->v_klist, kn_selnext, nextkn) {
			if (thiskn == kn) {
				KASSERT(kn != NULL);
				KASSERT(prevkn != NULL);
				SLIST_REMOVE_AFTER(prevkn, kn_selnext);
				kn = NULL;
			} else {
				interest |= vn_knote_to_interest(thiskn);
				prevkn = thiskn;
			}
		}
		vp->v_klist_interest = interest;
	}
	mutex_exit(vp->v_interlock);
}