/*	$NetBSD: vfs_syscalls.c,v 1.341 2008/01/10 19:04:23 ad Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.341 2008/01/10 19:04:23 ad Exp $");
41
42 #include "opt_compat_netbsd.h"
43 #include "opt_compat_43.h"
44 #include "opt_fileassoc.h"
45 #include "fss.h"
46 #include "veriexec.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/filedesc.h>
52 #include <sys/kernel.h>
53 #include <sys/file.h>
54 #include <sys/stat.h>
55 #include <sys/vnode.h>
56 #include <sys/mount.h>
57 #include <sys/proc.h>
58 #include <sys/uio.h>
59 #include <sys/malloc.h>
60 #include <sys/kmem.h>
61 #include <sys/dirent.h>
62 #include <sys/sysctl.h>
63 #include <sys/syscallargs.h>
64 #include <sys/vfs_syscalls.h>
65 #include <sys/ktrace.h>
66 #ifdef FILEASSOC
67 #include <sys/fileassoc.h>
68 #endif /* FILEASSOC */
69 #include <sys/verified_exec.h>
70 #include <sys/kauth.h>
71
72 #include <miscfs/genfs/genfs.h>
73 #include <miscfs/syncfs/syncfs.h>
74
75 #ifdef COMPAT_30
76 #include "opt_nfsserver.h"
77 #include <nfs/rpcv2.h>
78 #endif
79 #include <nfs/nfsproto.h>
80 #ifdef COMPAT_30
81 #include <nfs/nfs.h>
82 #include <nfs/nfs_var.h>
83 #endif
84
85 #if NFSS > 0
86 #include <dev/fssvar.h>
87 #endif
88
89 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount struct");
90
static int change_dir(struct nameidata *, struct lwp *);
static int change_flags(struct vnode *, u_long, struct lwp *);
static int change_mode(struct vnode *, int, struct lwp *);
static int change_owner(struct vnode *, uid_t, gid_t, struct lwp *, int);
95
96 void checkdirs(struct vnode *);
97
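/*
 * Non-zero if unprivileged (non-root) users are allowed to mount file
 * systems; normally toggled via sysctl.
 */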
98 int dovfsusermount = 0;
99
100 /*
101 * Virtual File System System Calls
102 */
103
104 /*
105 * Mount a file system.
106 */
107
108 #if defined(COMPAT_09) || defined(COMPAT_43)
109 /*
110 * This table is used to maintain compatibility with 4.3BSD
111 * and NetBSD 0.9 mount syscalls. Note, the order is important!
112 *
113 * Do not modify this table. It should only contain filesystems
114 * supported by NetBSD 0.9 and 4.3BSD.
115 */
116 const char * const mountcompatnames[] = {
117 NULL, /* 0 = MOUNT_NONE */
118 MOUNT_FFS, /* 1 = MOUNT_UFS */
119 MOUNT_NFS, /* 2 */
120 MOUNT_MFS, /* 3 */
121 MOUNT_MSDOS, /* 4 */
122 MOUNT_CD9660, /* 5 = MOUNT_ISOFS */
123 MOUNT_FDESC, /* 6 */
124 MOUNT_KERNFS, /* 7 */
125 NULL, /* 8 = MOUNT_DEVFS */
126 MOUNT_AFS, /* 9 */
127 };
128 const int nmountcompatnames = sizeof(mountcompatnames) /
129 sizeof(mountcompatnames[0]);
130 #endif /* COMPAT_09 || COMPAT_43 */
131
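/*
 * Update an already mounted file system (MNT_UPDATE / MNT_RELOAD).
 * The vnode must be the root of the mount being updated.
 */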
132 static int
133 mount_update(struct lwp *l, struct vnode *vp, const char *path, int flags,
134 void *data, size_t *data_len)
135 {
136 struct mount *mp;
137 int error = 0, saved_flags;
138
139 mp = vp->v_mount;
140 saved_flags = mp->mnt_flag;
141
142 /* We can operate only on VV_ROOT nodes. */
143 if ((vp->v_vflag & VV_ROOT) == 0) {
144 error = EINVAL;
145 goto out;
146 }
147
148 /*
149 * We only allow the filesystem to be reloaded if it
150 * is currently mounted read-only.
151 */
152 if (flags & MNT_RELOAD && !(mp->mnt_flag & MNT_RDONLY)) {
153 error = EOPNOTSUPP; /* Needs translation */
154 goto out;
155 }
156
157 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
158 KAUTH_REQ_SYSTEM_MOUNT_UPDATE, mp, KAUTH_ARG(flags), data);
159 if (error)
160 goto out;
161
162 if (vfs_busy(mp, LK_NOWAIT, 0)) {
163 error = EPERM;
164 goto out;
165 }
166
167 mp->mnt_flag &= ~MNT_OP_FLAGS;
168 mp->mnt_flag |= flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
169
170 /*
171 * Set the mount level flags.
172 */
173 if (flags & MNT_RDONLY)
174 mp->mnt_flag |= MNT_RDONLY;
175 else if (mp->mnt_flag & MNT_RDONLY)
176 mp->mnt_iflag |= IMNT_WANTRDWR;
177 mp->mnt_flag &=
178 ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
179 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
180 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP);
181 mp->mnt_flag |= flags &
182 (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
183 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
184 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
185 MNT_IGNORE);
186
187 error = VFS_MOUNT(mp, path, data, data_len);
188
189 #if defined(COMPAT_30) && defined(NFSSERVER)
190 if (error && data != NULL) {
191 int error2;
192
193 /* Update failed; let's try and see if it was an
194 * export request. */
195 error2 = nfs_update_exports_30(mp, path, data, l);
196
197 /* Only update error code if the export request was
198 * understood but some problem occurred while
199 * processing it. */
200 if (error2 != EJUSTRETURN)
201 error = error2;
202 }
203 #endif
204 if (mp->mnt_iflag & IMNT_WANTRDWR)
205 mp->mnt_flag &= ~MNT_RDONLY;
206 if (error)
207 mp->mnt_flag = saved_flags;
208 mp->mnt_flag &= ~MNT_OP_FLAGS;
209 mp->mnt_iflag &= ~IMNT_WANTRDWR;
210 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0) {
211 if (mp->mnt_syncer == NULL)
212 error = vfs_allocate_syncvnode(mp);
213 } else {
214 if (mp->mnt_syncer != NULL)
215 vfs_deallocate_syncvnode(mp);
216 }
217 vfs_unbusy(mp);
218
219 out:
220 return (error);
221 }
222
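/*
 * Resolve the file system type named by the user supplied string (or,
 * for old binaries, a numeric index into mountcompatnames) to a
 * struct vfsops.
 */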
223 static int
224 mount_get_vfsops(const char *fstype, struct vfsops **vfsops)
225 {
226 char fstypename[sizeof(((struct statvfs *)NULL)->f_fstypename)];
227 int error;
228
229 /* Copy file-system type from userspace. */
230 error = copyinstr(fstype, fstypename, sizeof(fstypename), NULL);
231 if (error) {
232 #if defined(COMPAT_09) || defined(COMPAT_43)
233 /*
234 * Historically, filesystem types were identified by numbers.
235 * If we get an integer for the filesystem type instead of a
236 * string, we check to see if it matches one of the historic
237 * filesystem types.
238 */
239 u_long fsindex = (u_long)fstype;
240 if (fsindex >= nmountcompatnames ||
241 mountcompatnames[fsindex] == NULL)
242 return ENODEV;
243 strlcpy(fstypename, mountcompatnames[fsindex],
244 sizeof(fstypename));
245 #else
246 return error;
247 #endif
248 }
249
250 #ifdef COMPAT_10
251 /* Accept `ufs' as an alias for `ffs'. */
252 if (strcmp(fstypename, "ufs") == 0)
253 fstypename[0] = 'f';
254 #endif
255
256 if ((*vfsops = vfs_getopsbyname(fstypename)) == NULL)
257 return ENODEV;
258 return 0;
259 }
260
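/*
 * Mount a new file system of the given type on the directory vnode
 * *vpp.  The vnode is passed in locked by the caller; once the mount
 * has been established, *vpp is cleared so the caller will not vput()
 * it again.
 */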
261 static int
262 mount_domount(struct lwp *l, struct vnode **vpp, struct vfsops *vfsops,
263 const char *path, int flags, void *data, size_t *data_len, u_int recurse)
264 {
265 struct mount *mp = NULL;
266 struct vnode *vp = *vpp;
267 struct vattr va;
268 int error;
269
270 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
271 KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
272 if (error)
273 return error;
274
275 /* Can't make a non-dir a mount-point (from here anyway). */
276 if (vp->v_type != VDIR)
277 return ENOTDIR;
278
279 /*
280 * If the user is not root, ensure that they own the directory
281 * onto which we are attempting to mount.
282 */
283 if ((error = VOP_GETATTR(vp, &va, l->l_cred)) != 0 ||
284 (va.va_uid != kauth_cred_geteuid(l->l_cred) &&
285 (error = kauth_authorize_generic(l->l_cred,
286 KAUTH_GENERIC_ISSUSER, NULL)) != 0)) {
287 return error;
288 }
289
290 if (flags & MNT_EXPORTED)
291 return EINVAL;
292
293 if ((error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
294 return error;
295
	/*
	 * Refuse if a file system is already mounted on this vnode.
	 */
299 if (vp->v_mountedhere != NULL)
300 return EBUSY;
301
302 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
303
304 mp->mnt_op = vfsops;
305
306 TAILQ_INIT(&mp->mnt_vnodelist);
307 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
308 mutex_init(&mp->mnt_mutex, MUTEX_DEFAULT, IPL_NONE);
309 (void)vfs_busy(mp, LK_NOWAIT, 0);
310
311 mp->mnt_vnodecovered = vp;
312 mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);
313 mp->mnt_unmounter = NULL;
314 mount_initspecific(mp);
315
316 /*
317 * The underlying file system may refuse the mount for
318 * various reasons. Allow the user to force it to happen.
319 *
320 * Set the mount level flags.
321 */
322 mp->mnt_flag = flags &
323 (MNT_FORCE | MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
324 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
325 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
326 MNT_IGNORE | MNT_RDONLY);
327
328 error = VFS_MOUNT(mp, path, data, data_len);
329 mp->mnt_flag &= ~MNT_OP_FLAGS;
330
331 /*
332 * Put the new filesystem on the mount list after root.
333 */
334 cache_purge(vp);
335 if (error != 0) {
336 vp->v_mountedhere = NULL;
337 mp->mnt_op->vfs_refcount--;
338 vfs_unbusy(mp);
339 vfs_destroy(mp);
340 return error;
341 }
342
343 mp->mnt_iflag &= ~IMNT_WANTRDWR;
344 vp->v_mountedhere = mp;
345 mutex_enter(&mountlist_lock);
346 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
347 mutex_exit(&mountlist_lock);
348 vn_restorerecurse(vp, recurse);
349 VOP_UNLOCK(vp, 0);
350 checkdirs(vp);
351 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
352 error = vfs_allocate_syncvnode(mp);
353 vfs_unbusy(mp);
354 (void) VFS_STATVFS(mp, &mp->mnt_stat);
355 error = VFS_START(mp, 0);
356 if (error)
357 vrele(vp);
358 *vpp = NULL;
359 return error;
360 }
361
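/*
 * Fetch the current mount arguments of an existing mount (MNT_GETARGS)
 * by calling VFS_MOUNT() with only MNT_GETARGS set.
 */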
362 static int
363 mount_getargs(struct lwp *l, struct vnode *vp, const char *path, int flags,
364 void *data, size_t *data_len)
365 {
366 struct mount *mp;
367 int error;
368
369 /* If MNT_GETARGS is specified, it should be the only flag. */
370 if (flags & ~MNT_GETARGS)
371 return EINVAL;
372
373 mp = vp->v_mount;
374
375 /* XXX: probably some notion of "can see" here if we want isolation. */
376 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
377 KAUTH_REQ_SYSTEM_MOUNT_GET, mp, data, NULL);
378 if (error)
379 return error;
380
381 if ((vp->v_vflag & VV_ROOT) == 0)
382 return EINVAL;
383
384 if (vfs_busy(mp, LK_NOWAIT, 0))
385 return EPERM;
386
387 mp->mnt_flag &= ~MNT_OP_FLAGS;
388 mp->mnt_flag |= MNT_GETARGS;
389 error = VFS_MOUNT(mp, path, data, data_len);
390 mp->mnt_flag &= ~MNT_OP_FLAGS;
391
392 vfs_unbusy(mp);
393 return (error);
394 }
395
396 #ifdef COMPAT_40
397 /* ARGSUSED */
398 int
399 compat_40_sys_mount(struct lwp *l, const struct compat_40_sys_mount_args *uap, register_t *retval)
400 {
401 /* {
402 syscallarg(const char *) type;
403 syscallarg(const char *) path;
404 syscallarg(int) flags;
405 syscallarg(void *) data;
406 } */
407 register_t dummy;
408
409 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
410 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE, 0, &dummy);
411 }
412 #endif
413
414 int
415 sys___mount50(struct lwp *l, const struct sys___mount50_args *uap, register_t *retval)
416 {
417 /* {
418 syscallarg(const char *) type;
419 syscallarg(const char *) path;
420 syscallarg(int) flags;
421 syscallarg(void *) data;
422 syscallarg(size_t) data_len;
423 } */
424
425 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
426 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE,
427 SCARG(uap, data_len), retval);
428 }
429
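/*
 * Common code for the mount(2) family of system calls: look up the
 * vnode to be covered, copy in any file system specific arguments,
 * and dispatch to mount_getargs(), mount_update() or mount_domount().
 */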
430 int
431 do_sys_mount(struct lwp *l, struct vfsops *vfsops, const char *type,
432 const char *path, int flags, void *data, enum uio_seg data_seg,
433 size_t data_len, register_t *retval)
434 {
435 struct vnode *vp;
436 struct nameidata nd;
437 void *data_buf = data;
438 u_int recurse;
439 int error;
440
441 /*
442 * Get vnode to be covered
443 */
444 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
445 if ((error = namei(&nd)) != 0)
446 return (error);
447 vp = nd.ni_vp;
448
449 /*
450 * A lookup in VFS_MOUNT might result in an attempt to
451 * lock this vnode again, so make the lock recursive.
452 */
453 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
454 recurse = vn_setrecurse(vp);
455
456 if (vfsops == NULL) {
457 if (flags & (MNT_GETARGS | MNT_UPDATE))
458 vfsops = vp->v_mount->mnt_op;
459 else {
460 /* 'type' is userspace */
461 error = mount_get_vfsops(type, &vfsops);
462 if (error != 0)
463 goto done;
464 }
465 }
466
467 if (data != NULL && data_seg == UIO_USERSPACE) {
468 if (data_len == 0) {
469 /* No length supplied, use default for filesystem */
470 data_len = vfsops->vfs_min_mount_data;
471 if (data_len > VFS_MAX_MOUNT_DATA) {
472 /* maybe a force loaded old LKM */
473 error = EINVAL;
474 goto done;
475 }
476 #ifdef COMPAT_30
477 /* Hopefully a longer buffer won't make copyin() fail */
478 if (flags & MNT_UPDATE
479 && data_len < sizeof (struct mnt_export_args30))
480 data_len = sizeof (struct mnt_export_args30);
481 #endif
482 }
483 data_buf = malloc(data_len, M_TEMP, M_WAITOK);
484
485 /* NFS needs the buffer even for mnt_getargs .... */
486 error = copyin(data, data_buf, data_len);
487 if (error != 0)
488 goto done;
489 }
490
491 if (flags & MNT_GETARGS) {
492 if (data_len == 0) {
493 error = EINVAL;
494 goto done;
495 }
496 error = mount_getargs(l, vp, path, flags, data_buf, &data_len);
497 if (error != 0)
498 goto done;
499 if (data_seg == UIO_USERSPACE)
500 error = copyout(data_buf, data, data_len);
501 *retval = data_len;
502 } else if (flags & MNT_UPDATE) {
503 error = mount_update(l, vp, path, flags, data_buf, &data_len);
504 } else {
505 /* Locking is handled internally in mount_domount(). */
506 error = mount_domount(l, &vp, vfsops, path, flags, data_buf,
507 &data_len, recurse);
508 }
509
510 done:
511 if (vp != NULL) {
512 vn_restorerecurse(vp, recurse);
513 vput(vp);
514 }
515 if (data_buf != data)
516 free(data_buf, M_TEMP);
517 return (error);
518 }
519
520 /*
521 * Scan all active processes to see if any of them have a current
522 * or root directory onto which the new filesystem has just been
523 * mounted. If so, replace them with the new mount point.
524 */
525 void
526 checkdirs(struct vnode *olddp)
527 {
528 struct cwdinfo *cwdi;
529 struct vnode *newdp;
530 struct proc *p;
531
532 if (olddp->v_usecount == 1)
533 return;
534 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
535 panic("mount: lost mount");
536 mutex_enter(&proclist_lock);
537 PROCLIST_FOREACH(p, &allproc) {
538 cwdi = p->p_cwdi;
539 if (!cwdi)
540 continue;
541 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
542 if (cwdi->cwdi_cdir == olddp) {
543 vrele(cwdi->cwdi_cdir);
544 VREF(newdp);
545 cwdi->cwdi_cdir = newdp;
546 }
547 if (cwdi->cwdi_rdir == olddp) {
548 vrele(cwdi->cwdi_rdir);
549 VREF(newdp);
550 cwdi->cwdi_rdir = newdp;
551 }
552 rw_exit(&cwdi->cwdi_lock);
553 }
554 mutex_exit(&proclist_lock);
555 if (rootvnode == olddp) {
556 vrele(rootvnode);
557 VREF(newdp);
558 rootvnode = newdp;
559 }
560 vput(newdp);
561 }
562
/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as its argument,
 * not the special device file (as it did historically).
 */
569 /* ARGSUSED */
570 int
571 sys_unmount(struct lwp *l, const struct sys_unmount_args *uap, register_t *retval)
572 {
573 /* {
574 syscallarg(const char *) path;
575 syscallarg(int) flags;
576 } */
577 struct vnode *vp;
578 struct mount *mp;
579 int error;
580 struct nameidata nd;
581
582 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
583 SCARG(uap, path));
584 if ((error = namei(&nd)) != 0)
585 return (error);
586 vp = nd.ni_vp;
587 mp = vp->v_mount;
588
589 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
590 KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT, mp, NULL, NULL);
591 if (error) {
592 vput(vp);
593 return (error);
594 }
595
596 /*
597 * Don't allow unmounting the root file system.
598 */
599 if (mp->mnt_flag & MNT_ROOTFS) {
600 vput(vp);
601 return (EINVAL);
602 }
603
604 /*
605 * Must be the root of the filesystem
606 */
607 if ((vp->v_vflag & VV_ROOT) == 0) {
608 vput(vp);
609 return (EINVAL);
610 }
611 vput(vp);
612
613 /*
614 * XXX Freeze syncer. Must do this before locking the
615 * mount point. See dounmount() for details.
616 */
617 mutex_enter(&syncer_mutex);
618
619 if (vfs_busy(mp, 0, 0)) {
620 mutex_exit(&syncer_mutex);
621 return (EBUSY);
622 }
623
624 return (dounmount(mp, SCARG(uap, flags), l));
625 }
626
627 /*
628 * Do the actual file system unmount. File system is assumed to have been
629 * marked busy by the caller.
630 */
631 int
632 dounmount(struct mount *mp, int flags, struct lwp *l)
633 {
634 struct vnode *coveredvp;
635 int error;
636 int async;
637 int used_syncer;
638
639 #if NVERIEXEC > 0
640 error = veriexec_unmountchk(mp);
641 if (error)
642 return (error);
643 #endif /* NVERIEXEC > 0 */
644
645 mutex_enter(&mountlist_lock);
646 vfs_unbusy(mp);
647 used_syncer = (mp->mnt_syncer != NULL);
648
649 /*
650 * XXX Syncer must be frozen when we get here. This should really
651 * be done on a per-mountpoint basis, but especially the softdep
652 * code possibly called from the syncer doesn't exactly work on a
653 * per-mountpoint basis, so the softdep code would become a maze
654 * of vfs_busy() calls.
655 *
656 * The caller of dounmount() must acquire syncer_mutex because
657 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
658 * order, and we must preserve that order to avoid deadlock.
659 *
660 * So, if the file system did not use the syncer, now is
661 * the time to release the syncer_mutex.
662 */
663 if (used_syncer == 0)
664 mutex_exit(&syncer_mutex);
665
666 mp->mnt_iflag |= IMNT_UNMOUNT;
667 mp->mnt_unmounter = l;
668 lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_lock);
669
670 async = mp->mnt_flag & MNT_ASYNC;
671 mp->mnt_flag &= ~MNT_ASYNC;
672 cache_purgevfs(mp); /* remove cache entries for this file sys */
673 if (mp->mnt_syncer != NULL)
674 vfs_deallocate_syncvnode(mp);
675 error = 0;
676 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
677 #if NFSS > 0
678 error = fss_umount_hook(mp, (flags & MNT_FORCE));
679 #endif
680 if (error == 0)
681 error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
682 }
683 if (error == 0 || (flags & MNT_FORCE))
684 error = VFS_UNMOUNT(mp, flags);
685 if (error) {
686 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
687 (void) vfs_allocate_syncvnode(mp);
688 mutex_enter(&mountlist_lock);
689 mp->mnt_iflag &= ~IMNT_UNMOUNT;
690 mp->mnt_unmounter = NULL;
691 mp->mnt_flag |= async;
692 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
693 &mountlist_lock);
694 if (used_syncer)
695 mutex_exit(&syncer_mutex);
696 mutex_enter(&mp->mnt_mutex);
697 while (mp->mnt_wcnt > 0) {
698 wakeup(mp);
699 mtsleep(&mp->mnt_wcnt, PVFS, "mntwcnt1",
700 0, &mp->mnt_mutex);
701 }
702 mutex_exit(&mp->mnt_mutex);
703 return (error);
704 }
705 vfs_scrubvnlist(mp);
706 mutex_enter(&mountlist_lock);
707 CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
708 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP)
709 coveredvp->v_mountedhere = NULL;
710 if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
711 panic("unmount: dangling vnode");
712 mp->mnt_iflag |= IMNT_GONE;
713 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_lock);
714 if (coveredvp != NULLVP)
715 vrele(coveredvp);
716 if (used_syncer)
717 mutex_exit(&syncer_mutex);
718 mutex_enter(&mp->mnt_mutex);
719 while (mp->mnt_wcnt > 0) {
720 wakeup(mp);
721 mtsleep(&mp->mnt_wcnt, PVFS, "mntwcnt2", 0, &mp->mnt_mutex);
722 }
723 mutex_exit(&mp->mnt_mutex);
724 vfs_hooks_unmount(mp);
725 vfs_delref(mp->mnt_op);
726 vfs_destroy(mp);
727 return (0);
728 }
729
730 /*
731 * Sync each mounted filesystem.
732 */
733 #ifdef DEBUG
734 int syncprt = 0;
735 struct ctldebug debug0 = { "syncprt", &syncprt };
736 #endif
737
738 /* ARGSUSED */
739 int
740 sys_sync(struct lwp *l, const void *v, register_t *retval)
741 {
742 struct mount *mp, *nmp;
743 int asyncflag;
744
745 if (l == NULL)
746 l = &lwp0;
747
748 mutex_enter(&mountlist_lock);
749 for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
750 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
751 nmp = mp->mnt_list.cqe_prev;
752 continue;
753 }
754 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
755 asyncflag = mp->mnt_flag & MNT_ASYNC;
756 mp->mnt_flag &= ~MNT_ASYNC;
757 VFS_SYNC(mp, MNT_NOWAIT, l->l_cred);
758 if (asyncflag)
759 mp->mnt_flag |= MNT_ASYNC;
760 }
761 mutex_enter(&mountlist_lock);
762 nmp = mp->mnt_list.cqe_prev;
763 vfs_unbusy(mp);
764
765 }
766 mutex_exit(&mountlist_lock);
767 #ifdef DEBUG
768 if (syncprt)
769 vfs_bufstats();
770 #endif /* DEBUG */
771 return (0);
772 }
773
774 /*
775 * Change filesystem quotas.
776 */
777 /* ARGSUSED */
778 int
779 sys_quotactl(struct lwp *l, const struct sys_quotactl_args *uap, register_t *retval)
780 {
781 /* {
782 syscallarg(const char *) path;
783 syscallarg(int) cmd;
784 syscallarg(int) uid;
785 syscallarg(void *) arg;
786 } */
787 struct mount *mp;
788 int error;
789 struct nameidata nd;
790
791 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
792 SCARG(uap, path));
793 if ((error = namei(&nd)) != 0)
794 return (error);
795 mp = nd.ni_vp->v_mount;
796 error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
797 SCARG(uap, arg));
798 vrele(nd.ni_vp);
799 return (error);
800 }
801
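/*
 * Fill *sp with statistics for the mount mp.  Unless MNT_WAIT (or 0)
 * is given, the cached statistics are returned.  For chrooted callers,
 * f_mntonname is rewritten relative to the caller's root; mounts that
 * are not visible from that root yield EPERM unless "root" is set,
 * in which case they are reported as mounted on "/".
 */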
802 int
803 dostatvfs(struct mount *mp, struct statvfs *sp, struct lwp *l, int flags,
804 int root)
805 {
806 struct cwdinfo *cwdi = l->l_proc->p_cwdi;
807 int error = 0;
808
	/*
	 * If MNT_NOWAIT or MNT_LAZY is specified, do not refresh the
	 * fsstat cache; only MNT_WAIT (or a flags value of 0) forces
	 * the statistics to be refreshed below.
	 */
814 if (flags == MNT_NOWAIT || flags == MNT_LAZY ||
815 (flags != MNT_WAIT && flags != 0)) {
816 memcpy(sp, &mp->mnt_stat, sizeof(*sp));
817 goto done;
818 }
819
820 /* Get the filesystem stats now */
821 memset(sp, 0, sizeof(*sp));
822 if ((error = VFS_STATVFS(mp, sp)) != 0) {
823 return error;
824 }
825
826 if (cwdi->cwdi_rdir == NULL)
827 (void)memcpy(&mp->mnt_stat, sp, sizeof(mp->mnt_stat));
828 done:
829 if (cwdi->cwdi_rdir != NULL) {
830 size_t len;
831 char *bp;
832 char *path = PNBUF_GET();
833
834 bp = path + MAXPATHLEN;
835 *--bp = '\0';
836 rw_enter(&cwdi->cwdi_lock, RW_READER);
837 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp, path,
838 MAXPATHLEN / 2, 0, l);
839 rw_exit(&cwdi->cwdi_lock);
840 if (error) {
841 PNBUF_PUT(path);
842 return error;
843 }
844 len = strlen(bp);
		/*
		 * Mount points below our root are visible to us, so fix
		 * up the pathname and return them.  Anything else lies
		 * outside our root, so don't allow viewing the data.
		 */
851 if (strncmp(bp, sp->f_mntonname, len) == 0) {
852 strlcpy(sp->f_mntonname, &sp->f_mntonname[len],
853 sizeof(sp->f_mntonname));
854 if (sp->f_mntonname[0] == '\0')
855 (void)strlcpy(sp->f_mntonname, "/",
856 sizeof(sp->f_mntonname));
857 } else {
858 if (root)
859 (void)strlcpy(sp->f_mntonname, "/",
860 sizeof(sp->f_mntonname));
861 else
862 error = EPERM;
863 }
864 PNBUF_PUT(path);
865 }
866 sp->f_flag = mp->mnt_flag & MNT_VISFLAGMASK;
867 return error;
868 }
869
870 /*
871 * Get filesystem statistics by path.
872 */
873 int
874 do_sys_pstatvfs(struct lwp *l, const char *path, int flags, struct statvfs *sb)
875 {
876 struct mount *mp;
877 int error;
878 struct nameidata nd;
879
880 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
881 if ((error = namei(&nd)) != 0)
882 return error;
883 mp = nd.ni_vp->v_mount;
884 error = dostatvfs(mp, sb, l, flags, 1);
885 vrele(nd.ni_vp);
886 return error;
887 }
888
889 /* ARGSUSED */
890 int
891 sys_statvfs1(struct lwp *l, const struct sys_statvfs1_args *uap, register_t *retval)
892 {
893 /* {
894 syscallarg(const char *) path;
895 syscallarg(struct statvfs *) buf;
896 syscallarg(int) flags;
897 } */
898 struct statvfs *sb;
899 int error;
900
901 sb = STATVFSBUF_GET();
902 error = do_sys_pstatvfs(l, SCARG(uap, path), SCARG(uap, flags), sb);
903 if (error == 0)
904 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
905 STATVFSBUF_PUT(sb);
906 return error;
907 }
908
909 /*
910 * Get filesystem statistics by fd.
911 */
912 int
913 do_sys_fstatvfs(struct lwp *l, int fd, int flags, struct statvfs *sb)
914 {
915 struct proc *p = l->l_proc;
916 struct file *fp;
917 struct mount *mp;
918 int error;
919
920 /* getvnode() will use the descriptor for us */
921 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
922 return (error);
923 mp = ((struct vnode *)fp->f_data)->v_mount;
924 error = dostatvfs(mp, sb, l, flags, 1);
925 FILE_UNUSE(fp, l);
926 return error;
927 }
928
929 /* ARGSUSED */
930 int
931 sys_fstatvfs1(struct lwp *l, const struct sys_fstatvfs1_args *uap, register_t *retval)
932 {
933 /* {
934 syscallarg(int) fd;
935 syscallarg(struct statvfs *) buf;
936 syscallarg(int) flags;
937 } */
938 struct statvfs *sb;
939 int error;
940
941 sb = STATVFSBUF_GET();
942 error = do_sys_fstatvfs(l, SCARG(uap, fd), SCARG(uap, flags), sb);
943 if (error == 0)
944 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
945 STATVFSBUF_PUT(sb);
946 return error;
947 }
948
949
950 /*
951 * Get statistics on all filesystems.
952 */
953 int
954 do_sys_getvfsstat(struct lwp *l, void *sfsp, size_t bufsize, int flags,
955 int (*copyfn)(const void *, void *, size_t), size_t entry_sz,
956 register_t *retval)
957 {
958 int root = 0;
959 struct proc *p = l->l_proc;
960 struct mount *mp, *nmp;
961 struct statvfs *sb;
962 size_t count, maxcount;
963 int error = 0;
964
965 sb = STATVFSBUF_GET();
966 maxcount = bufsize / entry_sz;
967 mutex_enter(&mountlist_lock);
968 count = 0;
969 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
970 mp = nmp) {
971 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
972 nmp = CIRCLEQ_NEXT(mp, mnt_list);
973 continue;
974 }
975 if (sfsp && count < maxcount) {
976 error = dostatvfs(mp, sb, l, flags, 0);
977 if (error) {
978 mutex_enter(&mountlist_lock);
979 nmp = CIRCLEQ_NEXT(mp, mnt_list);
980 vfs_unbusy(mp);
981 continue;
982 }
983 error = copyfn(sb, sfsp, entry_sz);
984 if (error) {
985 vfs_unbusy(mp);
986 goto out;
987 }
988 sfsp = (char *)sfsp + entry_sz;
989 root |= strcmp(sb->f_mntonname, "/") == 0;
990 }
991 count++;
992 mutex_enter(&mountlist_lock);
993 nmp = CIRCLEQ_NEXT(mp, mnt_list);
994 vfs_unbusy(mp);
995 }
996
997 mutex_exit(&mountlist_lock);
998 if (root == 0 && p->p_cwdi->cwdi_rdir) {
999 /*
1000 * fake a root entry
1001 */
1002 error = dostatvfs(p->p_cwdi->cwdi_rdir->v_mount,
1003 sb, l, flags, 1);
1004 if (error != 0)
1005 goto out;
1006 if (sfsp)
1007 error = copyfn(sb, sfsp, entry_sz);
1008 count++;
1009 }
1010 if (sfsp && count > maxcount)
1011 *retval = maxcount;
1012 else
1013 *retval = count;
1014 out:
1015 STATVFSBUF_PUT(sb);
1016 return error;
1017 }
1018
1019 int
1020 sys_getvfsstat(struct lwp *l, const struct sys_getvfsstat_args *uap, register_t *retval)
1021 {
1022 /* {
1023 syscallarg(struct statvfs *) buf;
1024 syscallarg(size_t) bufsize;
1025 syscallarg(int) flags;
1026 } */
1027
1028 return do_sys_getvfsstat(l, SCARG(uap, buf), SCARG(uap, bufsize),
1029 SCARG(uap, flags), copyout, sizeof (struct statvfs), retval);
1030 }
1031
1032 /*
1033 * Change current working directory to a given file descriptor.
1034 */
1035 /* ARGSUSED */
1036 int
1037 sys_fchdir(struct lwp *l, const struct sys_fchdir_args *uap, register_t *retval)
1038 {
1039 /* {
1040 syscallarg(int) fd;
1041 } */
1042 struct proc *p = l->l_proc;
1043 struct filedesc *fdp = p->p_fd;
1044 struct cwdinfo *cwdi;
1045 struct vnode *vp, *tdp;
1046 struct mount *mp;
1047 struct file *fp;
1048 int error;
1049
1050 /* getvnode() will use the descriptor for us */
1051 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1052 return (error);
1053 vp = (struct vnode *)fp->f_data;
1054
1055 VREF(vp);
1056 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1057 if (vp->v_type != VDIR)
1058 error = ENOTDIR;
1059 else
1060 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1061 if (error) {
1062 vput(vp);
1063 goto out;
1064 }
1065 while ((mp = vp->v_mountedhere) != NULL) {
1066 if (vfs_busy(mp, 0, 0))
1067 continue;
1068
1069 vput(vp);
1070 error = VFS_ROOT(mp, &tdp);
1071 vfs_unbusy(mp);
1072 if (error)
1073 goto out;
1074 vp = tdp;
1075 }
1076 VOP_UNLOCK(vp, 0);
1077
1078 /*
1079 * Disallow changing to a directory not under the process's
1080 * current root directory (if there is one).
1081 */
1082 cwdi = p->p_cwdi;
1083 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1084 if (cwdi->cwdi_rdir && !vn_isunder(vp, NULL, l)) {
1085 vrele(vp);
1086 error = EPERM; /* operation not permitted */
1087 } else {
1088 vrele(cwdi->cwdi_cdir);
1089 cwdi->cwdi_cdir = vp;
1090 }
1091 rw_exit(&cwdi->cwdi_lock);
1092
1093 out:
1094 FILE_UNUSE(fp, l);
1095 return (error);
1096 }
1097
1098 /*
1099 * Change this process's notion of the root directory to a given file
1100 * descriptor.
1101 */
1102 int
1103 sys_fchroot(struct lwp *l, const struct sys_fchroot_args *uap, register_t *retval)
1104 {
1105 struct proc *p = l->l_proc;
1106 struct filedesc *fdp = p->p_fd;
1107 struct cwdinfo *cwdi;
1108 struct vnode *vp;
1109 struct file *fp;
1110 int error;
1111
1112 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1113 KAUTH_REQ_SYSTEM_CHROOT_FCHROOT, NULL, NULL, NULL)) != 0)
1114 return error;
1115 /* getvnode() will use the descriptor for us */
1116 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1117 return error;
1118 vp = (struct vnode *) fp->f_data;
1119 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1120 if (vp->v_type != VDIR)
1121 error = ENOTDIR;
1122 else
1123 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1124 VOP_UNLOCK(vp, 0);
1125 if (error)
1126 goto out;
1127 VREF(vp);
1128
	/*
	 * Prevent escaping from the chroot by making sure the working
	 * directory remains under the new root.  If it is not, silently
	 * chdir to the new root.
	 */
1134 cwdi = p->p_cwdi;
1135 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1136 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1137 /*
1138 * XXX would be more failsafe to change directory to a
1139 * deadfs node here instead
1140 */
1141 vrele(cwdi->cwdi_cdir);
1142 VREF(vp);
1143 cwdi->cwdi_cdir = vp;
1144 }
1145
1146 if (cwdi->cwdi_rdir != NULL)
1147 vrele(cwdi->cwdi_rdir);
1148 cwdi->cwdi_rdir = vp;
1149 rw_exit(&cwdi->cwdi_lock);
1150
1151 out:
1152 FILE_UNUSE(fp, l);
1153 return (error);
1154 }
1155
1156 /*
1157 * Change current working directory (``.'').
1158 */
1159 /* ARGSUSED */
1160 int
1161 sys_chdir(struct lwp *l, const struct sys_chdir_args *uap, register_t *retval)
1162 {
1163 /* {
1164 syscallarg(const char *) path;
1165 } */
1166 struct proc *p = l->l_proc;
1167 struct cwdinfo *cwdi;
1168 int error;
1169 struct nameidata nd;
1170
1171 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1172 SCARG(uap, path));
1173 if ((error = change_dir(&nd, l)) != 0)
1174 return (error);
1175 cwdi = p->p_cwdi;
1176 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1177 vrele(cwdi->cwdi_cdir);
1178 cwdi->cwdi_cdir = nd.ni_vp;
1179 rw_exit(&cwdi->cwdi_lock);
1180 return (0);
1181 }
1182
1183 /*
1184 * Change notion of root (``/'') directory.
1185 */
1186 /* ARGSUSED */
1187 int
1188 sys_chroot(struct lwp *l, const struct sys_chroot_args *uap, register_t *retval)
1189 {
1190 /* {
1191 syscallarg(const char *) path;
1192 } */
1193 struct proc *p = l->l_proc;
1194 struct cwdinfo *cwdi;
1195 struct vnode *vp;
1196 int error;
1197 struct nameidata nd;
1198
1199 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1200 KAUTH_REQ_SYSTEM_CHROOT_CHROOT, NULL, NULL, NULL)) != 0)
1201 return (error);
1202 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1203 SCARG(uap, path));
1204 if ((error = change_dir(&nd, l)) != 0)
1205 return (error);
1206
1207 cwdi = p->p_cwdi;
1208 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1209 if (cwdi->cwdi_rdir != NULL)
1210 vrele(cwdi->cwdi_rdir);
1211 vp = nd.ni_vp;
1212 cwdi->cwdi_rdir = vp;
1213
	/*
	 * Prevent escaping from the chroot by making sure the working
	 * directory remains under the new root.  If it is not, silently
	 * chdir to the new root.
	 */
1219 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1220 /*
1221 * XXX would be more failsafe to change directory to a
1222 * deadfs node here instead
1223 */
1224 vrele(cwdi->cwdi_cdir);
1225 VREF(vp);
1226 cwdi->cwdi_cdir = vp;
1227 }
1228 rw_exit(&cwdi->cwdi_lock);
1229
1230 return (0);
1231 }
1232
1233 /*
1234 * Common routine for chroot and chdir.
1235 */
1236 static int
1237 change_dir(struct nameidata *ndp, struct lwp *l)
1238 {
1239 struct vnode *vp;
1240 int error;
1241
1242 if ((error = namei(ndp)) != 0)
1243 return (error);
1244 vp = ndp->ni_vp;
1245 if (vp->v_type != VDIR)
1246 error = ENOTDIR;
1247 else
1248 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1249
1250 if (error)
1251 vput(vp);
1252 else
1253 VOP_UNLOCK(vp, 0);
1254 return (error);
1255 }
1256
1257 /*
1258 * Check permissions, allocate an open file structure,
1259 * and call the device open routine if any.
1260 */
1261 int
1262 sys_open(struct lwp *l, const struct sys_open_args *uap, register_t *retval)
1263 {
1264 /* {
1265 syscallarg(const char *) path;
1266 syscallarg(int) flags;
1267 syscallarg(int) mode;
1268 } */
1269 struct proc *p = l->l_proc;
1270 struct cwdinfo *cwdi = p->p_cwdi;
1271 struct filedesc *fdp = p->p_fd;
1272 struct file *fp;
1273 struct vnode *vp;
1274 int flags, cmode;
1275 int type, indx, error;
1276 struct flock lf;
1277 struct nameidata nd;
1278
1279 flags = FFLAGS(SCARG(uap, flags));
1280 if ((flags & (FREAD | FWRITE)) == 0)
1281 return (EINVAL);
1282 /* falloc() will use the file descriptor for us */
1283 if ((error = falloc(l, &fp, &indx)) != 0)
1284 return (error);
1285 /* We're going to read cwdi->cwdi_cmask unlocked here. */
1286 cmode = ((SCARG(uap, mode) &~ cwdi->cwdi_cmask) & ALLPERMS) &~ S_ISTXT;
1287 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1288 SCARG(uap, path));
1289 l->l_dupfd = -indx - 1; /* XXX check for fdopen */
1290 if ((error = vn_open(&nd, flags, cmode)) != 0) {
1291 rw_enter(&fdp->fd_lock, RW_WRITER);
1292 FILE_UNUSE(fp, l);
1293 fdp->fd_ofiles[indx] = NULL;
1294 rw_exit(&fdp->fd_lock);
1295 ffree(fp);
1296 if ((error == EDUPFD || error == EMOVEFD) &&
1297 l->l_dupfd >= 0 && /* XXX from fdopen */
1298 (error =
1299 dupfdopen(l, indx, l->l_dupfd, flags, error)) == 0) {
1300 *retval = indx;
1301 return (0);
1302 }
1303 if (error == ERESTART)
1304 error = EINTR;
1305 fdremove(fdp, indx);
1306 return (error);
1307 }
1308
1309 l->l_dupfd = 0;
1310 vp = nd.ni_vp;
1311 fp->f_flag = flags & FMASK;
1312 fp->f_type = DTYPE_VNODE;
1313 fp->f_ops = &vnops;
1314 fp->f_data = vp;
1315 if (flags & (O_EXLOCK | O_SHLOCK)) {
1316 lf.l_whence = SEEK_SET;
1317 lf.l_start = 0;
1318 lf.l_len = 0;
1319 if (flags & O_EXLOCK)
1320 lf.l_type = F_WRLCK;
1321 else
1322 lf.l_type = F_RDLCK;
1323 type = F_FLOCK;
1324 if ((flags & FNONBLOCK) == 0)
1325 type |= F_WAIT;
1326 VOP_UNLOCK(vp, 0);
1327 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1328 if (error) {
1329 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1330 FILE_UNUSE(fp, l);
1331 ffree(fp);
1332 fdremove(fdp, indx);
1333 return (error);
1334 }
1335 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1336 fp->f_flag |= FHASLOCK;
1337 }
1338 VOP_UNLOCK(vp, 0);
1339 *retval = indx;
1340 FILE_SET_MATURE(fp);
1341 FILE_UNUSE(fp, l);
1342 return (0);
1343 }
1344
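/*
 * Free a file handle allocated by vfs_composefh_alloc() or
 * vfs_copyinfh_alloc().
 */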
1345 static void
1346 vfs__fhfree(fhandle_t *fhp)
1347 {
1348 size_t fhsize;
1349
1350 if (fhp == NULL) {
1351 return;
1352 }
1353 fhsize = FHANDLE_SIZE(fhp);
1354 kmem_free(fhp, fhsize);
1355 }
1356
1357 /*
1358 * vfs_composefh: compose a filehandle.
1359 */
1360
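/*
 * If *fh_size is too small for the handle, E2BIG is returned and
 * *fh_size is updated to the size actually required.
 */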
1361 int
1362 vfs_composefh(struct vnode *vp, fhandle_t *fhp, size_t *fh_size)
1363 {
1364 struct mount *mp;
1365 struct fid *fidp;
1366 int error;
1367 size_t needfhsize;
1368 size_t fidsize;
1369
1370 mp = vp->v_mount;
1371 fidp = NULL;
1372 if (*fh_size < FHANDLE_SIZE_MIN) {
1373 fidsize = 0;
1374 } else {
1375 fidsize = *fh_size - offsetof(fhandle_t, fh_fid);
1376 if (fhp != NULL) {
1377 memset(fhp, 0, *fh_size);
1378 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1379 fidp = &fhp->fh_fid;
1380 }
1381 }
1382 error = VFS_VPTOFH(vp, fidp, &fidsize);
1383 needfhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1384 if (error == 0 && *fh_size < needfhsize) {
1385 error = E2BIG;
1386 }
1387 *fh_size = needfhsize;
1388 return error;
1389 }
1390
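/*
 * Allocate a file handle of the correct size for vp and fill it in.
 * The result must be freed with vfs_composefh_free().
 */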
1391 int
1392 vfs_composefh_alloc(struct vnode *vp, fhandle_t **fhpp)
1393 {
1394 struct mount *mp;
1395 fhandle_t *fhp;
1396 size_t fhsize;
1397 size_t fidsize;
1398 int error;
1399
1400 *fhpp = NULL;
1401 mp = vp->v_mount;
1402 fidsize = 0;
1403 error = VFS_VPTOFH(vp, NULL, &fidsize);
1404 KASSERT(error != 0);
1405 if (error != E2BIG) {
1406 goto out;
1407 }
1408 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1409 fhp = kmem_zalloc(fhsize, KM_SLEEP);
1410 if (fhp == NULL) {
1411 error = ENOMEM;
1412 goto out;
1413 }
1414 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1415 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize);
1416 if (error == 0) {
1417 KASSERT((FHANDLE_SIZE(fhp) == fhsize &&
1418 FHANDLE_FILEID(fhp)->fid_len == fidsize));
1419 *fhpp = fhp;
1420 } else {
1421 kmem_free(fhp, fhsize);
1422 }
1423 out:
1424 return error;
1425 }
1426
1427 void
1428 vfs_composefh_free(fhandle_t *fhp)
1429 {
1430
1431 vfs__fhfree(fhp);
1432 }
1433
1434 /*
1435 * vfs_fhtovp: lookup a vnode by a filehandle.
1436 */
1437
1438 int
1439 vfs_fhtovp(fhandle_t *fhp, struct vnode **vpp)
1440 {
1441 struct mount *mp;
1442 int error;
1443
1444 *vpp = NULL;
1445 mp = vfs_getvfs(FHANDLE_FSID(fhp));
1446 if (mp == NULL) {
1447 error = ESTALE;
1448 goto out;
1449 }
1450 if (mp->mnt_op->vfs_fhtovp == NULL) {
1451 error = EOPNOTSUPP;
1452 goto out;
1453 }
1454 error = VFS_FHTOVP(mp, FHANDLE_FILEID(fhp), vpp);
1455 out:
1456 return error;
1457 }
1458
1459 /*
1460 * vfs_copyinfh_alloc: allocate and copyin a filehandle, given
1461 * the needed size.
1462 */
1463
1464 int
1465 vfs_copyinfh_alloc(const void *ufhp, size_t fhsize, fhandle_t **fhpp)
1466 {
1467 fhandle_t *fhp;
1468 int error;
1469
1470 *fhpp = NULL;
1471 if (fhsize > FHANDLE_SIZE_MAX) {
1472 return EINVAL;
1473 }
1474 if (fhsize < FHANDLE_SIZE_MIN) {
1475 return EINVAL;
1476 }
1477 again:
1478 fhp = kmem_alloc(fhsize, KM_SLEEP);
1479 if (fhp == NULL) {
1480 return ENOMEM;
1481 }
1482 error = copyin(ufhp, fhp, fhsize);
1483 if (error == 0) {
1484 /* XXX this check shouldn't be here */
1485 if (FHANDLE_SIZE(fhp) == fhsize) {
1486 *fhpp = fhp;
1487 return 0;
1488 } else if (fhsize == NFSX_V2FH && FHANDLE_SIZE(fhp) < fhsize) {
1489 /*
1490 * a kludge for nfsv2 padded handles.
1491 */
1492 size_t sz;
1493
1494 sz = FHANDLE_SIZE(fhp);
1495 kmem_free(fhp, fhsize);
1496 fhsize = sz;
1497 goto again;
1498 } else {
			/*
			 * Userland told us the wrong size.
			 */
1502 error = EINVAL;
1503 }
1504 }
1505 kmem_free(fhp, fhsize);
1506 return error;
1507 }
1508
1509 void
1510 vfs_copyinfh_free(fhandle_t *fhp)
1511 {
1512
1513 vfs__fhfree(fhp);
1514 }
1515
1516 /*
1517 * Get file handle system call
1518 */
1519 int
1520 sys___getfh30(struct lwp *l, const struct sys___getfh30_args *uap, register_t *retval)
1521 {
1522 /* {
1523 syscallarg(char *) fname;
1524 syscallarg(fhandle_t *) fhp;
1525 syscallarg(size_t *) fh_size;
1526 } */
1527 struct vnode *vp;
1528 fhandle_t *fh;
1529 int error;
1530 struct nameidata nd;
1531 size_t sz;
1532 size_t usz;
1533
1534 /*
1535 * Must be super user
1536 */
1537 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1538 0, NULL, NULL, NULL);
1539 if (error)
1540 return (error);
1541 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1542 SCARG(uap, fname));
1543 error = namei(&nd);
1544 if (error)
1545 return (error);
1546 vp = nd.ni_vp;
1547 error = vfs_composefh_alloc(vp, &fh);
1548 vput(vp);
1549 if (error != 0) {
1550 goto out;
1551 }
1552 error = copyin(SCARG(uap, fh_size), &usz, sizeof(size_t));
1553 if (error != 0) {
1554 goto out;
1555 }
1556 sz = FHANDLE_SIZE(fh);
1557 error = copyout(&sz, SCARG(uap, fh_size), sizeof(size_t));
1558 if (error != 0) {
1559 goto out;
1560 }
1561 if (usz >= sz) {
1562 error = copyout(fh, SCARG(uap, fhp), sz);
1563 } else {
1564 error = E2BIG;
1565 }
1566 out:
1567 vfs_composefh_free(fh);
1568 return (error);
1569 }
1570
1571 /*
1572 * Open a file given a file handle.
1573 *
1574 * Check permissions, allocate an open file structure,
1575 * and call the device open routine if any.
1576 */
1577
1578 int
1579 dofhopen(struct lwp *l, const void *ufhp, size_t fhsize, int oflags,
1580 register_t *retval)
1581 {
1582 struct filedesc *fdp = l->l_proc->p_fd;
1583 struct file *fp;
1584 struct vnode *vp = NULL;
1585 kauth_cred_t cred = l->l_cred;
1586 struct file *nfp;
1587 int type, indx, error=0;
1588 struct flock lf;
1589 struct vattr va;
1590 fhandle_t *fh;
1591 int flags;
1592
1593 /*
1594 * Must be super user
1595 */
1596 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1597 0, NULL, NULL, NULL)))
1598 return (error);
1599
1600 flags = FFLAGS(oflags);
1601 if ((flags & (FREAD | FWRITE)) == 0)
1602 return (EINVAL);
1603 if ((flags & O_CREAT))
1604 return (EINVAL);
1605 /* falloc() will use the file descriptor for us */
1606 if ((error = falloc(l, &nfp, &indx)) != 0)
1607 return (error);
1608 fp = nfp;
1609 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1610 if (error != 0) {
1611 goto bad;
1612 }
1613 error = vfs_fhtovp(fh, &vp);
1614 if (error != 0) {
1615 goto bad;
1616 }
1617
1618 /* Now do an effective vn_open */
1619
1620 if (vp->v_type == VSOCK) {
1621 error = EOPNOTSUPP;
1622 goto bad;
1623 }
1624 error = vn_openchk(vp, cred, flags);
1625 if (error != 0)
1626 goto bad;
1627 if (flags & O_TRUNC) {
1628 VOP_UNLOCK(vp, 0); /* XXX */
1629 VOP_LEASE(vp, cred, LEASE_WRITE);
1630 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
1631 VATTR_NULL(&va);
1632 va.va_size = 0;
1633 error = VOP_SETATTR(vp, &va, cred);
1634 if (error)
1635 goto bad;
1636 }
1637 if ((error = VOP_OPEN(vp, flags, cred)) != 0)
1638 goto bad;
1639 if (flags & FWRITE)
1640 vp->v_writecount++;
1641
1642 /* done with modified vn_open, now finish what sys_open does. */
1643
1644 fp->f_flag = flags & FMASK;
1645 fp->f_type = DTYPE_VNODE;
1646 fp->f_ops = &vnops;
1647 fp->f_data = vp;
1648 if (flags & (O_EXLOCK | O_SHLOCK)) {
1649 lf.l_whence = SEEK_SET;
1650 lf.l_start = 0;
1651 lf.l_len = 0;
1652 if (flags & O_EXLOCK)
1653 lf.l_type = F_WRLCK;
1654 else
1655 lf.l_type = F_RDLCK;
1656 type = F_FLOCK;
1657 if ((flags & FNONBLOCK) == 0)
1658 type |= F_WAIT;
1659 VOP_UNLOCK(vp, 0);
1660 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1661 if (error) {
1662 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1663 FILE_UNUSE(fp, l);
1664 ffree(fp);
1665 fdremove(fdp, indx);
1666 return (error);
1667 }
1668 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1669 fp->f_flag |= FHASLOCK;
1670 }
1671 VOP_UNLOCK(vp, 0);
1672 *retval = indx;
1673 FILE_SET_MATURE(fp);
1674 FILE_UNUSE(fp, l);
1675 vfs_copyinfh_free(fh);
1676 return (0);
1677
1678 bad:
1679 FILE_UNUSE(fp, l);
1680 ffree(fp);
1681 fdremove(fdp, indx);
1682 if (vp != NULL)
1683 vput(vp);
1684 vfs_copyinfh_free(fh);
1685 return (error);
1686 }
1687
1688 int
1689 sys___fhopen40(struct lwp *l, const struct sys___fhopen40_args *uap, register_t *retval)
1690 {
1691 /* {
1692 syscallarg(const void *) fhp;
1693 syscallarg(size_t) fh_size;
1694 syscallarg(int) flags;
1695 } */
1696
1697 return dofhopen(l, SCARG(uap, fhp), SCARG(uap, fh_size),
1698 SCARG(uap, flags), retval);
1699 }
1700
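/*
 * Common code for fhstat(2): convert a user supplied file handle to
 * a vnode and stat it.  Restricted to the super-user.
 */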
1701 int
1702 do_fhstat(struct lwp *l, const void *ufhp, size_t fhsize, struct stat *sb)
1703 {
1704 int error;
1705 fhandle_t *fh;
1706 struct vnode *vp;
1707
1708 /*
1709 * Must be super user
1710 */
1711 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1712 0, NULL, NULL, NULL)))
1713 return (error);
1714
1715 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1716 if (error != 0)
1717 return error;
1718
1719 error = vfs_fhtovp(fh, &vp);
1720 vfs_copyinfh_free(fh);
1721 if (error != 0)
1722 return error;
1723
1724 error = vn_stat(vp, sb, l);
1725 vput(vp);
1726 return error;
1727 }
1728
1729
1730 /* ARGSUSED */
1731 int
1732 sys___fhstat40(struct lwp *l, const struct sys___fhstat40_args *uap, register_t *retval)
1733 {
1734 /* {
1735 syscallarg(const void *) fhp;
1736 syscallarg(size_t) fh_size;
1737 syscallarg(struct stat *) sb;
1738 } */
1739 struct stat sb;
1740 int error;
1741
1742 error = do_fhstat(l, SCARG(uap, fhp), SCARG(uap, fh_size), &sb);
1743 if (error)
1744 return error;
1745 return copyout(&sb, SCARG(uap, sb), sizeof(sb));
1746 }
1747
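/*
 * Common code for fhstatvfs(2): convert a user supplied file handle
 * to a vnode and return statistics for the file system it lives on.
 * Restricted to the super-user.
 */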
1748 int
1749 do_fhstatvfs(struct lwp *l, const void *ufhp, size_t fhsize, struct statvfs *sb,
1750 int flags)
1751 {
1752 fhandle_t *fh;
1753 struct mount *mp;
1754 struct vnode *vp;
1755 int error;
1756
1757 /*
1758 * Must be super user
1759 */
1760 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1761 0, NULL, NULL, NULL)))
1762 return error;
1763
1764 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1765 if (error != 0)
1766 return error;
1767
1768 error = vfs_fhtovp(fh, &vp);
1769 vfs_copyinfh_free(fh);
1770 if (error != 0)
1771 return error;
1772
1773 mp = vp->v_mount;
1774 error = dostatvfs(mp, sb, l, flags, 1);
1775 vput(vp);
1776 return error;
1777 }
1778
1779 /* ARGSUSED */
1780 int
1781 sys___fhstatvfs140(struct lwp *l, const struct sys___fhstatvfs140_args *uap, register_t *retval)
1782 {
1783 /* {
1784 syscallarg(const void *) fhp;
1785 syscallarg(size_t) fh_size;
1786 syscallarg(struct statvfs *) buf;
1787 syscallarg(int) flags;
1788 } */
1789 struct statvfs *sb = STATVFSBUF_GET();
1790 int error;
1791
1792 error = do_fhstatvfs(l, SCARG(uap, fhp), SCARG(uap, fh_size), sb,
1793 SCARG(uap, flags));
1794 if (error == 0)
1795 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
1796 STATVFSBUF_PUT(sb);
1797 return error;
1798 }
1799
1800 /*
1801 * Create a special file.
1802 */
1803 /* ARGSUSED */
1804 int
1805 sys_mknod(struct lwp *l, const struct sys_mknod_args *uap, register_t *retval)
1806 {
1807 /* {
1808 syscallarg(const char *) path;
1809 syscallarg(int) mode;
1810 syscallarg(int) dev;
1811 } */
1812 struct proc *p = l->l_proc;
1813 struct vnode *vp;
1814 struct vattr vattr;
1815 int error, optype;
1816 struct nameidata nd;
1817 char *path;
1818 const char *cpath;
1819 enum uio_seg seg = UIO_USERSPACE;
1820
1821 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MKNOD,
1822 0, NULL, NULL, NULL)) != 0)
1823 return (error);
1824
1825 optype = VOP_MKNOD_DESCOFFSET;
1826
1827 VERIEXEC_PATH_GET(SCARG(uap, path), seg, cpath, path);
1828 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, seg, cpath);
1829
1830 if ((error = namei(&nd)) != 0)
1831 goto out;
1832 vp = nd.ni_vp;
1833 if (vp != NULL)
1834 error = EEXIST;
1835 else {
1836 VATTR_NULL(&vattr);
1837 /* We will read cwdi->cwdi_cmask unlocked. */
1838 vattr.va_mode =
1839 (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1840 vattr.va_rdev = SCARG(uap, dev);
1841
1842 switch (SCARG(uap, mode) & S_IFMT) {
1843 case S_IFMT: /* used by badsect to flag bad sectors */
1844 vattr.va_type = VBAD;
1845 break;
1846 case S_IFCHR:
1847 vattr.va_type = VCHR;
1848 break;
1849 case S_IFBLK:
1850 vattr.va_type = VBLK;
1851 break;
1852 case S_IFWHT:
1853 optype = VOP_WHITEOUT_DESCOFFSET;
1854 break;
1855 case S_IFREG:
1856 #if NVERIEXEC > 0
1857 error = veriexec_openchk(l, nd.ni_vp, nd.ni_dirp,
1858 O_CREAT);
1859 #endif /* NVERIEXEC > 0 */
1860 vattr.va_type = VREG;
1861 vattr.va_rdev = VNOVAL;
1862 optype = VOP_CREATE_DESCOFFSET;
1863 break;
1864 default:
1865 error = EINVAL;
1866 break;
1867 }
1868 }
1869 if (!error) {
1870 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1871 switch (optype) {
1872 case VOP_WHITEOUT_DESCOFFSET:
1873 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1874 if (error)
1875 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1876 vput(nd.ni_dvp);
1877 break;
1878
1879 case VOP_MKNOD_DESCOFFSET:
1880 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1881 &nd.ni_cnd, &vattr);
1882 if (error == 0)
1883 vput(nd.ni_vp);
1884 break;
1885
1886 case VOP_CREATE_DESCOFFSET:
1887 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
1888 &nd.ni_cnd, &vattr);
1889 if (error == 0)
1890 vput(nd.ni_vp);
1891 break;
1892 }
1893 } else {
1894 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1895 if (nd.ni_dvp == vp)
1896 vrele(nd.ni_dvp);
1897 else
1898 vput(nd.ni_dvp);
1899 if (vp)
1900 vrele(vp);
1901 }
1902 out:
1903 VERIEXEC_PATH_PUT(path);
1904 return (error);
1905 }
1906
1907 /*
1908 * Create a named pipe.
1909 */
1910 /* ARGSUSED */
1911 int
1912 sys_mkfifo(struct lwp *l, const struct sys_mkfifo_args *uap, register_t *retval)
1913 {
1914 /* {
1915 syscallarg(const char *) path;
1916 syscallarg(int) mode;
1917 } */
1918 struct proc *p = l->l_proc;
1919 struct vattr vattr;
1920 int error;
1921 struct nameidata nd;
1922
1923 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1924 SCARG(uap, path));
1925 if ((error = namei(&nd)) != 0)
1926 return (error);
1927 if (nd.ni_vp != NULL) {
1928 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1929 if (nd.ni_dvp == nd.ni_vp)
1930 vrele(nd.ni_dvp);
1931 else
1932 vput(nd.ni_dvp);
1933 vrele(nd.ni_vp);
1934 return (EEXIST);
1935 }
1936 VATTR_NULL(&vattr);
1937 vattr.va_type = VFIFO;
1938 /* We will read cwdi->cwdi_cmask unlocked. */
1939 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1940 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1941 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1942 if (error == 0)
1943 vput(nd.ni_vp);
1944 return (error);
1945 }
1946
1947 /*
1948 * Make a hard file link.
1949 */
1950 /* ARGSUSED */
1951 int
1952 sys_link(struct lwp *l, const struct sys_link_args *uap, register_t *retval)
1953 {
1954 /* {
1955 syscallarg(const char *) path;
1956 syscallarg(const char *) link;
1957 } */
1958 struct vnode *vp;
1959 struct nameidata nd;
1960 int error;
1961
1962 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1963 SCARG(uap, path));
1964 if ((error = namei(&nd)) != 0)
1965 return (error);
1966 vp = nd.ni_vp;
1967 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1968 SCARG(uap, link));
1969 if ((error = namei(&nd)) != 0)
1970 goto out;
1971 if (nd.ni_vp) {
1972 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1973 if (nd.ni_dvp == nd.ni_vp)
1974 vrele(nd.ni_dvp);
1975 else
1976 vput(nd.ni_dvp);
1977 vrele(nd.ni_vp);
1978 error = EEXIST;
1979 goto out;
1980 }
1981 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1982 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
1983 error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1984 out:
1985 vrele(vp);
1986 return (error);
1987 }
1988
1989 /*
1990 * Make a symbolic link.
1991 */
1992 /* ARGSUSED */
1993 int
1994 sys_symlink(struct lwp *l, const struct sys_symlink_args *uap, register_t *retval)
1995 {
1996 /* {
1997 syscallarg(const char *) path;
1998 syscallarg(const char *) link;
1999 } */
2000 struct proc *p = l->l_proc;
2001 struct vattr vattr;
2002 char *path;
2003 int error;
2004 struct nameidata nd;
2005
2006 path = PNBUF_GET();
2007 error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL);
2008 if (error)
2009 goto out;
2010 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
2011 SCARG(uap, link));
2012 if ((error = namei(&nd)) != 0)
2013 goto out;
2014 if (nd.ni_vp) {
2015 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2016 if (nd.ni_dvp == nd.ni_vp)
2017 vrele(nd.ni_dvp);
2018 else
2019 vput(nd.ni_dvp);
2020 vrele(nd.ni_vp);
2021 error = EEXIST;
2022 goto out;
2023 }
2024 VATTR_NULL(&vattr);
2025 vattr.va_type = VLNK;
2026 /* We will read cwdi->cwdi_cmask unlocked. */
2027 vattr.va_mode = ACCESSPERMS &~ p->p_cwdi->cwdi_cmask;
2028 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2029 error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
2030 if (error == 0)
2031 vput(nd.ni_vp);
2032 out:
2033 PNBUF_PUT(path);
2034 return (error);
2035 }
2036
2037 /*
2038 * Delete a whiteout from the filesystem.
2039 */
2040 /* ARGSUSED */
2041 int
2042 sys_undelete(struct lwp *l, const struct sys_undelete_args *uap, register_t *retval)
2043 {
2044 /* {
2045 syscallarg(const char *) path;
2046 } */
2047 int error;
2048 struct nameidata nd;
2049
2050 NDINIT(&nd, DELETE, LOCKPARENT | DOWHITEOUT | TRYEMULROOT,
2051 UIO_USERSPACE, SCARG(uap, path));
2052 error = namei(&nd);
2053 if (error)
2054 return (error);
2055
2056 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
2057 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2058 if (nd.ni_dvp == nd.ni_vp)
2059 vrele(nd.ni_dvp);
2060 else
2061 vput(nd.ni_dvp);
2062 if (nd.ni_vp)
2063 vrele(nd.ni_vp);
2064 return (EEXIST);
2065 }
2066 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2067 if ((error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE)) != 0)
2068 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2069 vput(nd.ni_dvp);
2070 return (error);
2071 }
2072
2073 /*
2074 * Delete a name from the filesystem.
2075 */
2076 /* ARGSUSED */
2077 int
2078 sys_unlink(struct lwp *l, const struct sys_unlink_args *uap, register_t *retval)
2079 {
2080 /* {
2081 syscallarg(const char *) path;
2082 } */
2083
2084 return do_sys_unlink(SCARG(uap, path), UIO_USERSPACE);
2085 }
2086
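/*
 * Common code for unlink(2): look up the last path component with
 * intent to delete and call VOP_REMOVE().  The root of a mounted
 * file system cannot be removed.
 */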
2087 int
2088 do_sys_unlink(const char *arg, enum uio_seg seg)
2089 {
2090 struct vnode *vp;
2091 int error;
2092 struct nameidata nd;
2093 kauth_cred_t cred;
2094 char *path;
2095 const char *cpath;
2096
2097 VERIEXEC_PATH_GET(arg, seg, cpath, path);
2098 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, seg, cpath);
2099
2100 if ((error = namei(&nd)) != 0)
2101 goto out;
2102 vp = nd.ni_vp;
2103
2104 /*
2105 * The root of a mounted filesystem cannot be deleted.
2106 */
2107 if (vp->v_vflag & VV_ROOT) {
2108 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2109 if (nd.ni_dvp == vp)
2110 vrele(nd.ni_dvp);
2111 else
2112 vput(nd.ni_dvp);
2113 vput(vp);
2114 error = EBUSY;
2115 goto out;
2116 }
2117
2118 #if NVERIEXEC > 0
2119 /* Handle remove requests for veriexec entries. */
2120 if ((error = veriexec_removechk(curlwp, nd.ni_vp, nd.ni_dirp)) != 0) {
2121 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2122 if (nd.ni_dvp == vp)
2123 vrele(nd.ni_dvp);
2124 else
2125 vput(nd.ni_dvp);
2126 vput(vp);
2127 goto out;
2128 }
2129 #endif /* NVERIEXEC > 0 */
2130
2131 cred = kauth_cred_get();
2132 VOP_LEASE(nd.ni_dvp, cred, LEASE_WRITE);
2133 VOP_LEASE(vp, cred, LEASE_WRITE);
2134 #ifdef FILEASSOC
2135 (void)fileassoc_file_delete(vp);
2136 #endif /* FILEASSOC */
2137 error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2138 out:
2139 VERIEXEC_PATH_PUT(path);
2140 return (error);
2141 }
2142
2143 /*
2144 * Reposition read/write file offset.
2145 */
2146 int
2147 sys_lseek(struct lwp *l, const struct sys_lseek_args *uap, register_t *retval)
2148 {
2149 /* {
2150 syscallarg(int) fd;
2151 syscallarg(int) pad;
2152 syscallarg(off_t) offset;
2153 syscallarg(int) whence;
2154 } */
2155 struct proc *p = l->l_proc;
2156 kauth_cred_t cred = l->l_cred;
2157 struct filedesc *fdp = p->p_fd;
2158 struct file *fp;
2159 struct vnode *vp;
2160 struct vattr vattr;
2161 off_t newoff;
2162 int error;
2163
2164 if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
2165 return (EBADF);
2166
2167 vp = (struct vnode *)fp->f_data;
2168 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2169 error = ESPIPE;
2170 FILE_UNLOCK(fp);
2171 goto out;
2172 }
2173
2174 switch (SCARG(uap, whence)) {
2175 case SEEK_CUR:
2176 newoff = fp->f_offset + SCARG(uap, offset);
2177 FILE_USE(fp);
2178 break;
2179 case SEEK_END:
2180 FILE_USE(fp);
2181 error = VOP_GETATTR(vp, &vattr, cred);
2182 if (error) {
2183 FILE_UNUSE(fp, l);
2184 goto out;
2185 }
2186 newoff = SCARG(uap, offset) + vattr.va_size;
2187 break;
2188 case SEEK_SET:
2189 FILE_USE(fp);
2190 newoff = SCARG(uap, offset);
2191 break;
2192 default:
2193 FILE_UNLOCK(fp);
2194 error = EINVAL;
2195 goto out;
2196 }
2197 if ((error = VOP_SEEK(vp, fp->f_offset, newoff, cred)) == 0) {
2198 FILE_LOCK(fp);
2199 *(off_t *)retval = fp->f_offset = newoff;
2200 FILE_UNLOCK(fp);
2201 }
2202 FILE_UNUSE(fp, l);
2203 out:
2204 return (error);
2205 }
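
/*
 * Usage sketch for the whence handling above (userland; assumes only the
 * standard lseek(2) prototype from <unistd.h>):
 *
 *	off_t size = lseek(fd, (off_t)0, SEEK_END);	new offset = file size
 *	off_t cur  = lseek(fd, (off_t)0, SEEK_CUR);	new offset = old offset
 *
 * Both calls return the resulting offset, or -1 with errno set on error.
 */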
2206
2207 /*
2208 * Positional read system call.
2209 */
2210 int
2211 sys_pread(struct lwp *l, const struct sys_pread_args *uap, register_t *retval)
2212 {
2213 /* {
2214 syscallarg(int) fd;
2215 syscallarg(void *) buf;
2216 syscallarg(size_t) nbyte;
2217 syscallarg(off_t) offset;
2218 } */
2219 struct proc *p = l->l_proc;
2220 struct filedesc *fdp = p->p_fd;
2221 struct file *fp;
2222 struct vnode *vp;
2223 off_t offset;
2224 int error, fd = SCARG(uap, fd);
2225
2226 if ((fp = fd_getfile(fdp, fd)) == NULL)
2227 return (EBADF);
2228
2229 if ((fp->f_flag & FREAD) == 0) {
2230 FILE_UNLOCK(fp);
2231 return (EBADF);
2232 }
2233
2234 FILE_USE(fp);
2235
2236 vp = (struct vnode *)fp->f_data;
2237 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2238 error = ESPIPE;
2239 goto out;
2240 }
2241
2242 offset = SCARG(uap, offset);
2243
2244 /*
2245 * XXX This works because no file systems actually
2246 * XXX take any action on the seek operation.
2247 */
2248 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2249 goto out;
2250
2251 /* dofileread() will unuse the descriptor for us */
2252 return (dofileread(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2253 &offset, 0, retval));
2254
2255 out:
2256 FILE_UNUSE(fp, l);
2257 return (error);
2258 }
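
/*
 * Note that the offset is handed to dofileread() by address with a zero
 * flags argument, so the descriptor's own offset is left untouched.
 * Userland sketch (assumes the standard pread(2) prototype from
 * <unistd.h>):
 *
 *	char buf[512];
 *	ssize_t n = pread(fd, buf, sizeof(buf), (off_t)4096);
 *
 * This reads up to 512 bytes starting at byte 4096 without moving the
 * seek pointer used by read(2)/write(2).
 */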
2259
2260 /*
2261 * Positional scatter read system call.
2262 */
2263 int
2264 sys_preadv(struct lwp *l, const struct sys_preadv_args *uap, register_t *retval)
2265 {
2266 /* {
2267 syscallarg(int) fd;
2268 syscallarg(const struct iovec *) iovp;
2269 syscallarg(int) iovcnt;
2270 syscallarg(off_t) offset;
2271 } */
2272 off_t offset = SCARG(uap, offset);
2273
2274 return do_filereadv(SCARG(uap, fd), SCARG(uap, iovp),
2275 SCARG(uap, iovcnt), &offset, 0, retval);
2276 }
2277
2278 /*
2279 * Positional write system call.
2280 */
2281 int
2282 sys_pwrite(struct lwp *l, const struct sys_pwrite_args *uap, register_t *retval)
2283 {
2284 /* {
2285 syscallarg(int) fd;
2286 syscallarg(const void *) buf;
2287 syscallarg(size_t) nbyte;
2288 syscallarg(off_t) offset;
2289 } */
2290 struct proc *p = l->l_proc;
2291 struct filedesc *fdp = p->p_fd;
2292 struct file *fp;
2293 struct vnode *vp;
2294 off_t offset;
2295 int error, fd = SCARG(uap, fd);
2296
2297 if ((fp = fd_getfile(fdp, fd)) == NULL)
2298 return (EBADF);
2299
2300 if ((fp->f_flag & FWRITE) == 0) {
2301 FILE_UNLOCK(fp);
2302 return (EBADF);
2303 }
2304
2305 FILE_USE(fp);
2306
2307 vp = (struct vnode *)fp->f_data;
2308 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2309 error = ESPIPE;
2310 goto out;
2311 }
2312
2313 offset = SCARG(uap, offset);
2314
2315 /*
2316 * XXX This works because no file systems actually
2317 * XXX take any action on the seek operation.
2318 */
2319 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2320 goto out;
2321
2322 /* dofilewrite() will unuse the descriptor for us */
2323 return (dofilewrite(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2324 &offset, 0, retval));
2325
2326 out:
2327 FILE_UNUSE(fp, l);
2328 return (error);
2329 }
2330
2331 /*
2332 * Positional gather write system call.
2333 */
2334 int
2335 sys_pwritev(struct lwp *l, const struct sys_pwritev_args *uap, register_t *retval)
2336 {
2337 /* {
2338 syscallarg(int) fd;
2339 syscallarg(const struct iovec *) iovp;
2340 syscallarg(int) iovcnt;
2341 syscallarg(off_t) offset;
2342 } */
2343 off_t offset = SCARG(uap, offset);
2344
2345 return do_filewritev(SCARG(uap, fd), SCARG(uap, iovp),
2346 SCARG(uap, iovcnt), &offset, 0, retval);
2347 }
2348
2349 /*
2350 * Check access permissions.
2351 */
2352 int
2353 sys_access(struct lwp *l, const struct sys_access_args *uap, register_t *retval)
2354 {
2355 /* {
2356 syscallarg(const char *) path;
2357 syscallarg(int) flags;
2358 } */
2359 kauth_cred_t cred;
2360 struct vnode *vp;
2361 int error, flags;
2362 struct nameidata nd;
2363
2364 cred = kauth_cred_dup(l->l_cred);
2365 kauth_cred_seteuid(cred, kauth_cred_getuid(l->l_cred));
2366 kauth_cred_setegid(cred, kauth_cred_getgid(l->l_cred));
2367 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2368 SCARG(uap, path));
2369 /* Override default credentials */
2370 nd.ni_cnd.cn_cred = cred;
2371 if ((error = namei(&nd)) != 0)
2372 goto out;
2373 vp = nd.ni_vp;
2374
2375 /* Flags == 0 means only check for existence. */
2376 if (SCARG(uap, flags)) {
2377 flags = 0;
2378 if (SCARG(uap, flags) & R_OK)
2379 flags |= VREAD;
2380 if (SCARG(uap, flags) & W_OK)
2381 flags |= VWRITE;
2382 if (SCARG(uap, flags) & X_OK)
2383 flags |= VEXEC;
2384
2385 error = VOP_ACCESS(vp, flags, cred);
2386 if (!error && (flags & VWRITE))
2387 error = vn_writechk(vp);
2388 }
2389 vput(vp);
2390 out:
2391 kauth_cred_free(cred);
2392 return (error);
2393 }
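
/*
 * Userland sketch (assumes the standard access(2) prototype from
 * <unistd.h>).  Because the temporary credential built above carries the
 * real user and group IDs, a set-uid program sees the invoking user's
 * rights, not its own:
 *
 *	if (access(path, R_OK | W_OK) == -1)
 *		return -1;
 */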
2394
2395 /*
2396 * Common code for all sys_stat functions, including compat versions.
2397 */
2398 int
2399 do_sys_stat(struct lwp *l, const char *path, unsigned int nd_flags,
2400 struct stat *sb)
2401 {
2402 int error;
2403 struct nameidata nd;
2404
2405 NDINIT(&nd, LOOKUP, nd_flags | LOCKLEAF | TRYEMULROOT,
2406 UIO_USERSPACE, path);
2407 error = namei(&nd);
2408 if (error != 0)
2409 return error;
2410 error = vn_stat(nd.ni_vp, sb, l);
2411 vput(nd.ni_vp);
2412 return error;
2413 }
2414
2415 /*
2416 * Get file status; this version follows links.
2417 */
2418 /* ARGSUSED */
2419 int
2420 sys___stat30(struct lwp *l, const struct sys___stat30_args *uap, register_t *retval)
2421 {
2422 /* {
2423 syscallarg(const char *) path;
2424 syscallarg(struct stat *) ub;
2425 } */
2426 struct stat sb;
2427 int error;
2428
2429 error = do_sys_stat(l, SCARG(uap, path), FOLLOW, &sb);
2430 if (error)
2431 return error;
2432 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2433 }
2434
2435 /*
2436 * Get file status; this version does not follow links.
2437 */
2438 /* ARGSUSED */
2439 int
2440 sys___lstat30(struct lwp *l, const struct sys___lstat30_args *uap, register_t *retval)
2441 {
2442 /* {
2443 syscallarg(const char *) path;
2444 syscallarg(struct stat *) ub;
2445 } */
2446 struct stat sb;
2447 int error;
2448
2449 error = do_sys_stat(l, SCARG(uap, path), NOFOLLOW, &sb);
2450 if (error)
2451 return error;
2452 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2453 }
2454
2455 /*
2456 * Get configurable pathname variables.
2457 */
2458 /* ARGSUSED */
2459 int
2460 sys_pathconf(struct lwp *l, const struct sys_pathconf_args *uap, register_t *retval)
2461 {
2462 /* {
2463 syscallarg(const char *) path;
2464 syscallarg(int) name;
2465 } */
2466 int error;
2467 struct nameidata nd;
2468
2469 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2470 SCARG(uap, path));
2471 if ((error = namei(&nd)) != 0)
2472 return (error);
2473 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), retval);
2474 vput(nd.ni_vp);
2475 return (error);
2476 }
2477
2478 /*
2479 * Return target name of a symbolic link.
2480 */
2481 /* ARGSUSED */
2482 int
2483 sys_readlink(struct lwp *l, const struct sys_readlink_args *uap, register_t *retval)
2484 {
2485 /* {
2486 syscallarg(const char *) path;
2487 syscallarg(char *) buf;
2488 syscallarg(size_t) count;
2489 } */
2490 struct vnode *vp;
2491 struct iovec aiov;
2492 struct uio auio;
2493 int error;
2494 struct nameidata nd;
2495
2496 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2497 SCARG(uap, path));
2498 if ((error = namei(&nd)) != 0)
2499 return (error);
2500 vp = nd.ni_vp;
2501 if (vp->v_type != VLNK)
2502 error = EINVAL;
2503 else if (!(vp->v_mount->mnt_flag & MNT_SYMPERM) ||
2504 (error = VOP_ACCESS(vp, VREAD, l->l_cred)) == 0) {
2505 aiov.iov_base = SCARG(uap, buf);
2506 aiov.iov_len = SCARG(uap, count);
2507 auio.uio_iov = &aiov;
2508 auio.uio_iovcnt = 1;
2509 auio.uio_offset = 0;
2510 auio.uio_rw = UIO_READ;
2511 KASSERT(l == curlwp);
2512 auio.uio_vmspace = l->l_proc->p_vmspace;
2513 auio.uio_resid = SCARG(uap, count);
2514 error = VOP_READLINK(vp, &auio, l->l_cred);
2515 }
2516 vput(vp);
2517 *retval = SCARG(uap, count) - auio.uio_resid;
2518 return (error);
2519 }
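
/*
 * Note the return convention implemented above: the number of bytes
 * copied out is reported via *retval and the result is not NUL
 * terminated.  Userland sketch (assumes the standard readlink(2)
 * prototype from <unistd.h> and MAXPATHLEN from <sys/param.h>):
 *
 *	char target[MAXPATHLEN];
 *	ssize_t n = readlink(path, target, sizeof(target) - 1);
 *	if (n != -1)
 *		target[n] = '\0';
 */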
2520
2521 /*
2522 * Change flags of a file given a path name.
2523 */
2524 /* ARGSUSED */
2525 int
2526 sys_chflags(struct lwp *l, const struct sys_chflags_args *uap, register_t *retval)
2527 {
2528 /* {
2529 syscallarg(const char *) path;
2530 syscallarg(u_long) flags;
2531 } */
2532 struct vnode *vp;
2533 int error;
2534 struct nameidata nd;
2535
2536 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2537 SCARG(uap, path));
2538 if ((error = namei(&nd)) != 0)
2539 return (error);
2540 vp = nd.ni_vp;
2541 error = change_flags(vp, SCARG(uap, flags), l);
2542 vput(vp);
2543 return (error);
2544 }
2545
2546 /*
2547 * Change flags of a file given a file descriptor.
2548 */
2549 /* ARGSUSED */
2550 int
2551 sys_fchflags(struct lwp *l, const struct sys_fchflags_args *uap, register_t *retval)
2552 {
2553 /* {
2554 syscallarg(int) fd;
2555 syscallarg(u_long) flags;
2556 } */
2557 struct proc *p = l->l_proc;
2558 struct vnode *vp;
2559 struct file *fp;
2560 int error;
2561
2562 /* getvnode() will use the descriptor for us */
2563 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2564 return (error);
2565 vp = (struct vnode *)fp->f_data;
2566 error = change_flags(vp, SCARG(uap, flags), l);
2567 VOP_UNLOCK(vp, 0);
2568 FILE_UNUSE(fp, l);
2569 return (error);
2570 }
2571
2572 /*
2573 * Change flags of a file given a path name; this version does
2574 * not follow links.
2575 */
2576 int
2577 sys_lchflags(struct lwp *l, const struct sys_lchflags_args *uap, register_t *retval)
2578 {
2579 /* {
2580 syscallarg(const char *) path;
2581 syscallarg(u_long) flags;
2582 } */
2583 struct vnode *vp;
2584 int error;
2585 struct nameidata nd;
2586
2587 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2588 SCARG(uap, path));
2589 if ((error = namei(&nd)) != 0)
2590 return (error);
2591 vp = nd.ni_vp;
2592 error = change_flags(vp, SCARG(uap, flags), l);
2593 vput(vp);
2594 return (error);
2595 }
2596
2597 /*
2598 * Common routine to change flags of a file.
2599 */
2600 int
2601 change_flags(struct vnode *vp, u_long flags, struct lwp *l)
2602 {
2603 struct vattr vattr;
2604 int error;
2605
2606 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2607 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2608 /*
2609 * Non-superusers cannot change the flags on devices, even if they
2610 * own them.
2611 */
2612 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
2613 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2614 goto out;
2615 if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2616 error = EINVAL;
2617 goto out;
2618 }
2619 }
2620 VATTR_NULL(&vattr);
2621 vattr.va_flags = flags;
2622 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2623 out:
2624 return (error);
2625 }
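
/*
 * Userland sketch (assumes chflags(2) and the UF_ and SF_ flag
 * definitions from <sys/stat.h>).  Per the check above, only the
 * superuser may change the flags on block or character devices:
 *
 *	chflags(path, UF_NODUMP);	exclude the file from dumps
 *	chflags(path, 0);		clear the file flags again
 */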
2626
2627 /*
2628 * Change mode of a file given path name; this version follows links.
2629 */
2630 /* ARGSUSED */
2631 int
2632 sys_chmod(struct lwp *l, const struct sys_chmod_args *uap, register_t *retval)
2633 {
2634 /* {
2635 syscallarg(const char *) path;
2636 syscallarg(int) mode;
2637 } */
2638 int error;
2639 struct nameidata nd;
2640
2641 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2642 SCARG(uap, path));
2643 if ((error = namei(&nd)) != 0)
2644 return (error);
2645
2646 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2647
2648 vrele(nd.ni_vp);
2649 return (error);
2650 }
2651
2652 /*
2653 * Change mode of a file given a file descriptor.
2654 */
2655 /* ARGSUSED */
2656 int
2657 sys_fchmod(struct lwp *l, const struct sys_fchmod_args *uap, register_t *retval)
2658 {
2659 /* {
2660 syscallarg(int) fd;
2661 syscallarg(int) mode;
2662 } */
2663 struct proc *p = l->l_proc;
2664 struct file *fp;
2665 int error;
2666
2667 /* getvnode() will use the descriptor for us */
2668 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2669 return (error);
2670
2671 error = change_mode((struct vnode *)fp->f_data, SCARG(uap, mode), l);
2672 FILE_UNUSE(fp, l);
2673 return (error);
2674 }
2675
2676 /*
2677 * Change mode of a file given path name; this version does not follow links.
2678 */
2679 /* ARGSUSED */
2680 int
2681 sys_lchmod(struct lwp *l, const struct sys_lchmod_args *uap, register_t *retval)
2682 {
2683 /* {
2684 syscallarg(const char *) path;
2685 syscallarg(int) mode;
2686 } */
2687 int error;
2688 struct nameidata nd;
2689
2690 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2691 SCARG(uap, path));
2692 if ((error = namei(&nd)) != 0)
2693 return (error);
2694
2695 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2696
2697 vrele(nd.ni_vp);
2698 return (error);
2699 }
2700
2701 /*
2702 * Common routine to set mode given a vnode.
2703 */
2704 static int
2705 change_mode(struct vnode *vp, int mode, struct lwp *l)
2706 {
2707 struct vattr vattr;
2708 int error;
2709
2710 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2711 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2712 VATTR_NULL(&vattr);
2713 vattr.va_mode = mode & ALLPERMS;
2714 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2715 VOP_UNLOCK(vp, 0);
2716 return (error);
2717 }
2718
2719 /*
2720 * Set ownership given a path name; this version follows links.
2721 */
2722 /* ARGSUSED */
2723 int
2724 sys_chown(struct lwp *l, const struct sys_chown_args *uap, register_t *retval)
2725 {
2726 /* {
2727 syscallarg(const char *) path;
2728 syscallarg(uid_t) uid;
2729 syscallarg(gid_t) gid;
2730 } */
2731 int error;
2732 struct nameidata nd;
2733
2734 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2735 SCARG(uap, path));
2736 if ((error = namei(&nd)) != 0)
2737 return (error);
2738
2739 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2740
2741 vrele(nd.ni_vp);
2742 return (error);
2743 }
2744
2745 /*
2746 * Set ownership given a path name; this version follows links.
2747 * Provides POSIX semantics.
2748 */
2749 /* ARGSUSED */
2750 int
2751 sys___posix_chown(struct lwp *l, const struct sys___posix_chown_args *uap, register_t *retval)
2752 {
2753 /* {
2754 syscallarg(const char *) path;
2755 syscallarg(uid_t) uid;
2756 syscallarg(gid_t) gid;
2757 } */
2758 int error;
2759 struct nameidata nd;
2760
2761 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2762 SCARG(uap, path));
2763 if ((error = namei(&nd)) != 0)
2764 return (error);
2765
2766 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2767
2768 vrele(nd.ni_vp);
2769 return (error);
2770 }
2771
2772 /*
2773 * Set ownership given a file descriptor.
2774 */
2775 /* ARGSUSED */
2776 int
2777 sys_fchown(struct lwp *l, const struct sys_fchown_args *uap, register_t *retval)
2778 {
2779 /* {
2780 syscallarg(int) fd;
2781 syscallarg(uid_t) uid;
2782 syscallarg(gid_t) gid;
2783 } */
2784 struct proc *p = l->l_proc;
2785 int error;
2786 struct file *fp;
2787
2788 /* getvnode() will use the descriptor for us */
2789 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2790 return (error);
2791
2792 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2793 SCARG(uap, gid), l, 0);
2794 FILE_UNUSE(fp, l);
2795 return (error);
2796 }
2797
2798 /*
2799 * Set ownership given a file descriptor, providing POSIX/XPG semantics.
2800 */
2801 /* ARGSUSED */
2802 int
2803 sys___posix_fchown(struct lwp *l, const struct sys___posix_fchown_args *uap, register_t *retval)
2804 {
2805 /* {
2806 syscallarg(int) fd;
2807 syscallarg(uid_t) uid;
2808 syscallarg(gid_t) gid;
2809 } */
2810 struct proc *p = l->l_proc;
2811 int error;
2812 struct file *fp;
2813
2814 /* getvnode() will use the descriptor for us */
2815 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2816 return (error);
2817
2818 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2819 SCARG(uap, gid), l, 1);
2820 FILE_UNUSE(fp, l);
2821 return (error);
2822 }
2823
2824 /*
2825 * Set ownership given a path name; this version does not follow links.
2826 */
2827 /* ARGSUSED */
2828 int
2829 sys_lchown(struct lwp *l, const struct sys_lchown_args *uap, register_t *retval)
2830 {
2831 /* {
2832 syscallarg(const char *) path;
2833 syscallarg(uid_t) uid;
2834 syscallarg(gid_t) gid;
2835 } */
2836 int error;
2837 struct nameidata nd;
2838
2839 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2840 SCARG(uap, path));
2841 if ((error = namei(&nd)) != 0)
2842 return (error);
2843
2844 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2845
2846 vrele(nd.ni_vp);
2847 return (error);
2848 }
2849
2850 /*
2851 * Set ownership given a path name; this version does not follow links.
2852 * Provides POSIX/XPG semantics.
2853 */
2854 /* ARGSUSED */
2855 int
2856 sys___posix_lchown(struct lwp *l, const struct sys___posix_lchown_args *uap, register_t *retval)
2857 {
2858 /* {
2859 syscallarg(const char *) path;
2860 syscallarg(uid_t) uid;
2861 syscallarg(gid_t) gid;
2862 } */
2863 int error;
2864 struct nameidata nd;
2865
2866 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2867 SCARG(uap, path));
2868 if ((error = namei(&nd)) != 0)
2869 return (error);
2870
2871 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2872
2873 vrele(nd.ni_vp);
2874 return (error);
2875 }
2876
2877 /*
2878 * Common routine to set ownership given a vnode.
2879 */
2880 static int
2881 change_owner(struct vnode *vp, uid_t uid, gid_t gid, struct lwp *l,
2882 int posix_semantics)
2883 {
2884 struct vattr vattr;
2885 mode_t newmode;
2886 int error;
2887
2888 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2889 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2890 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2891 goto out;
2892
2893 #define CHANGED(x) ((int)(x) != -1)
2894 newmode = vattr.va_mode;
2895 if (posix_semantics) {
2896 /*
2897 * POSIX/XPG semantics: if the caller is not the super-user,
2898 * clear set-user-id and set-group-id bits. Both POSIX and
2899 * the XPG consider the behaviour for calls by the super-user
2900 * implementation-defined; we leave the set-user-id and set-
2901 * group-id settings intact in that case.
2902 */
2903 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
2904 NULL) != 0)
2905 newmode &= ~(S_ISUID | S_ISGID);
2906 } else {
2907 /*
2908 * NetBSD semantics: when changing owner and/or group,
2909 * clear the respective bit(s).
2910 */
2911 if (CHANGED(uid))
2912 newmode &= ~S_ISUID;
2913 if (CHANGED(gid))
2914 newmode &= ~S_ISGID;
2915 }
2916 /* Update va_mode iff altered. */
2917 if (vattr.va_mode == newmode)
2918 newmode = VNOVAL;
2919
2920 VATTR_NULL(&vattr);
2921 vattr.va_uid = CHANGED(uid) ? uid : (uid_t)VNOVAL;
2922 vattr.va_gid = CHANGED(gid) ? gid : (gid_t)VNOVAL;
2923 vattr.va_mode = newmode;
2924 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2925 #undef CHANGED
2926
2927 out:
2928 VOP_UNLOCK(vp, 0);
2929 return (error);
2930 }
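
/*
 * The CHANGED() test above gives -1 its usual "leave this ID alone"
 * meaning.  Userland sketch (assumes the standard chown(2) prototype from
 * <unistd.h>):
 *
 *	chown(path, (uid_t)-1, newgid);		change the group only
 *	chown(path, newuid, (gid_t)-1);		change the owner only
 *
 * With the native entry points only the set-id bit matching the ID that
 * actually changed is cleared; the __posix_ variants clear both S_ISUID
 * and S_ISGID for any non-superuser caller.
 */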
2931
2932 /*
2933 * Set the access and modification times given a path name; this
2934 * version follows links.
2935 */
2936 /* ARGSUSED */
2937 int
2938 sys_utimes(struct lwp *l, const struct sys_utimes_args *uap, register_t *retval)
2939 {
2940 /* {
2941 syscallarg(const char *) path;
2942 syscallarg(const struct timeval *) tptr;
2943 } */
2944
2945 return do_sys_utimes(l, NULL, SCARG(uap, path), FOLLOW,
2946 SCARG(uap, tptr), UIO_USERSPACE);
2947 }
2948
2949 /*
2950 * Set the access and modification times given a file descriptor.
2951 */
2952 /* ARGSUSED */
2953 int
2954 sys_futimes(struct lwp *l, const struct sys_futimes_args *uap, register_t *retval)
2955 {
2956 /* {
2957 syscallarg(int) fd;
2958 syscallarg(const struct timeval *) tptr;
2959 } */
2960 int error;
2961 struct file *fp;
2962
2963 /* getvnode() will use the descriptor for us */
2964 if ((error = getvnode(l->l_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
2965 return (error);
2966
2967 error = do_sys_utimes(l, fp->f_data, NULL, 0,
2968 SCARG(uap, tptr), UIO_USERSPACE);
2969
2970 FILE_UNUSE(fp, l);
2971 return (error);
2972 }
2973
2974 /*
2975 * Set the access and modification times given a path name; this
2976 * version does not follow links.
2977 */
2978 int
2979 sys_lutimes(struct lwp *l, const struct sys_lutimes_args *uap, register_t *retval)
2980 {
2981 /* {
2982 syscallarg(const char *) path;
2983 syscallarg(const struct timeval *) tptr;
2984 } */
2985
2986 return do_sys_utimes(l, NULL, SCARG(uap, path), NOFOLLOW,
2987 SCARG(uap, tptr), UIO_USERSPACE);
2988 }
2989
2990 /*
2991 * Common routine to set access and modification times given a vnode.
2992 */
2993 int
2994 do_sys_utimes(struct lwp *l, struct vnode *vp, const char *path, int flag,
2995 const struct timeval *tptr, enum uio_seg seg)
2996 {
2997 struct vattr vattr;
2998 struct nameidata nd;
2999 int error;
3000
3001 VATTR_NULL(&vattr);
3002 if (tptr == NULL) {
3003 nanotime(&vattr.va_atime);
3004 vattr.va_mtime = vattr.va_atime;
3005 vattr.va_vaflags |= VA_UTIMES_NULL;
3006 } else {
3007 struct timeval tv[2];
3008
3009 if (seg != UIO_SYSSPACE) {
3010 error = copyin(tptr, &tv, sizeof (tv));
3011 if (error != 0)
3012 return error;
3013 tptr = tv;
3014 }
3015 TIMEVAL_TO_TIMESPEC(tptr, &vattr.va_atime);
3016 TIMEVAL_TO_TIMESPEC(tptr + 1, &vattr.va_mtime);
3017 }
3018
3019 if (vp == NULL) {
3020 NDINIT(&nd, LOOKUP, flag | TRYEMULROOT, UIO_USERSPACE, path);
3021 if ((error = namei(&nd)) != 0)
3022 return (error);
3023 vp = nd.ni_vp;
3024 } else
3025 nd.ni_vp = NULL;
3026
3027 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3028 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3029 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3030 VOP_UNLOCK(vp, 0);
3031
3032 if (nd.ni_vp != NULL)
3033 vrele(nd.ni_vp);
3034
3035 return (error);
3036 }
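
/*
 * Userland sketch (assumes the standard utimes(2) prototype from
 * <sys/time.h>).  A null timeval pointer takes the VA_UTIMES_NULL branch
 * above and sets both stamps to "now"; otherwise element 0 is the access
 * time and element 1 the modification time:
 *
 *	struct timeval tv[2] = { { 0, 0 }, { 0, 0 } };
 *
 *	utimes(path, NULL);	touch: atime = mtime = current time
 *	utimes(path, tv);	set both timestamps to the epoch
 */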
3037
3038 /*
3039 * Truncate a file given its path name.
3040 */
3041 /* ARGSUSED */
3042 int
3043 sys_truncate(struct lwp *l, const struct sys_truncate_args *uap, register_t *retval)
3044 {
3045 /* {
3046 syscallarg(const char *) path;
3047 syscallarg(int) pad;
3048 syscallarg(off_t) length;
3049 } */
3050 struct vnode *vp;
3051 struct vattr vattr;
3052 int error;
3053 struct nameidata nd;
3054
3055 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3056 SCARG(uap, path));
3057 if ((error = namei(&nd)) != 0)
3058 return (error);
3059 vp = nd.ni_vp;
3060 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3061 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3062 if (vp->v_type == VDIR)
3063 error = EISDIR;
3064 else if ((error = vn_writechk(vp)) == 0 &&
3065 (error = VOP_ACCESS(vp, VWRITE, l->l_cred)) == 0) {
3066 VATTR_NULL(&vattr);
3067 vattr.va_size = SCARG(uap, length);
3068 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3069 }
3070 vput(vp);
3071 return (error);
3072 }
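
/*
 * Userland sketch (assumes the standard truncate(2) prototype from
 * <unistd.h>).  The new length is applied with VOP_SETATTR() above, so a
 * file may be shrunk or, on file systems that support it, extended, with
 * the added bytes reading back as zeroes:
 *
 *	truncate(path, (off_t)0);		discard the contents
 *	truncate(path, (off_t)1048576);		grow the file to 1 MB
 */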
3073
3074 /*
3075 * Truncate a file given a file descriptor.
3076 */
3077 /* ARGSUSED */
3078 int
3079 sys_ftruncate(struct lwp *l, const struct sys_ftruncate_args *uap, register_t *retval)
3080 {
3081 /* {
3082 syscallarg(int) fd;
3083 syscallarg(int) pad;
3084 syscallarg(off_t) length;
3085 } */
3086 struct proc *p = l->l_proc;
3087 struct vattr vattr;
3088 struct vnode *vp;
3089 struct file *fp;
3090 int error;
3091
3092 /* getvnode() will use the descriptor for us */
3093 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3094 return (error);
3095 if ((fp->f_flag & FWRITE) == 0) {
3096 error = EINVAL;
3097 goto out;
3098 }
3099 vp = (struct vnode *)fp->f_data;
3100 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3101 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3102 if (vp->v_type == VDIR)
3103 error = EISDIR;
3104 else if ((error = vn_writechk(vp)) == 0) {
3105 VATTR_NULL(&vattr);
3106 vattr.va_size = SCARG(uap, length);
3107 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
3108 }
3109 VOP_UNLOCK(vp, 0);
3110 out:
3111 FILE_UNUSE(fp, l);
3112 return (error);
3113 }
3114
3115 /*
3116 * Sync an open file.
3117 */
3118 /* ARGSUSED */
3119 int
3120 sys_fsync(struct lwp *l, const struct sys_fsync_args *uap, register_t *retval)
3121 {
3122 /* {
3123 syscallarg(int) fd;
3124 } */
3125 struct proc *p = l->l_proc;
3126 struct vnode *vp;
3127 struct file *fp;
3128 int error;
3129
3130 /* getvnode() will use the descriptor for us */
3131 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3132 return (error);
3133 vp = (struct vnode *)fp->f_data;
3134 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3135 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT, 0, 0);
3136 if (error == 0 && bioopsp != NULL &&
3137 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3138 (*bioopsp->io_fsync)(vp, 0);
3139 VOP_UNLOCK(vp, 0);
3140 FILE_UNUSE(fp, l);
3141 return (error);
3142 }
3143
3144 /*
3145 * Sync a range of file data. API modeled after that found in AIX.
3146 *
3147 * FDATASYNC indicates that we need only save enough metadata to be able
3148 * to re-read the written data. Note we duplicate AIX's requirement that
3149 * the file be open for writing.
3150 */
3151 /* ARGSUSED */
3152 int
3153 sys_fsync_range(struct lwp *l, const struct sys_fsync_range_args *uap, register_t *retval)
3154 {
3155 /* {
3156 syscallarg(int) fd;
3157 syscallarg(int) flags;
3158 syscallarg(off_t) start;
3159 syscallarg(off_t) length;
3160 } */
3161 struct proc *p = l->l_proc;
3162 struct vnode *vp;
3163 struct file *fp;
3164 int flags, nflags;
3165 off_t s, e, len;
3166 int error;
3167
3168 /* getvnode() will use the descriptor for us */
3169 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3170 return (error);
3171
3172 if ((fp->f_flag & FWRITE) == 0) {
3173 error = EBADF;
3174 goto out;
3175 }
3176
3177 flags = SCARG(uap, flags);
3178 if (((flags & (FDATASYNC | FFILESYNC)) == 0) ||
3179 ((~flags & (FDATASYNC | FFILESYNC)) == 0)) {
3180 error = EINVAL;
3181 goto out;
3182 }
3183 /* Now set up the flags for value(s) to pass to VOP_FSYNC() */
3184 if (flags & FDATASYNC)
3185 nflags = FSYNC_DATAONLY | FSYNC_WAIT;
3186 else
3187 nflags = FSYNC_WAIT;
3188 if (flags & FDISKSYNC)
3189 nflags |= FSYNC_CACHE;
3190
3191 len = SCARG(uap, length);
3192 /* If length == 0, we do the whole file, and s = e = 0 will do that */
3193 if (len) {
3194 s = SCARG(uap, start);
3195 e = s + len;
3196 if (e < s) {
3197 error = EINVAL;
3198 goto out;
3199 }
3200 } else {
3201 e = 0;
3202 s = 0;
3203 }
3204
3205 vp = (struct vnode *)fp->f_data;
3206 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3207 error = VOP_FSYNC(vp, fp->f_cred, nflags, s, e);
3208
3209 if (error == 0 && bioopsp != NULL &&
3210 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3211 (*bioopsp->io_fsync)(vp, nflags);
3212
3213 VOP_UNLOCK(vp, 0);
3214 out:
3215 FILE_UNUSE(fp, l);
3216 return (error);
3217 }
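
/*
 * Userland sketch (assumes the fsync_range(2) prototype and the FDATASYNC,
 * FFILESYNC and FDISKSYNC constants, on NetBSD available from <unistd.h>).
 * Exactly one of FDATASYNC or FFILESYNC must be supplied, a length of 0
 * means "the whole file", and FDISKSYNC additionally asks the driver to
 * flush the disk's write cache:
 *
 *	if (fsync_range(fd, FDATASYNC | FDISKSYNC, start, len) == -1)
 *		return -1;
 */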
3218
3219 /*
3220 * Sync the data of an open file.
3221 */
3222 /* ARGSUSED */
3223 int
3224 sys_fdatasync(struct lwp *l, const struct sys_fdatasync_args *uap, register_t *retval)
3225 {
3226 /* {
3227 syscallarg(int) fd;
3228 } */
3229 struct proc *p = l->l_proc;
3230 struct vnode *vp;
3231 struct file *fp;
3232 int error;
3233
3234 /* getvnode() will use the descriptor for us */
3235 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3236 return (error);
3237 if ((fp->f_flag & FWRITE) == 0) {
3238 FILE_UNUSE(fp, l);
3239 return (EBADF);
3240 }
3241 vp = (struct vnode *)fp->f_data;
3242 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3243 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT|FSYNC_DATAONLY, 0, 0);
3244 VOP_UNLOCK(vp, 0);
3245 FILE_UNUSE(fp, l);
3246 return (error);
3247 }
3248
3249 /*
3250 * Rename files, (standard) BSD semantics frontend.
3251 */
3252 /* ARGSUSED */
3253 int
3254 sys_rename(struct lwp *l, const struct sys_rename_args *uap, register_t *retval)
3255 {
3256 /* {
3257 syscallarg(const char *) from;
3258 syscallarg(const char *) to;
3259 } */
3260
3261 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 0));
3262 }
3263
3264 /*
3265 * Rename files, POSIX semantics frontend.
3266 */
3267 /* ARGSUSED */
3268 int
3269 sys___posix_rename(struct lwp *l, const struct sys___posix_rename_args *uap, register_t *retval)
3270 {
3271 /* {
3272 syscallarg(const char *) from;
3273 syscallarg(const char *) to;
3274 } */
3275
3276 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 1));
3277 }
3278
3279 /*
3280 * Rename files. Source and destination must either both be directories,
3281 * or both not be directories. If target is a directory, it must be empty.
3282 * If `from' and `to' refer to the same object, the value of the `retain'
3283 * argument is used to determine whether `from' will be
3284 *
3285 * (retain == 0) deleted unless `from' and `to' refer to the same
3286 * name in the file system's name space (BSD).
3287 * (retain == 1) always retained (POSIX).
3288 */
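
/*
 * Userland sketch of the difference (assumes the standard link(2) and
 * rename(2) prototypes from <unistd.h>): after
 *
 *	link("a", "b");
 *	rename("a", "b");
 *
 * the name "a" is removed under BSD semantics (retain == 0), whereas the
 * POSIX entry point (retain == 1) leaves both names in place and returns
 * success.
 */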
3289 int
3290 do_sys_rename(const char *from, const char *to, enum uio_seg seg, int retain)
3291 {
3292 struct vnode *tvp, *fvp, *tdvp;
3293 struct nameidata fromnd, tond;
3294 struct lwp *l = curlwp;
3295 struct proc *p;
3296 int error;
3297
3298 NDINIT(&fromnd, DELETE, LOCKPARENT | SAVESTART | TRYEMULROOT,
3299 seg, from);
3300 if ((error = namei(&fromnd)) != 0)
3301 return (error);
3302 if (fromnd.ni_dvp != fromnd.ni_vp)
3303 VOP_UNLOCK(fromnd.ni_dvp, 0);
3304 fvp = fromnd.ni_vp;
3305 NDINIT(&tond, RENAME,
3306 LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | TRYEMULROOT
3307 | (fvp->v_type == VDIR ? CREATEDIR : 0),
3308 seg, to);
3309 if ((error = namei(&tond)) != 0) {
3310 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3311 vrele(fromnd.ni_dvp);
3312 vrele(fvp);
3313 goto out1;
3314 }
3315 tdvp = tond.ni_dvp;
3316 tvp = tond.ni_vp;
3317
3318 if (tvp != NULL) {
3319 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
3320 error = ENOTDIR;
3321 goto out;
3322 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
3323 error = EISDIR;
3324 goto out;
3325 }
3326 }
3327
3328 if (fvp == tdvp)
3329 error = EINVAL;
3330
3331 /*
3332 * Source and destination refer to the same object.
3333 */
3334 if (fvp == tvp) {
3335 if (retain)
3336 error = -1;
3337 else if (fromnd.ni_dvp == tdvp &&
3338 fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
3339 !memcmp(fromnd.ni_cnd.cn_nameptr,
3340 tond.ni_cnd.cn_nameptr,
3341 fromnd.ni_cnd.cn_namelen))
3342 error = -1;
3343 }
3344
3345 #if NVERIEXEC > 0
3346 if (!error) {
3347 char *f1, *f2;
3348
3349 f1 = malloc(fromnd.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3350 strlcpy(f1, fromnd.ni_cnd.cn_nameptr, fromnd.ni_cnd.cn_namelen + 1);
3351
3352 f2 = malloc(tond.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3353 strlcpy(f2, tond.ni_cnd.cn_nameptr, tond.ni_cnd.cn_namelen + 1);
3354
3355 error = veriexec_renamechk(l, fvp, f1, tvp, f2);
3356
3357 free(f1, M_TEMP);
3358 free(f2, M_TEMP);
3359 }
3360 #endif /* NVERIEXEC > 0 */
3361
3362 out:
3363 p = l->l_proc;
3364 if (!error) {
3365 VOP_LEASE(tdvp, l->l_cred, LEASE_WRITE);
3366 if (fromnd.ni_dvp != tdvp)
3367 VOP_LEASE(fromnd.ni_dvp, l->l_cred, LEASE_WRITE);
3368 if (tvp) {
3369 VOP_LEASE(tvp, l->l_cred, LEASE_WRITE);
3370 }
3371 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
3372 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
3373 } else {
3374 VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd);
3375 if (tdvp == tvp)
3376 vrele(tdvp);
3377 else
3378 vput(tdvp);
3379 if (tvp)
3380 vput(tvp);
3381 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3382 vrele(fromnd.ni_dvp);
3383 vrele(fvp);
3384 }
3385 vrele(tond.ni_startdir);
3386 PNBUF_PUT(tond.ni_cnd.cn_pnbuf);
3387 out1:
3388 if (fromnd.ni_startdir)
3389 vrele(fromnd.ni_startdir);
3390 PNBUF_PUT(fromnd.ni_cnd.cn_pnbuf);
3391 return (error == -1 ? 0 : error);
3392 }
3393
3394 /*
3395 * Make a directory file.
3396 */
3397 /* ARGSUSED */
3398 int
3399 sys_mkdir(struct lwp *l, const struct sys_mkdir_args *uap, register_t *retval)
3400 {
3401 /* {
3402 syscallarg(const char *) path;
3403 syscallarg(int) mode;
3404 } */
3405 struct proc *p = l->l_proc;
3406 struct vnode *vp;
3407 struct vattr vattr;
3408 int error;
3409 struct nameidata nd;
3410
3411 NDINIT(&nd, CREATE, LOCKPARENT | CREATEDIR | TRYEMULROOT, UIO_USERSPACE,
3412 SCARG(uap, path));
3413 if ((error = namei(&nd)) != 0)
3414 return (error);
3415 vp = nd.ni_vp;
3416 if (vp != NULL) {
3417 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3418 if (nd.ni_dvp == vp)
3419 vrele(nd.ni_dvp);
3420 else
3421 vput(nd.ni_dvp);
3422 vrele(vp);
3423 return (EEXIST);
3424 }
3425 VATTR_NULL(&vattr);
3426 vattr.va_type = VDIR;
3427 /* We will read cwdi->cwdi_cmask unlocked. */
3428 vattr.va_mode =
3429 (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_cwdi->cwdi_cmask;
3430 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3431 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
3432 if (!error)
3433 vput(nd.ni_vp);
3434 return (error);
3435 }
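
/*
 * The mode computation above masks the caller's request with the process
 * umask.  Userland sketch (assumes the standard mkdir(2) and umask(2)
 * prototypes from <sys/stat.h>):
 *
 *	umask(022);
 *	mkdir("subdir", 0777);		resulting mode: 0777 & ~022 == 0755
 */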
3436
3437 /*
3438 * Remove a directory file.
3439 */
3440 /* ARGSUSED */
3441 int
3442 sys_rmdir(struct lwp *l, const struct sys_rmdir_args *uap, register_t *retval)
3443 {
3444 /* {
3445 syscallarg(const char *) path;
3446 } */
3447 struct vnode *vp;
3448 int error;
3449 struct nameidata nd;
3450
3451 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
3452 SCARG(uap, path));
3453 if ((error = namei(&nd)) != 0)
3454 return (error);
3455 vp = nd.ni_vp;
3456 if (vp->v_type != VDIR) {
3457 error = ENOTDIR;
3458 goto out;
3459 }
3460 /*
3461 * No rmdir "." please.
3462 */
3463 if (nd.ni_dvp == vp) {
3464 error = EINVAL;
3465 goto out;
3466 }
3467 /*
3468 * The root of a mounted filesystem cannot be deleted.
3469 */
3470 if (vp->v_vflag & VV_ROOT) {
3471 error = EBUSY;
3472 goto out;
3473 }
3474 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3475 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3476 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
3477 return (error);
3478
3479 out:
3480 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3481 if (nd.ni_dvp == vp)
3482 vrele(nd.ni_dvp);
3483 else
3484 vput(nd.ni_dvp);
3485 vput(vp);
3486 return (error);
3487 }
3488
3489 /*
3490 * Read a block of directory entries in a file system independent format.
3491 */
3492 int
3493 sys___getdents30(struct lwp *l, const struct sys___getdents30_args *uap, register_t *retval)
3494 {
3495 /* {
3496 syscallarg(int) fd;
3497 syscallarg(char *) buf;
3498 syscallarg(size_t) count;
3499 } */
3500 struct proc *p = l->l_proc;
3501 struct file *fp;
3502 int error, done;
3503
3504 /* getvnode() will use the descriptor for us */
3505 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3506 return (error);
3507 if ((fp->f_flag & FREAD) == 0) {
3508 error = EBADF;
3509 goto out;
3510 }
3511 error = vn_readdir(fp, SCARG(uap, buf), UIO_USERSPACE,
3512 SCARG(uap, count), &done, l, 0, 0);
3513 ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, buf), done, error);
3514 *retval = done;
3515 out:
3516 FILE_UNUSE(fp, l);
3517 return (error);
3518 }
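
/*
 * The buffer filled in by vn_readdir() holds a packed run of variable
 * length struct dirent records; consumers step from one record to the
 * next with d_reclen.  Sketch (assumes the struct dirent layout from
 * <sys/dirent.h>; buf and done are the buffer and byte count from above):
 *
 *	char *cp = buf;
 *	while (cp < buf + done) {
 *		struct dirent *dp = (struct dirent *)(void *)cp;
 *		printf("%s\n", dp->d_name);
 *		cp += dp->d_reclen;
 *	}
 */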
3519
3520 /*
3521 * Set the mode mask for creation of filesystem nodes.
3522 */
3523 int
3524 sys_umask(struct lwp *l, const struct sys_umask_args *uap, register_t *retval)
3525 {
3526 /* {
3527 syscallarg(mode_t) newmask;
3528 } */
3529 struct proc *p = l->l_proc;
3530 struct cwdinfo *cwdi;
3531
3532 /*
3533 * cwdi->cwdi_cmask will be read unlocked elsewhere. What's
3534 * important is that we serialize changes to the mask. The
3535 * rw_exit() will issue a write memory barrier on our behalf,
3536 * and force the changes out to other CPUs (as it must use an
3537 * atomic operation, draining the local CPU's store buffers).
3538 */
3539 cwdi = p->p_cwdi;
3540 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
3541 *retval = cwdi->cwdi_cmask;
3542 cwdi->cwdi_cmask = SCARG(uap, newmask) & ALLPERMS;
3543 rw_exit(&cwdi->cwdi_lock);
3544
3545 return (0);
3546 }
3547
3548 int
3549 dorevoke(struct vnode *vp, kauth_cred_t cred)
3550 {
3551 struct vattr vattr;
3552 int error;
3553 bool revoke;
3554
3555 if ((error = VOP_GETATTR(vp, &vattr, cred)) != 0)
3556 goto out;
3557 if (kauth_cred_geteuid(cred) != vattr.va_uid &&
3558 (error = kauth_authorize_generic(cred,
3559 KAUTH_GENERIC_ISSUSER, NULL)) != 0)
3560 goto out;
3561 mutex_enter(&vp->v_interlock);
3562 revoke = (vp->v_usecount > 1 || (vp->v_iflag & (VI_ALIASED|VI_LAYER)));
3563 mutex_exit(&vp->v_interlock);
3564 if (revoke)
3565 VOP_REVOKE(vp, REVOKEALL);
3566
3567 out:
3568 return (error);
3569 }
3570
3571 /*
3572 * Void all references to file by ripping underlying filesystem
3573 * away from vnode.
3574 */
3575 /* ARGSUSED */
3576 int
3577 sys_revoke(struct lwp *l, const struct sys_revoke_args *uap, register_t *retval)
3578 {
3579 /* {
3580 syscallarg(const char *) path;
3581 } */
3582 struct vnode *vp;
3583 int error;
3584 struct nameidata nd;
3585
3586 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3587 SCARG(uap, path));
3588 if ((error = namei(&nd)) != 0)
3589 return (error);
3590 vp = nd.ni_vp;
3591 error = dorevoke(vp, l->l_cred);
3592 vrele(vp);
3593 return (error);
3594 }
3595
3596 /*
3597 * Convert a user file descriptor to a kernel file entry.
3598 */
3599 int
3600 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
3601 {
3602 struct vnode *vp;
3603 struct file *fp;
3604
3605 if ((fp = fd_getfile(fdp, fd)) == NULL)
3606 return (EBADF);
3607
3608 FILE_USE(fp);
3609
3610 if (fp->f_type != DTYPE_VNODE) {
3611 FILE_UNUSE(fp, NULL);
3612 return (EINVAL);
3613 }
3614
3615 vp = (struct vnode *)fp->f_data;
3616 if (vp->v_type == VBAD) {
3617 FILE_UNUSE(fp, NULL);
3618 return (EBADF);
3619 }
3620
3621 *fpp = fp;
3622 return (0);
3623 }
3624