1 /* $NetBSD: vfs_syscalls.c,v 1.331.2.2 2007/12/27 00:46:19 mjf Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.331.2.2 2007/12/27 00:46:19 mjf Exp $");
41
42 #include "opt_compat_netbsd.h"
43 #include "opt_compat_43.h"
44 #include "opt_fileassoc.h"
45 #include "fss.h"
46 #include "veriexec.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/filedesc.h>
52 #include <sys/kernel.h>
53 #include <sys/file.h>
54 #include <sys/stat.h>
55 #include <sys/vnode.h>
56 #include <sys/mount.h>
57 #include <sys/proc.h>
58 #include <sys/uio.h>
59 #include <sys/malloc.h>
60 #include <sys/kmem.h>
61 #include <sys/dirent.h>
62 #include <sys/sysctl.h>
63 #include <sys/syscallargs.h>
64 #include <sys/vfs_syscalls.h>
65 #include <sys/ktrace.h>
66 #ifdef FILEASSOC
67 #include <sys/fileassoc.h>
68 #endif /* FILEASSOC */
69 #include <sys/verified_exec.h>
70 #include <sys/kauth.h>
71
72 #include <miscfs/genfs/genfs.h>
73 #include <miscfs/syncfs/syncfs.h>
74
75 #ifdef COMPAT_30
76 #include "opt_nfsserver.h"
77 #include <nfs/rpcv2.h>
78 #endif
79 #include <nfs/nfsproto.h>
80 #ifdef COMPAT_30
81 #include <nfs/nfs.h>
82 #include <nfs/nfs_var.h>
83 #endif
84
85 #if NFSS > 0
86 #include <dev/fssvar.h>
87 #endif
88
89 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount struct");
90
91 static int change_dir(struct nameidata *, struct lwp *);
92 static int change_flags(struct vnode *, u_long, struct lwp *);
93 static int change_mode(struct vnode *, int, struct lwp *l);
94 static int change_owner(struct vnode *, uid_t, gid_t, struct lwp *, int);
95
96 void checkdirs(struct vnode *);
97
98 int dovfsusermount = 0;
99
100 /*
101 * Virtual File System System Calls
102 */
103
104 /*
105 * Mount a file system.
106 */
107
108 #if defined(COMPAT_09) || defined(COMPAT_43)
109 /*
110 * This table is used to maintain compatibility with 4.3BSD
111 * and NetBSD 0.9 mount syscalls. Note, the order is important!
112 *
113 * Do not modify this table. It should only contain filesystems
114 * supported by NetBSD 0.9 and 4.3BSD.
115 */
116 const char * const mountcompatnames[] = {
117 NULL, /* 0 = MOUNT_NONE */
118 MOUNT_FFS, /* 1 = MOUNT_UFS */
119 MOUNT_NFS, /* 2 */
120 MOUNT_MFS, /* 3 */
121 MOUNT_MSDOS, /* 4 */
122 MOUNT_CD9660, /* 5 = MOUNT_ISOFS */
123 MOUNT_FDESC, /* 6 */
124 MOUNT_KERNFS, /* 7 */
125 NULL, /* 8 = MOUNT_DEVFS */
126 MOUNT_AFS, /* 9 */
127 };
128 const int nmountcompatnames = sizeof(mountcompatnames) /
129 sizeof(mountcompatnames[0]);
130 #endif /* COMPAT_09 || COMPAT_43 */
131
132 static int
133 mount_update(struct lwp *l, struct vnode *vp, const char *path, int flags,
134 void *data, size_t *data_len)
135 {
136 struct mount *mp;
137 int error = 0, saved_flags;
138
139 mp = vp->v_mount;
140 saved_flags = mp->mnt_flag;
141
142 /* We can operate only on VV_ROOT nodes. */
143 if ((vp->v_vflag & VV_ROOT) == 0) {
144 error = EINVAL;
145 goto out;
146 }
147
148 /*
149 * We only allow the filesystem to be reloaded if it
150 * is currently mounted read-only.
151 */
152 if (flags & MNT_RELOAD && !(mp->mnt_flag & MNT_RDONLY)) {
153 error = EOPNOTSUPP; /* Needs translation */
154 goto out;
155 }
156
157 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
158 KAUTH_REQ_SYSTEM_MOUNT_UPDATE, mp, KAUTH_ARG(flags), data);
159 if (error)
160 goto out;
161
162 if (vfs_busy(mp, LK_NOWAIT, 0)) {
163 error = EPERM;
164 goto out;
165 }
166
167 mp->mnt_flag &= ~MNT_OP_FLAGS;
168 mp->mnt_flag |= flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
169
170 /*
171 * Set the mount level flags.
172 */
173 if (flags & MNT_RDONLY)
174 mp->mnt_flag |= MNT_RDONLY;
175 else if (mp->mnt_flag & MNT_RDONLY)
176 mp->mnt_iflag |= IMNT_WANTRDWR;
177 mp->mnt_flag &=
178 ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
179 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
180 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP);
181 mp->mnt_flag |= flags &
182 (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
183 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
184 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
185 MNT_IGNORE);
186
187 error = VFS_MOUNT(mp, path, data, data_len);
188
189 #if defined(COMPAT_30) && defined(NFSSERVER)
190 if (error && data != NULL) {
191 int error2;
192
193 /* Update failed; let's try and see if it was an
194 * export request. */
195 error2 = nfs_update_exports_30(mp, path, data, l);
196
197 /* Only update error code if the export request was
198 * understood but some problem occurred while
199 * processing it. */
200 if (error2 != EJUSTRETURN)
201 error = error2;
202 }
203 #endif
204 if (mp->mnt_iflag & IMNT_WANTRDWR)
205 mp->mnt_flag &= ~MNT_RDONLY;
206 if (error)
207 mp->mnt_flag = saved_flags;
208 mp->mnt_flag &= ~MNT_OP_FLAGS;
209 mp->mnt_iflag &= ~IMNT_WANTRDWR;
210 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0) {
211 if (mp->mnt_syncer == NULL)
212 error = vfs_allocate_syncvnode(mp);
213 } else {
214 if (mp->mnt_syncer != NULL)
215 vfs_deallocate_syncvnode(mp);
216 }
217 vfs_unbusy(mp);
218
219 out:
220 return (error);
221 }
222
223 static int
224 mount_get_vfsops(const char *fstype, struct vfsops **vfsops)
225 {
226 char fstypename[sizeof(((struct statvfs *)NULL)->f_fstypename)];
227 int error;
228
229 /* Copy file-system type from userspace. */
230 error = copyinstr(fstype, fstypename, sizeof(fstypename), NULL);
231 if (error) {
232 #if defined(COMPAT_09) || defined(COMPAT_43)
233 /*
234 * Historically, filesystem types were identified by numbers.
235 * If we get an integer for the filesystem type instead of a
236 * string, we check to see if it matches one of the historic
237 * filesystem types.
238 */
239 u_long fsindex = (u_long)fstype;
240 if (fsindex >= nmountcompatnames ||
241 mountcompatnames[fsindex] == NULL)
242 return ENODEV;
243 strlcpy(fstypename, mountcompatnames[fsindex],
244 sizeof(fstypename));
245 #else
246 return error;
247 #endif
248 }
249
250 #ifdef COMPAT_10
251 /* Accept `ufs' as an alias for `ffs'. */
252 if (strcmp(fstypename, "ufs") == 0)
253 fstypename[0] = 'f';
254 #endif
255
256 if ((*vfsops = vfs_getopsbyname(fstypename)) == NULL)
257 return ENODEV;
258 return 0;
259 }
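
/*
 * Historical note with a hypothetical sketch: the numeric fallback in
 * mount_get_vfsops() above exists because 4.3BSD and NetBSD 0.9
 * binaries passed a small integer, not a string, as the filesystem
 * type.  A NetBSD 0.9 style call such as
 *
 *	mount(1, "/mnt", 0, &args);		(1 was MOUNT_UFS)
 *
 * reaches the kernel with fstype == (const char *)1; copyinstr()
 * faults on that address, and the index is then mapped through
 * mountcompatnames[] to "ffs".
 */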
260
261 static int
262 mount_domount(struct lwp *l, struct vnode **vpp, struct vfsops *vfsops,
263 const char *path, int flags, void *data, size_t *data_len)
264 {
265 struct mount *mp = NULL;
266 struct vnode *vp = *vpp;
267 struct vattr va;
268 int error;
269
270 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
271 KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
272 if (error)
273 return error;
274
275 /* Can't make a non-dir a mount-point (from here anyway). */
276 if (vp->v_type != VDIR)
277 return ENOTDIR;
278
279 /*
280 * If the user is not root, ensure that they own the directory
281 * onto which we are attempting to mount.
282 */
283 if ((error = VOP_GETATTR(vp, &va, l->l_cred)) != 0 ||
284 (va.va_uid != kauth_cred_geteuid(l->l_cred) &&
285 (error = kauth_authorize_generic(l->l_cred,
286 KAUTH_GENERIC_ISSUSER, NULL)) != 0)) {
287 return error;
288 }
289
290 if (flags & MNT_EXPORTED)
291 return EINVAL;
292
293 if ((error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
294 return error;
295
296 /*
297 * Refuse the request if a file system is already mounted on this vnode.
298 */
299 if (vp->v_mountedhere != NULL)
300 return EBUSY;
301
302 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
303
304 mp->mnt_op = vfsops;
305
306 TAILQ_INIT(&mp->mnt_vnodelist);
307 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
308 simple_lock_init(&mp->mnt_slock);
309 (void)vfs_busy(mp, LK_NOWAIT, 0);
310
311 mp->mnt_vnodecovered = vp;
312 mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);
313 mp->mnt_unmounter = NULL;
314 mount_initspecific(mp);
315
316 /*
317 * The underlying file system may refuse the mount for
318 * various reasons. Allow the user to force it to happen.
319 *
320 * Set the mount level flags.
321 */
322 mp->mnt_flag = flags &
323 (MNT_FORCE | MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
324 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
325 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
326 MNT_IGNORE | MNT_RDONLY);
327
328 error = VFS_MOUNT(mp, path, data, data_len);
329 mp->mnt_flag &= ~MNT_OP_FLAGS;
330
331 /*
332 * Put the new filesystem on the mount list after root.
333 */
334 cache_purge(vp);
335 if (error != 0) {
336 vp->v_mountedhere = NULL;
337 mp->mnt_op->vfs_refcount--;
338 vfs_unbusy(mp);
339 vfs_destroy(mp);
340 return error;
341 }
342
343 mp->mnt_iflag &= ~IMNT_WANTRDWR;
344 vp->v_mountedhere = mp;
345 mutex_enter(&mountlist_lock);
346 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
347 mutex_exit(&mountlist_lock);
348 VOP_UNLOCK(vp, 0);
349 checkdirs(vp);
350 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
351 error = vfs_allocate_syncvnode(mp);
352 vfs_unbusy(mp);
353 (void) VFS_STATVFS(mp, &mp->mnt_stat);
354 error = VFS_START(mp, 0);
355 if (error)
356 vrele(vp);
357 *vpp = NULL;
358 return error;
359 }
360
361 static int
362 mount_getargs(struct lwp *l, struct vnode *vp, const char *path, int flags,
363 void *data, size_t *data_len)
364 {
365 struct mount *mp;
366 int error;
367
368 /* If MNT_GETARGS is specified, it should be the only flag. */
369 if (flags & ~MNT_GETARGS)
370 return EINVAL;
371
372 mp = vp->v_mount;
373
374 /* XXX: probably some notion of "can see" here if we want isolation. */
375 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
376 KAUTH_REQ_SYSTEM_MOUNT_GET, mp, data, NULL);
377 if (error)
378 return error;
379
380 if ((vp->v_vflag & VV_ROOT) == 0)
381 return EINVAL;
382
383 if (vfs_busy(mp, LK_NOWAIT, 0))
384 return EPERM;
385
386 mp->mnt_flag &= ~MNT_OP_FLAGS;
387 mp->mnt_flag |= MNT_GETARGS;
388 error = VFS_MOUNT(mp, path, data, data_len);
389 mp->mnt_flag &= ~MNT_OP_FLAGS;
390
391 vfs_unbusy(mp);
392 return (error);
393 }
394
395 #ifdef COMPAT_40
396 /* ARGSUSED */
397 int
398 compat_40_sys_mount(struct lwp *l, const struct compat_40_sys_mount_args *uap, register_t *retval)
399 {
400 /* {
401 syscallarg(const char *) type;
402 syscallarg(const char *) path;
403 syscallarg(int) flags;
404 syscallarg(void *) data;
405 } */
406 register_t dummy;
407
408 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
409 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE, 0, &dummy);
410 }
411 #endif
412
413 int
414 sys___mount50(struct lwp *l, const struct sys___mount50_args *uap, register_t *retval)
415 {
416 /* {
417 syscallarg(const char *) type;
418 syscallarg(const char *) path;
419 syscallarg(int) flags;
420 syscallarg(void *) data;
421 syscallarg(size_t) data_len;
422 } */
423
424 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
425 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE,
426 SCARG(uap, data_len), retval);
427 }
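
/*
 * A minimal userland sketch, assuming the libc mount(2) stub maps to
 * the __mount50 entry above (i.e. it takes a data_len argument), and
 * using FFS with a hypothetical device name:
 *
 *	struct ufs_args args;
 *	memset(&args, 0, sizeof(args));
 *	args.fspec = __UNCONST("/dev/wd0e");
 *	if (mount(MOUNT_FFS, "/mnt", MNT_RDONLY, &args,
 *	    sizeof(args)) == -1)
 *		err(1, "mount");
 *
 * The kernel copies in data_len bytes of the argument structure (or a
 * filesystem-dependent default when data_len is 0, see do_sys_mount()
 * below) before calling VFS_MOUNT().
 */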
428
429 int
430 do_sys_mount(struct lwp *l, struct vfsops *vfsops, const char *type,
431 const char *path, int flags, void *data, enum uio_seg data_seg,
432 size_t data_len, register_t *retval)
433 {
434 struct vnode *vp;
435 struct nameidata nd;
436 void *data_buf = data;
437 int error;
438
439 /*
440 * Get vnode to be covered
441 */
442 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
443 if ((error = namei(&nd)) != 0)
444 return (error);
445 vp = nd.ni_vp;
446
447 /*
448 * A lookup in VFS_MOUNT might result in an attempt to
449 * lock this vnode again, so make the lock recursive.
450 */
451 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_SETRECURSE);
452
453 if (vfsops == NULL) {
454 if (flags & (MNT_GETARGS | MNT_UPDATE))
455 vfsops = vp->v_mount->mnt_op;
456 else {
457 /* 'type' is userspace */
458 error = mount_get_vfsops(type, &vfsops);
459 if (error != 0)
460 goto done;
461 }
462 }
463
464 if (data != NULL && data_seg == UIO_USERSPACE) {
465 if (data_len == 0) {
466 /* No length supplied, use default for filesystem */
467 data_len = vfsops->vfs_min_mount_data;
468 if (data_len > VFS_MAX_MOUNT_DATA) {
469 /* maybe a force loaded old LKM */
470 error = EINVAL;
471 goto done;
472 }
473 #ifdef COMPAT_30
474 /* Hopefully a longer buffer won't make copyin() fail */
475 if (flags & MNT_UPDATE
476 && data_len < sizeof (struct mnt_export_args30))
477 data_len = sizeof (struct mnt_export_args30);
478 #endif
479 }
480 data_buf = malloc(data_len, M_TEMP, M_WAITOK);
481
482 /* NFS needs the buffer even for mnt_getargs .... */
483 error = copyin(data, data_buf, data_len);
484 if (error != 0)
485 goto done;
486 }
487
488 if (flags & MNT_GETARGS) {
489 if (data_len == 0) {
490 error = EINVAL;
491 goto done;
492 }
493 error = mount_getargs(l, vp, path, flags, data_buf, &data_len);
494 if (error != 0)
495 goto done;
496 if (data_seg == UIO_USERSPACE)
497 error = copyout(data_buf, data, data_len);
498 *retval = data_len;
499 } else if (flags & MNT_UPDATE) {
500 error = mount_update(l, vp, path, flags, data_buf, &data_len);
501 } else {
502 /* Locking is handled internally in mount_domount(). */
503 error = mount_domount(l, &vp, vfsops, path, flags, data_buf,
504 &data_len);
505 }
506
507 done:
508 if (vp)
509 vput(vp);
510 if (data_buf != data)
511 free(data_buf, M_TEMP);
512 return (error);
513 }
514
515 /*
516 * Scan all active processes to see if any of them have a current
517 * or root directory onto which the new filesystem has just been
518 * mounted. If so, replace them with the new mount point.
519 */
520 void
521 checkdirs(struct vnode *olddp)
522 {
523 struct cwdinfo *cwdi;
524 struct vnode *newdp;
525 struct proc *p;
526
527 if (olddp->v_usecount == 1)
528 return;
529 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
530 panic("mount: lost mount");
531 mutex_enter(&proclist_lock);
532 PROCLIST_FOREACH(p, &allproc) {
533 cwdi = p->p_cwdi;
534 if (!cwdi)
535 continue;
536 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
537 if (cwdi->cwdi_cdir == olddp) {
538 vrele(cwdi->cwdi_cdir);
539 VREF(newdp);
540 cwdi->cwdi_cdir = newdp;
541 }
542 if (cwdi->cwdi_rdir == olddp) {
543 vrele(cwdi->cwdi_rdir);
544 VREF(newdp);
545 cwdi->cwdi_rdir = newdp;
546 }
547 rw_exit(&cwdi->cwdi_lock);
548 }
549 mutex_exit(&proclist_lock);
550 if (rootvnode == olddp) {
551 vrele(rootvnode);
552 VREF(newdp);
553 rootvnode = newdp;
554 }
555 vput(newdp);
556 }
557
558 /*
559 * Unmount a file system.
560 *
561 * Note: unmount takes as its argument a path to the mounted-on vnode,
562 * not the special file (as it did historically).
563 */
564 /* ARGSUSED */
565 int
566 sys_unmount(struct lwp *l, const struct sys_unmount_args *uap, register_t *retval)
567 {
568 /* {
569 syscallarg(const char *) path;
570 syscallarg(int) flags;
571 } */
572 struct vnode *vp;
573 struct mount *mp;
574 int error;
575 struct nameidata nd;
576
577 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
578 SCARG(uap, path));
579 if ((error = namei(&nd)) != 0)
580 return (error);
581 vp = nd.ni_vp;
582 mp = vp->v_mount;
583
584 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
585 KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT, mp, NULL, NULL);
586 if (error) {
587 vput(vp);
588 return (error);
589 }
590
591 /*
592 * Don't allow unmounting the root file system.
593 */
594 if (mp->mnt_flag & MNT_ROOTFS) {
595 vput(vp);
596 return (EINVAL);
597 }
598
599 /*
600 * Must be the root of the filesystem
601 */
602 if ((vp->v_vflag & VV_ROOT) == 0) {
603 vput(vp);
604 return (EINVAL);
605 }
606 vput(vp);
607
608 /*
609 * XXX Freeze syncer. Must do this before locking the
610 * mount point. See dounmount() for details.
611 */
612 mutex_enter(&syncer_mutex);
613
614 if (vfs_busy(mp, 0, 0)) {
615 mutex_exit(&syncer_mutex);
616 return (EBUSY);
617 }
618
619 return (dounmount(mp, SCARG(uap, flags), l));
620 }
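
/*
 * A minimal userland sketch: unmount(2) takes the path of the mount
 * point itself, not the device:
 *
 *	if (unmount("/mnt", 0) == -1 && errno == EBUSY)
 *		(void)unmount("/mnt", MNT_FORCE);
 *
 * The forced retry corresponds to the MNT_FORCE handling in
 * dounmount() below.
 */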
621
622 /*
623 * Do the actual file system unmount. File system is assumed to have been
624 * marked busy by the caller.
625 */
626 int
627 dounmount(struct mount *mp, int flags, struct lwp *l)
628 {
629 struct vnode *coveredvp;
630 int error;
631 int async;
632 int used_syncer;
633
634 #if NVERIEXEC > 0
635 error = veriexec_unmountchk(mp);
636 if (error)
637 return (error);
638 #endif /* NVERIEXEC > 0 */
639
640 mutex_enter(&mountlist_lock);
641 vfs_unbusy(mp);
642 used_syncer = (mp->mnt_syncer != NULL);
643
644 /*
645 * XXX Syncer must be frozen when we get here. This should really
646 * be done on a per-mountpoint basis, but especially the softdep
647 * code possibly called from the syncer doesn't exactly work on a
648 * per-mountpoint basis, so the softdep code would become a maze
649 * of vfs_busy() calls.
650 *
651 * The caller of dounmount() must acquire syncer_mutex because
652 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
653 * order, and we must preserve that order to avoid deadlock.
654 *
655 * So, if the file system did not use the syncer, now is
656 * the time to release the syncer_mutex.
657 */
658 if (used_syncer == 0)
659 mutex_exit(&syncer_mutex);
660
661 mp->mnt_iflag |= IMNT_UNMOUNT;
662 mp->mnt_unmounter = l;
663 mutex_exit(&mountlist_lock); /* XXX */
664 lockmgr(&mp->mnt_lock, LK_DRAIN, NULL);
665
666 async = mp->mnt_flag & MNT_ASYNC;
667 mp->mnt_flag &= ~MNT_ASYNC;
668 cache_purgevfs(mp); /* remove cache entries for this file sys */
669 if (mp->mnt_syncer != NULL)
670 vfs_deallocate_syncvnode(mp);
671 error = 0;
672 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
673 #if NFSS > 0
674 error = fss_umount_hook(mp, (flags & MNT_FORCE));
675 #endif
676 if (error == 0)
677 error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
678 }
679 if (error == 0 || (flags & MNT_FORCE))
680 error = VFS_UNMOUNT(mp, flags);
681 if (error) {
682 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
683 (void) vfs_allocate_syncvnode(mp);
684 mutex_enter(&mountlist_lock);
685 mp->mnt_iflag &= ~IMNT_UNMOUNT;
686 mp->mnt_unmounter = NULL;
687 mp->mnt_flag |= async;
688 mutex_exit(&mountlist_lock); /* XXX */
689 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_REENABLE,
690 NULL);
691 if (used_syncer)
692 mutex_exit(&syncer_mutex);
693 simple_lock(&mp->mnt_slock);
694 while (mp->mnt_wcnt > 0) {
695 wakeup(mp);
696 ltsleep(&mp->mnt_wcnt, PVFS, "mntwcnt1",
697 0, &mp->mnt_slock);
698 }
699 simple_unlock(&mp->mnt_slock);
700 return (error);
701 }
702 mutex_enter(&mountlist_lock);
703 CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
704 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP)
705 coveredvp->v_mountedhere = NULL;
706 if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
707 panic("unmount: dangling vnode");
708 mp->mnt_iflag |= IMNT_GONE;
709 mutex_exit(&mountlist_lock);
710 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
711 if (coveredvp != NULLVP)
712 vrele(coveredvp);
713 if (used_syncer)
714 mutex_exit(&syncer_mutex);
715 simple_lock(&mp->mnt_slock);
716 while (mp->mnt_wcnt > 0) {
717 wakeup(mp);
718 ltsleep(&mp->mnt_wcnt, PVFS, "mntwcnt2", 0, &mp->mnt_slock);
719 }
720 simple_unlock(&mp->mnt_slock);
721 vfs_hooks_unmount(mp);
722 vfs_delref(mp->mnt_op);
723 vfs_destroy(mp);
724 return (0);
725 }
726
727 /*
728 * Sync each mounted filesystem.
729 */
730 #ifdef DEBUG
731 int syncprt = 0;
732 struct ctldebug debug0 = { "syncprt", &syncprt };
733 #endif
734
735 /* ARGSUSED */
736 int
737 sys_sync(struct lwp *l, const void *v, register_t *retval)
738 {
739 struct mount *mp, *nmp;
740 int asyncflag;
741
742 if (l == NULL)
743 l = &lwp0;
744
745 mutex_enter(&mountlist_lock);
746 for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
747 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
748 nmp = mp->mnt_list.cqe_prev;
749 continue;
750 }
751 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
752 asyncflag = mp->mnt_flag & MNT_ASYNC;
753 mp->mnt_flag &= ~MNT_ASYNC;
754 VFS_SYNC(mp, MNT_NOWAIT, l->l_cred);
755 if (asyncflag)
756 mp->mnt_flag |= MNT_ASYNC;
757 }
758 mutex_enter(&mountlist_lock);
759 nmp = mp->mnt_list.cqe_prev;
760 vfs_unbusy(mp);
761
762 }
763 mutex_exit(&mountlist_lock);
764 #ifdef DEBUG
765 if (syncprt)
766 vfs_bufstats();
767 #endif /* DEBUG */
768 return (0);
769 }
770
771 /*
772 * Change filesystem quotas.
773 */
774 /* ARGSUSED */
775 int
776 sys_quotactl(struct lwp *l, const struct sys_quotactl_args *uap, register_t *retval)
777 {
778 /* {
779 syscallarg(const char *) path;
780 syscallarg(int) cmd;
781 syscallarg(int) uid;
782 syscallarg(void *) arg;
783 } */
784 struct mount *mp;
785 int error;
786 struct nameidata nd;
787
788 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
789 SCARG(uap, path));
790 if ((error = namei(&nd)) != 0)
791 return (error);
792 mp = nd.ni_vp->v_mount;
793 error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
794 SCARG(uap, arg));
795 vrele(nd.ni_vp);
796 return (error);
797 }
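
/*
 * A hypothetical userland sketch of the traditional quota interface
 * implemented above (assuming FFS quotas and <ufs/ufs/quota.h>):
 *
 *	struct dqblk dq;
 *	if (quotactl("/home", QCMD(Q_GETQUOTA, USRQUOTA), uid,
 *	    &dq) == -1)
 *		err(1, "quotactl");
 *
 * The command is passed straight through to VFS_QUOTACTL() of the
 * file system containing the named path.
 */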
798
799 int
800 dostatvfs(struct mount *mp, struct statvfs *sp, struct lwp *l, int flags,
801 int root)
802 {
803 struct cwdinfo *cwdi = l->l_proc->p_cwdi;
804 int error = 0;
805
806 /*
807 * If any flag other than MNT_WAIT (or 0) is specified, e.g.
808 * MNT_NOWAIT or MNT_LAZY, do not refresh the fsstat cache;
809 * return the cached statistics instead.
810 */
811 if (flags == MNT_NOWAIT || flags == MNT_LAZY ||
812 (flags != MNT_WAIT && flags != 0)) {
813 memcpy(sp, &mp->mnt_stat, sizeof(*sp));
814 goto done;
815 }
816
817 /* Get the filesystem stats now */
818 memset(sp, 0, sizeof(*sp));
819 if ((error = VFS_STATVFS(mp, sp)) != 0) {
820 return error;
821 }
822
823 if (cwdi->cwdi_rdir == NULL)
824 (void)memcpy(&mp->mnt_stat, sp, sizeof(mp->mnt_stat));
825 done:
826 if (cwdi->cwdi_rdir != NULL) {
827 size_t len;
828 char *bp;
829 char *path = PNBUF_GET();
830
831 bp = path + MAXPATHLEN;
832 *--bp = '\0';
833 rw_enter(&cwdi->cwdi_lock, RW_READER);
834 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp, path,
835 MAXPATHLEN / 2, 0, l);
836 rw_exit(&cwdi->cwdi_lock);
837 if (error) {
838 PNBUF_PUT(path);
839 return error;
840 }
841 len = strlen(bp);
842 /*
843 * Mount points that are below our root are visible to us, so
844 * strip our root's path prefix from f_mntonname and return
845 * them. The rest we cannot see, so we don't allow viewing
846 * their data.
847 */
848 if (strncmp(bp, sp->f_mntonname, len) == 0) {
849 strlcpy(sp->f_mntonname, &sp->f_mntonname[len],
850 sizeof(sp->f_mntonname));
851 if (sp->f_mntonname[0] == '\0')
852 (void)strlcpy(sp->f_mntonname, "/",
853 sizeof(sp->f_mntonname));
854 } else {
855 if (root)
856 (void)strlcpy(sp->f_mntonname, "/",
857 sizeof(sp->f_mntonname));
858 else
859 error = EPERM;
860 }
861 PNBUF_PUT(path);
862 }
863 sp->f_flag = mp->mnt_flag & MNT_VISFLAGMASK;
864 return error;
865 }
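
/*
 * Worked example of the f_mntonname fix-up above: for a process
 * chrooted to "/home/jail", getcwd_common() yields "/home/jail" as
 * the path of its root.  A file system mounted on "/home/jail/tmp"
 * has that prefix stripped and is reported as "/tmp"; a mount point
 * outside the chroot is reported as "/" when "root" is set, and
 * rejected with EPERM otherwise.
 */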
866
867 /*
868 * Get filesystem statistics by path.
869 */
870 int
871 do_sys_pstatvfs(struct lwp *l, const char *path, int flags, struct statvfs *sb)
872 {
873 struct mount *mp;
874 int error;
875 struct nameidata nd;
876
877 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
878 if ((error = namei(&nd)) != 0)
879 return error;
880 mp = nd.ni_vp->v_mount;
881 error = dostatvfs(mp, sb, l, flags, 1);
882 vrele(nd.ni_vp);
883 return error;
884 }
885
886 /* ARGSUSED */
887 int
888 sys_statvfs1(struct lwp *l, const struct sys_statvfs1_args *uap, register_t *retval)
889 {
890 /* {
891 syscallarg(const char *) path;
892 syscallarg(struct statvfs *) buf;
893 syscallarg(int) flags;
894 } */
895 struct statvfs *sb;
896 int error;
897
898 sb = STATVFSBUF_GET();
899 error = do_sys_pstatvfs(l, SCARG(uap, path), SCARG(uap, flags), sb);
900 if (error == 0)
901 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
902 STATVFSBUF_PUT(sb);
903 return error;
904 }
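
/*
 * A minimal userland sketch of statvfs1(2); ST_WAIT/ST_NOWAIT are the
 * userland names for the MNT_WAIT/MNT_NOWAIT cache-control flags:
 *
 *	struct statvfs sv;
 *	if (statvfs1("/usr", &sv, ST_WAIT) == -1)
 *		err(1, "statvfs1");
 *	printf("%s on %s\n", sv.f_mntfromname, sv.f_mntonname);
 */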
905
906 /*
907 * Get filesystem statistics by fd.
908 */
909 int
910 do_sys_fstatvfs(struct lwp *l, int fd, int flags, struct statvfs *sb)
911 {
912 struct proc *p = l->l_proc;
913 struct file *fp;
914 struct mount *mp;
915 int error;
916
917 /* getvnode() will use the descriptor for us */
918 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
919 return (error);
920 mp = ((struct vnode *)fp->f_data)->v_mount;
921 error = dostatvfs(mp, sb, l, flags, 1);
922 FILE_UNUSE(fp, l);
923 return error;
924 }
925
926 /* ARGSUSED */
927 int
928 sys_fstatvfs1(struct lwp *l, const struct sys_fstatvfs1_args *uap, register_t *retval)
929 {
930 /* {
931 syscallarg(int) fd;
932 syscallarg(struct statvfs *) buf;
933 syscallarg(int) flags;
934 } */
935 struct statvfs *sb;
936 int error;
937
938 sb = STATVFSBUF_GET();
939 error = do_sys_fstatvfs(l, SCARG(uap, fd), SCARG(uap, flags), sb);
940 if (error == 0)
941 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
942 STATVFSBUF_PUT(sb);
943 return error;
944 }
945
946
947 /*
948 * Get statistics on all filesystems.
949 */
950 int
951 do_sys_getvfsstat(struct lwp *l, void *sfsp, size_t bufsize, int flags,
952 int (*copyfn)(const void *, void *, size_t), size_t entry_sz,
953 register_t *retval)
954 {
955 int root = 0;
956 struct proc *p = l->l_proc;
957 struct mount *mp, *nmp;
958 struct statvfs *sb;
959 size_t count, maxcount;
960 int error = 0;
961
962 sb = STATVFSBUF_GET();
963 maxcount = bufsize / entry_sz;
964 mutex_enter(&mountlist_lock);
965 count = 0;
966 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
967 mp = nmp) {
968 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
969 nmp = CIRCLEQ_NEXT(mp, mnt_list);
970 continue;
971 }
972 if (sfsp && count < maxcount) {
973 error = dostatvfs(mp, sb, l, flags, 0);
974 if (error) {
975 mutex_enter(&mountlist_lock);
976 nmp = CIRCLEQ_NEXT(mp, mnt_list);
977 vfs_unbusy(mp);
978 continue;
979 }
980 error = copyfn(sb, sfsp, entry_sz);
981 if (error) {
982 vfs_unbusy(mp);
983 goto out;
984 }
985 sfsp = (char *)sfsp + entry_sz;
986 root |= strcmp(sb->f_mntonname, "/") == 0;
987 }
988 count++;
989 mutex_enter(&mountlist_lock);
990 nmp = CIRCLEQ_NEXT(mp, mnt_list);
991 vfs_unbusy(mp);
992 }
993
994 mutex_exit(&mountlist_lock);
995 if (root == 0 && p->p_cwdi->cwdi_rdir) {
996 /*
997 * fake a root entry
998 */
999 error = dostatvfs(p->p_cwdi->cwdi_rdir->v_mount,
1000 sb, l, flags, 1);
1001 if (error != 0)
1002 goto out;
1003 if (sfsp)
1004 error = copyfn(sb, sfsp, entry_sz);
1005 count++;
1006 }
1007 if (sfsp && count > maxcount)
1008 *retval = maxcount;
1009 else
1010 *retval = count;
1011 out:
1012 STATVFSBUF_PUT(sb);
1013 return error;
1014 }
1015
1016 int
1017 sys_getvfsstat(struct lwp *l, const struct sys_getvfsstat_args *uap, register_t *retval)
1018 {
1019 /* {
1020 syscallarg(struct statvfs *) buf;
1021 syscallarg(size_t) bufsize;
1022 syscallarg(int) flags;
1023 } */
1024
1025 return do_sys_getvfsstat(l, SCARG(uap, buf), SCARG(uap, bufsize),
1026 SCARG(uap, flags), copyout, sizeof (struct statvfs), retval);
1027 }
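
/*
 * A minimal userland sketch of the usual two-pass getvfsstat(2)
 * idiom; a NULL buffer still returns the number of mounted file
 * systems (see do_sys_getvfsstat() above), error handling omitted:
 *
 *	int n = getvfsstat(NULL, 0, ST_NOWAIT);
 *	struct statvfs *sv = calloc(n, sizeof(*sv));
 *	n = getvfsstat(sv, n * sizeof(*sv), ST_NOWAIT);
 *
 * A file system mounted between the two calls may change the count.
 */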
1028
1029 /*
1030 * Change current working directory to a given file descriptor.
1031 */
1032 /* ARGSUSED */
1033 int
1034 sys_fchdir(struct lwp *l, const struct sys_fchdir_args *uap, register_t *retval)
1035 {
1036 /* {
1037 syscallarg(int) fd;
1038 } */
1039 struct proc *p = l->l_proc;
1040 struct filedesc *fdp = p->p_fd;
1041 struct cwdinfo *cwdi;
1042 struct vnode *vp, *tdp;
1043 struct mount *mp;
1044 struct file *fp;
1045 int error;
1046
1047 /* getvnode() will use the descriptor for us */
1048 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1049 return (error);
1050 vp = (struct vnode *)fp->f_data;
1051
1052 VREF(vp);
1053 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1054 if (vp->v_type != VDIR)
1055 error = ENOTDIR;
1056 else
1057 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1058 if (error) {
1059 vput(vp);
1060 goto out;
1061 }
1062 while ((mp = vp->v_mountedhere) != NULL) {
1063 if (vfs_busy(mp, 0, 0))
1064 continue;
1065
1066 vput(vp);
1067 error = VFS_ROOT(mp, &tdp);
1068 vfs_unbusy(mp);
1069 if (error)
1070 goto out;
1071 vp = tdp;
1072 }
1073 VOP_UNLOCK(vp, 0);
1074
1075 /*
1076 * Disallow changing to a directory not under the process's
1077 * current root directory (if there is one).
1078 */
1079 cwdi = p->p_cwdi;
1080 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1081 if (cwdi->cwdi_rdir && !vn_isunder(vp, NULL, l)) {
1082 vrele(vp);
1083 error = EPERM; /* operation not permitted */
1084 } else {
1085 vrele(cwdi->cwdi_cdir);
1086 cwdi->cwdi_cdir = vp;
1087 }
1088 rw_exit(&cwdi->cwdi_lock);
1089
1090 out:
1091 FILE_UNUSE(fp, l);
1092 return (error);
1093 }
1094
1095 /*
1096 * Change this process's notion of the root directory to a given file
1097 * descriptor.
1098 */
1099 int
1100 sys_fchroot(struct lwp *l, const struct sys_fchroot_args *uap, register_t *retval)
1101 {
1102 struct proc *p = l->l_proc;
1103 struct filedesc *fdp = p->p_fd;
1104 struct cwdinfo *cwdi;
1105 struct vnode *vp;
1106 struct file *fp;
1107 int error;
1108
1109 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1110 KAUTH_REQ_SYSTEM_CHROOT_FCHROOT, NULL, NULL, NULL)) != 0)
1111 return error;
1112 /* getvnode() will use the descriptor for us */
1113 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1114 return error;
1115 vp = (struct vnode *) fp->f_data;
1116 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1117 if (vp->v_type != VDIR)
1118 error = ENOTDIR;
1119 else
1120 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1121 VOP_UNLOCK(vp, 0);
1122 if (error)
1123 goto out;
1124 VREF(vp);
1125
1126 /*
1127 * Prevent escaping from the chroot: if the current working
1128 * directory is not under the new root, it could later be used
1129 * to walk back out, so silently chdir to the new root.
1130 */
1131 cwdi = p->p_cwdi;
1132 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1133 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1134 /*
1135 * XXX would be more failsafe to change directory to a
1136 * deadfs node here instead
1137 */
1138 vrele(cwdi->cwdi_cdir);
1139 VREF(vp);
1140 cwdi->cwdi_cdir = vp;
1141 }
1142
1143 if (cwdi->cwdi_rdir != NULL)
1144 vrele(cwdi->cwdi_rdir);
1145 cwdi->cwdi_rdir = vp;
1146 rw_exit(&cwdi->cwdi_lock);
1147
1148 out:
1149 FILE_UNUSE(fp, l);
1150 return (error);
1151 }
1152
1153 /*
1154 * Change current working directory (``.'').
1155 */
1156 /* ARGSUSED */
1157 int
1158 sys_chdir(struct lwp *l, const struct sys_chdir_args *uap, register_t *retval)
1159 {
1160 /* {
1161 syscallarg(const char *) path;
1162 } */
1163 struct proc *p = l->l_proc;
1164 struct cwdinfo *cwdi;
1165 int error;
1166 struct nameidata nd;
1167
1168 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1169 SCARG(uap, path));
1170 if ((error = change_dir(&nd, l)) != 0)
1171 return (error);
1172 cwdi = p->p_cwdi;
1173 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1174 vrele(cwdi->cwdi_cdir);
1175 cwdi->cwdi_cdir = nd.ni_vp;
1176 rw_exit(&cwdi->cwdi_lock);
1177 return (0);
1178 }
1179
1180 /*
1181 * Change notion of root (``/'') directory.
1182 */
1183 /* ARGSUSED */
1184 int
1185 sys_chroot(struct lwp *l, const struct sys_chroot_args *uap, register_t *retval)
1186 {
1187 /* {
1188 syscallarg(const char *) path;
1189 } */
1190 struct proc *p = l->l_proc;
1191 struct cwdinfo *cwdi;
1192 struct vnode *vp;
1193 int error;
1194 struct nameidata nd;
1195
1196 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1197 KAUTH_REQ_SYSTEM_CHROOT_CHROOT, NULL, NULL, NULL)) != 0)
1198 return (error);
1199 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1200 SCARG(uap, path));
1201 if ((error = change_dir(&nd, l)) != 0)
1202 return (error);
1203
1204 cwdi = p->p_cwdi;
1205 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1206 if (cwdi->cwdi_rdir != NULL)
1207 vrele(cwdi->cwdi_rdir);
1208 vp = nd.ni_vp;
1209 cwdi->cwdi_rdir = vp;
1210
1211 /*
1212 * Prevent escaping from the chroot: if the current working
1213 * directory is not under the new root, it could later be used
1214 * to walk back out, so silently chdir to the new root.
1215 */
1216 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1217 /*
1218 * XXX would be more failsafe to change directory to a
1219 * deadfs node here instead
1220 */
1221 vrele(cwdi->cwdi_cdir);
1222 VREF(vp);
1223 cwdi->cwdi_cdir = vp;
1224 }
1225 rw_exit(&cwdi->cwdi_lock);
1226
1227 return (0);
1228 }
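
/*
 * A hypothetical sketch of the escape that the working-directory
 * check above prevents.  A process that keeps a directory outside
 * the new root, e.g.
 *
 *	chdir("/");
 *	chroot("/var/jail");
 *	chdir("..");		(repeated)
 *
 * could walk back to the real root, because ".." lookups are only
 * stopped at the process's root directory, never below it.  Forcing
 * cwdi_cdir under the new root drops that outside reference.
 */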
1229
1230 /*
1231 * Common routine for chroot and chdir.
1232 */
1233 static int
1234 change_dir(struct nameidata *ndp, struct lwp *l)
1235 {
1236 struct vnode *vp;
1237 int error;
1238
1239 if ((error = namei(ndp)) != 0)
1240 return (error);
1241 vp = ndp->ni_vp;
1242 if (vp->v_type != VDIR)
1243 error = ENOTDIR;
1244 else
1245 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1246
1247 if (error)
1248 vput(vp);
1249 else
1250 VOP_UNLOCK(vp, 0);
1251 return (error);
1252 }
1253
1254 /*
1255 * Check permissions, allocate an open file structure,
1256 * and call the device open routine if any.
1257 */
1258 int
1259 sys_open(struct lwp *l, const struct sys_open_args *uap, register_t *retval)
1260 {
1261 /* {
1262 syscallarg(const char *) path;
1263 syscallarg(int) flags;
1264 syscallarg(int) mode;
1265 } */
1266 struct proc *p = l->l_proc;
1267 struct cwdinfo *cwdi = p->p_cwdi;
1268 struct filedesc *fdp = p->p_fd;
1269 struct file *fp;
1270 struct vnode *vp;
1271 int flags, cmode;
1272 int type, indx, error;
1273 struct flock lf;
1274 struct nameidata nd;
1275
1276 flags = FFLAGS(SCARG(uap, flags));
1277 if ((flags & (FREAD | FWRITE)) == 0)
1278 return (EINVAL);
1279 /* falloc() will use the file descriptor for us */
1280 if ((error = falloc(l, &fp, &indx)) != 0)
1281 return (error);
1282 /* We're going to read cwdi->cwdi_cmask unlocked here. */
1283 cmode = ((SCARG(uap, mode) &~ cwdi->cwdi_cmask) & ALLPERMS) &~ S_ISTXT;
1284 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1285 SCARG(uap, path));
1286 l->l_dupfd = -indx - 1; /* XXX check for fdopen */
1287 if ((error = vn_open(&nd, flags, cmode)) != 0) {
1288 rw_enter(&fdp->fd_lock, RW_WRITER);
1289 FILE_UNUSE(fp, l);
1290 fdp->fd_ofiles[indx] = NULL;
1291 rw_exit(&fdp->fd_lock);
1292 ffree(fp);
1293 if ((error == EDUPFD || error == EMOVEFD) &&
1294 l->l_dupfd >= 0 && /* XXX from fdopen */
1295 (error =
1296 dupfdopen(l, indx, l->l_dupfd, flags, error)) == 0) {
1297 *retval = indx;
1298 return (0);
1299 }
1300 if (error == ERESTART)
1301 error = EINTR;
1302 fdremove(fdp, indx);
1303 return (error);
1304 }
1305
1306 l->l_dupfd = 0;
1307 vp = nd.ni_vp;
1308 fp->f_flag = flags & FMASK;
1309 fp->f_type = DTYPE_VNODE;
1310 fp->f_ops = &vnops;
1311 fp->f_data = vp;
1312 if (flags & (O_EXLOCK | O_SHLOCK)) {
1313 lf.l_whence = SEEK_SET;
1314 lf.l_start = 0;
1315 lf.l_len = 0;
1316 if (flags & O_EXLOCK)
1317 lf.l_type = F_WRLCK;
1318 else
1319 lf.l_type = F_RDLCK;
1320 type = F_FLOCK;
1321 if ((flags & FNONBLOCK) == 0)
1322 type |= F_WAIT;
1323 VOP_UNLOCK(vp, 0);
1324 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1325 if (error) {
1326 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1327 FILE_UNUSE(fp, l);
1328 ffree(fp);
1329 fdremove(fdp, indx);
1330 return (error);
1331 }
1332 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1333 fp->f_flag |= FHASLOCK;
1334 }
1335 VOP_UNLOCK(vp, 0);
1336 *retval = indx;
1337 FILE_SET_MATURE(fp);
1338 FILE_UNUSE(fp, l);
1339 return (0);
1340 }
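
/*
 * A minimal userland sketch of the lock-at-open path handled above
 * (hypothetical path name):
 *
 *	int fd = open("/var/run/example.pid",
 *	    O_RDWR | O_CREAT | O_EXLOCK | O_NONBLOCK, 0644);
 *
 * O_EXLOCK/O_SHLOCK take an flock(2)-style lock as part of the open;
 * without O_NONBLOCK the F_WAIT flag is set above and the open
 * sleeps until the lock can be granted.
 */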
1341
1342 static void
1343 vfs__fhfree(fhandle_t *fhp)
1344 {
1345 size_t fhsize;
1346
1347 if (fhp == NULL) {
1348 return;
1349 }
1350 fhsize = FHANDLE_SIZE(fhp);
1351 kmem_free(fhp, fhsize);
1352 }
1353
1354 /*
1355 * vfs_composefh: compose a filehandle.
1356 */
1357
1358 int
1359 vfs_composefh(struct vnode *vp, fhandle_t *fhp, size_t *fh_size)
1360 {
1361 struct mount *mp;
1362 struct fid *fidp;
1363 int error;
1364 size_t needfhsize;
1365 size_t fidsize;
1366
1367 mp = vp->v_mount;
1368 fidp = NULL;
1369 if (*fh_size < FHANDLE_SIZE_MIN) {
1370 fidsize = 0;
1371 } else {
1372 fidsize = *fh_size - offsetof(fhandle_t, fh_fid);
1373 if (fhp != NULL) {
1374 memset(fhp, 0, *fh_size);
1375 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1376 fidp = &fhp->fh_fid;
1377 }
1378 }
1379 error = VFS_VPTOFH(vp, fidp, &fidsize);
1380 needfhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1381 if (error == 0 && *fh_size < needfhsize) {
1382 error = E2BIG;
1383 }
1384 *fh_size = needfhsize;
1385 return error;
1386 }
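
/*
 * A minimal in-kernel sketch of the sizing protocol implemented by
 * vfs_composefh(): a caller may probe with a too-small buffer, in
 * which case E2BIG is returned and *fh_size is set to the size
 * actually needed:
 *
 *	size_t sz = 0;
 *	error = vfs_composefh(vp, NULL, &sz);	(E2BIG, sz updated)
 *	fhp = kmem_zalloc(sz, KM_SLEEP);
 *	error = vfs_composefh(vp, fhp, &sz);
 *
 * vfs_composefh_alloc() below follows the same probe-then-allocate
 * pattern using VFS_VPTOFH() directly.
 */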
1387
1388 int
1389 vfs_composefh_alloc(struct vnode *vp, fhandle_t **fhpp)
1390 {
1391 struct mount *mp;
1392 fhandle_t *fhp;
1393 size_t fhsize;
1394 size_t fidsize;
1395 int error;
1396
1397 *fhpp = NULL;
1398 mp = vp->v_mount;
1399 fidsize = 0;
1400 error = VFS_VPTOFH(vp, NULL, &fidsize);
1401 KASSERT(error != 0);
1402 if (error != E2BIG) {
1403 goto out;
1404 }
1405 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1406 fhp = kmem_zalloc(fhsize, KM_SLEEP);
1407 if (fhp == NULL) {
1408 error = ENOMEM;
1409 goto out;
1410 }
1411 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1412 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize);
1413 if (error == 0) {
1414 KASSERT((FHANDLE_SIZE(fhp) == fhsize &&
1415 FHANDLE_FILEID(fhp)->fid_len == fidsize));
1416 *fhpp = fhp;
1417 } else {
1418 kmem_free(fhp, fhsize);
1419 }
1420 out:
1421 return error;
1422 }
1423
1424 void
1425 vfs_composefh_free(fhandle_t *fhp)
1426 {
1427
1428 vfs__fhfree(fhp);
1429 }
1430
1431 /*
1432 * vfs_fhtovp: lookup a vnode by a filehandle.
1433 */
1434
1435 int
1436 vfs_fhtovp(fhandle_t *fhp, struct vnode **vpp)
1437 {
1438 struct mount *mp;
1439 int error;
1440
1441 *vpp = NULL;
1442 mp = vfs_getvfs(FHANDLE_FSID(fhp));
1443 if (mp == NULL) {
1444 error = ESTALE;
1445 goto out;
1446 }
1447 if (mp->mnt_op->vfs_fhtovp == NULL) {
1448 error = EOPNOTSUPP;
1449 goto out;
1450 }
1451 error = VFS_FHTOVP(mp, FHANDLE_FILEID(fhp), vpp);
1452 out:
1453 return error;
1454 }
1455
1456 /*
1457 * vfs_copyinfh_alloc: allocate and copyin a filehandle, given
1458 * the needed size.
1459 */
1460
1461 int
1462 vfs_copyinfh_alloc(const void *ufhp, size_t fhsize, fhandle_t **fhpp)
1463 {
1464 fhandle_t *fhp;
1465 int error;
1466
1467 *fhpp = NULL;
1468 if (fhsize > FHANDLE_SIZE_MAX) {
1469 return EINVAL;
1470 }
1471 if (fhsize < FHANDLE_SIZE_MIN) {
1472 return EINVAL;
1473 }
1474 again:
1475 fhp = kmem_alloc(fhsize, KM_SLEEP);
1476 if (fhp == NULL) {
1477 return ENOMEM;
1478 }
1479 error = copyin(ufhp, fhp, fhsize);
1480 if (error == 0) {
1481 /* XXX this check shouldn't be here */
1482 if (FHANDLE_SIZE(fhp) == fhsize) {
1483 *fhpp = fhp;
1484 return 0;
1485 } else if (fhsize == NFSX_V2FH && FHANDLE_SIZE(fhp) < fhsize) {
1486 /*
1487 * a kludge for nfsv2 padded handles.
1488 */
1489 size_t sz;
1490
1491 sz = FHANDLE_SIZE(fhp);
1492 kmem_free(fhp, fhsize);
1493 fhsize = sz;
1494 goto again;
1495 } else {
1496 /*
1497 * userland told us the wrong size.
1498 */
1499 error = EINVAL;
1500 }
1501 }
1502 kmem_free(fhp, fhsize);
1503 return error;
1504 }
1505
1506 void
1507 vfs_copyinfh_free(fhandle_t *fhp)
1508 {
1509
1510 vfs__fhfree(fhp);
1511 }
1512
1513 /*
1514 * Get file handle system call
1515 */
1516 int
1517 sys___getfh30(struct lwp *l, const struct sys___getfh30_args *uap, register_t *retval)
1518 {
1519 /* {
1520 syscallarg(char *) fname;
1521 syscallarg(fhandle_t *) fhp;
1522 syscallarg(size_t *) fh_size;
1523 } */
1524 struct vnode *vp;
1525 fhandle_t *fh;
1526 int error;
1527 struct nameidata nd;
1528 size_t sz;
1529 size_t usz;
1530
1531 /*
1532 * Must be super user
1533 */
1534 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1535 0, NULL, NULL, NULL);
1536 if (error)
1537 return (error);
1538 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1539 SCARG(uap, fname));
1540 error = namei(&nd);
1541 if (error)
1542 return (error);
1543 vp = nd.ni_vp;
1544 error = vfs_composefh_alloc(vp, &fh);
1545 vput(vp);
1546 if (error != 0) {
1547 goto out;
1548 }
1549 error = copyin(SCARG(uap, fh_size), &usz, sizeof(size_t));
1550 if (error != 0) {
1551 goto out;
1552 }
1553 sz = FHANDLE_SIZE(fh);
1554 error = copyout(&sz, SCARG(uap, fh_size), sizeof(size_t));
1555 if (error != 0) {
1556 goto out;
1557 }
1558 if (usz >= sz) {
1559 error = copyout(fh, SCARG(uap, fhp), sz);
1560 } else {
1561 error = E2BIG;
1562 }
1563 out:
1564 vfs_composefh_free(fh);
1565 return (error);
1566 }
1567
1568 /*
1569 * Open a file given a file handle.
1570 *
1571 * Check permissions, allocate an open file structure,
1572 * and call the device open routine if any.
1573 */
1574
1575 int
1576 dofhopen(struct lwp *l, const void *ufhp, size_t fhsize, int oflags,
1577 register_t *retval)
1578 {
1579 struct filedesc *fdp = l->l_proc->p_fd;
1580 struct file *fp;
1581 struct vnode *vp = NULL;
1582 kauth_cred_t cred = l->l_cred;
1583 struct file *nfp;
1584 int type, indx, error=0;
1585 struct flock lf;
1586 struct vattr va;
1587 fhandle_t *fh;
1588 int flags;
1589
1590 /*
1591 * Must be super user
1592 */
1593 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1594 0, NULL, NULL, NULL)))
1595 return (error);
1596
1597 flags = FFLAGS(oflags);
1598 if ((flags & (FREAD | FWRITE)) == 0)
1599 return (EINVAL);
1600 if ((flags & O_CREAT))
1601 return (EINVAL);
1602 /* falloc() will use the file descriptor for us */
1603 if ((error = falloc(l, &nfp, &indx)) != 0)
1604 return (error);
1605 fp = nfp;
1606 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1607 if (error != 0) {
1608 goto bad;
1609 }
1610 error = vfs_fhtovp(fh, &vp);
1611 if (error != 0) {
1612 goto bad;
1613 }
1614
1615 /* Now do an effective vn_open */
1616
1617 if (vp->v_type == VSOCK) {
1618 error = EOPNOTSUPP;
1619 goto bad;
1620 }
1621 error = vn_openchk(vp, cred, flags);
1622 if (error != 0)
1623 goto bad;
1624 if (flags & O_TRUNC) {
1625 VOP_UNLOCK(vp, 0); /* XXX */
1626 VOP_LEASE(vp, cred, LEASE_WRITE);
1627 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
1628 VATTR_NULL(&va);
1629 va.va_size = 0;
1630 error = VOP_SETATTR(vp, &va, cred);
1631 if (error)
1632 goto bad;
1633 }
1634 if ((error = VOP_OPEN(vp, flags, cred)) != 0)
1635 goto bad;
1636 if (flags & FWRITE)
1637 vp->v_writecount++;
1638
1639 /* done with modified vn_open, now finish what sys_open does. */
1640
1641 fp->f_flag = flags & FMASK;
1642 fp->f_type = DTYPE_VNODE;
1643 fp->f_ops = &vnops;
1644 fp->f_data = vp;
1645 if (flags & (O_EXLOCK | O_SHLOCK)) {
1646 lf.l_whence = SEEK_SET;
1647 lf.l_start = 0;
1648 lf.l_len = 0;
1649 if (flags & O_EXLOCK)
1650 lf.l_type = F_WRLCK;
1651 else
1652 lf.l_type = F_RDLCK;
1653 type = F_FLOCK;
1654 if ((flags & FNONBLOCK) == 0)
1655 type |= F_WAIT;
1656 VOP_UNLOCK(vp, 0);
1657 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1658 if (error) {
1659 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1660 FILE_UNUSE(fp, l);
1661 ffree(fp);
1662 fdremove(fdp, indx);
1663 return (error);
1664 }
1665 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1666 fp->f_flag |= FHASLOCK;
1667 }
1668 VOP_UNLOCK(vp, 0);
1669 *retval = indx;
1670 FILE_SET_MATURE(fp);
1671 FILE_UNUSE(fp, l);
1672 vfs_copyinfh_free(fh);
1673 return (0);
1674
1675 bad:
1676 FILE_UNUSE(fp, l);
1677 ffree(fp);
1678 fdremove(fdp, indx);
1679 if (vp != NULL)
1680 vput(vp);
1681 vfs_copyinfh_free(fh);
1682 return (error);
1683 }
1684
1685 int
1686 sys___fhopen40(struct lwp *l, const struct sys___fhopen40_args *uap, register_t *retval)
1687 {
1688 /* {
1689 syscallarg(const void *) fhp;
1690 syscallarg(size_t) fh_size;
1691 syscallarg(int) flags;
1692 } */
1693
1694 return dofhopen(l, SCARG(uap, fhp), SCARG(uap, fh_size),
1695 SCARG(uap, flags), retval);
1696 }
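
/*
 * A minimal userland sketch combining getfh(2) and fhopen(2), both of
 * which require superuser as checked above; getfh() uses the same
 * size negotiation as vfs_composefh() (hypothetical path name):
 *
 *	size_t sz = 0;
 *	getfh("/export/file", NULL, &sz);	(fails, sets sz)
 *	void *fh = malloc(sz);
 *	if (getfh("/export/file", fh, &sz) == -1)
 *		err(1, "getfh");
 *	int fd = fhopen(fh, sz, O_RDONLY);
 *
 * The handle remains usable even if the file is renamed, which is
 * what NFS-style file servers rely on.
 */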
1697
1698 int
1699 do_fhstat(struct lwp *l, const void *ufhp, size_t fhsize, struct stat *sb)
1700 {
1701 int error;
1702 fhandle_t *fh;
1703 struct vnode *vp;
1704
1705 /*
1706 * Must be super user
1707 */
1708 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1709 0, NULL, NULL, NULL)))
1710 return (error);
1711
1712 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1713 if (error != 0)
1714 return error;
1715
1716 error = vfs_fhtovp(fh, &vp);
1717 vfs_copyinfh_free(fh);
1718 if (error != 0)
1719 return error;
1720
1721 error = vn_stat(vp, sb, l);
1722 vput(vp);
1723 return error;
1724 }
1725
1726
1727 /* ARGSUSED */
1728 int
1729 sys___fhstat40(struct lwp *l, const struct sys___fhstat40_args *uap, register_t *retval)
1730 {
1731 /* {
1732 syscallarg(const void *) fhp;
1733 syscallarg(size_t) fh_size;
1734 syscallarg(struct stat *) sb;
1735 } */
1736 struct stat sb;
1737 int error;
1738
1739 error = do_fhstat(l, SCARG(uap, fhp), SCARG(uap, fh_size), &sb);
1740 if (error)
1741 return error;
1742 return copyout(&sb, SCARG(uap, sb), sizeof(sb));
1743 }
1744
1745 int
1746 do_fhstatvfs(struct lwp *l, const void *ufhp, size_t fhsize, struct statvfs *sb,
1747 int flags)
1748 {
1749 fhandle_t *fh;
1750 struct mount *mp;
1751 struct vnode *vp;
1752 int error;
1753
1754 /*
1755 * Must be super user
1756 */
1757 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1758 0, NULL, NULL, NULL)))
1759 return error;
1760
1761 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1762 if (error != 0)
1763 return error;
1764
1765 error = vfs_fhtovp(fh, &vp);
1766 vfs_copyinfh_free(fh);
1767 if (error != 0)
1768 return error;
1769
1770 mp = vp->v_mount;
1771 error = dostatvfs(mp, sb, l, flags, 1);
1772 vput(vp);
1773 return error;
1774 }
1775
1776 /* ARGSUSED */
1777 int
1778 sys___fhstatvfs140(struct lwp *l, const struct sys___fhstatvfs140_args *uap, register_t *retval)
1779 {
1780 /* {
1781 syscallarg(const void *) fhp;
1782 syscallarg(size_t) fh_size;
1783 syscallarg(struct statvfs *) buf;
1784 syscallarg(int) flags;
1785 } */
1786 struct statvfs *sb = STATVFSBUF_GET();
1787 int error;
1788
1789 error = do_fhstatvfs(l, SCARG(uap, fhp), SCARG(uap, fh_size), sb,
1790 SCARG(uap, flags));
1791 if (error == 0)
1792 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
1793 STATVFSBUF_PUT(sb);
1794 return error;
1795 }
1796
1797 /*
1798 * Create a special file.
1799 */
1800 /* ARGSUSED */
1801 int
1802 sys_mknod(struct lwp *l, const struct sys_mknod_args *uap, register_t *retval)
1803 {
1804 /* {
1805 syscallarg(const char *) path;
1806 syscallarg(int) mode;
1807 syscallarg(int) dev;
1808 } */
1809 struct proc *p = l->l_proc;
1810 struct vnode *vp;
1811 struct vattr vattr;
1812 int error, optype;
1813 struct nameidata nd;
1814 char *path;
1815 const char *cpath;
1816 enum uio_seg seg = UIO_USERSPACE;
1817
1818 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MKNOD,
1819 0, NULL, NULL, NULL)) != 0)
1820 return (error);
1821
1822 optype = VOP_MKNOD_DESCOFFSET;
1823
1824 VERIEXEC_PATH_GET(SCARG(uap, path), seg, cpath, path);
1825 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, seg, cpath);
1826
1827 if ((error = namei(&nd)) != 0)
1828 goto out;
1829 vp = nd.ni_vp;
1830 if (vp != NULL)
1831 error = EEXIST;
1832 else {
1833 VATTR_NULL(&vattr);
1834 /* We will read cwdi->cwdi_cmask unlocked. */
1835 vattr.va_mode =
1836 (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1837 vattr.va_rdev = SCARG(uap, dev);
1838
1839 switch (SCARG(uap, mode) & S_IFMT) {
1840 case S_IFMT: /* used by badsect to flag bad sectors */
1841 vattr.va_type = VBAD;
1842 break;
1843 case S_IFCHR:
1844 vattr.va_type = VCHR;
1845 break;
1846 case S_IFBLK:
1847 vattr.va_type = VBLK;
1848 break;
1849 case S_IFWHT:
1850 optype = VOP_WHITEOUT_DESCOFFSET;
1851 break;
1852 case S_IFREG:
1853 #if NVERIEXEC > 0
1854 error = veriexec_openchk(l, nd.ni_vp, nd.ni_dirp,
1855 O_CREAT);
1856 #endif /* NVERIEXEC > 0 */
1857 vattr.va_type = VREG;
1858 vattr.va_rdev = VNOVAL;
1859 optype = VOP_CREATE_DESCOFFSET;
1860 break;
1861 default:
1862 error = EINVAL;
1863 break;
1864 }
1865 }
1866 if (!error) {
1867 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1868 switch (optype) {
1869 case VOP_WHITEOUT_DESCOFFSET:
1870 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1871 if (error)
1872 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1873 vput(nd.ni_dvp);
1874 break;
1875
1876 case VOP_MKNOD_DESCOFFSET:
1877 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1878 &nd.ni_cnd, &vattr);
1879 if (error == 0)
1880 vput(nd.ni_vp);
1881 break;
1882
1883 case VOP_CREATE_DESCOFFSET:
1884 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
1885 &nd.ni_cnd, &vattr);
1886 if (error == 0)
1887 vput(nd.ni_vp);
1888 break;
1889 }
1890 } else {
1891 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1892 if (nd.ni_dvp == vp)
1893 vrele(nd.ni_dvp);
1894 else
1895 vput(nd.ni_dvp);
1896 if (vp)
1897 vrele(vp);
1898 }
1899 out:
1900 VERIEXEC_PATH_PUT(path);
1901 return (error);
1902 }
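
/*
 * A minimal userland sketch of mknod(2) creating a character device
 * node (hypothetical major/minor numbers):
 *
 *	if (mknod("/dev/example", S_IFCHR | 0600, makedev(99, 0)) == -1)
 *		err(1, "mknod");
 *
 * The S_IFMT bits of the mode select the branch taken in the switch
 * above; plain S_IFREG is routed to VOP_CREATE() rather than
 * VOP_MKNOD().
 */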
1903
1904 /*
1905 * Create a named pipe.
1906 */
1907 /* ARGSUSED */
1908 int
1909 sys_mkfifo(struct lwp *l, const struct sys_mkfifo_args *uap, register_t *retval)
1910 {
1911 /* {
1912 syscallarg(const char *) path;
1913 syscallarg(int) mode;
1914 } */
1915 struct proc *p = l->l_proc;
1916 struct vattr vattr;
1917 int error;
1918 struct nameidata nd;
1919
1920 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1921 SCARG(uap, path));
1922 if ((error = namei(&nd)) != 0)
1923 return (error);
1924 if (nd.ni_vp != NULL) {
1925 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1926 if (nd.ni_dvp == nd.ni_vp)
1927 vrele(nd.ni_dvp);
1928 else
1929 vput(nd.ni_dvp);
1930 vrele(nd.ni_vp);
1931 return (EEXIST);
1932 }
1933 VATTR_NULL(&vattr);
1934 vattr.va_type = VFIFO;
1935 /* We will read cwdi->cwdi_cmask unlocked. */
1936 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1937 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1938 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1939 if (error == 0)
1940 vput(nd.ni_vp);
1941 return (error);
1942 }
1943
1944 /*
1945 * Make a hard file link.
1946 */
1947 /* ARGSUSED */
1948 int
1949 sys_link(struct lwp *l, const struct sys_link_args *uap, register_t *retval)
1950 {
1951 /* {
1952 syscallarg(const char *) path;
1953 syscallarg(const char *) link;
1954 } */
1955 struct vnode *vp;
1956 struct nameidata nd;
1957 int error;
1958
1959 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1960 SCARG(uap, path));
1961 if ((error = namei(&nd)) != 0)
1962 return (error);
1963 vp = nd.ni_vp;
1964 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1965 SCARG(uap, link));
1966 if ((error = namei(&nd)) != 0)
1967 goto out;
1968 if (nd.ni_vp) {
1969 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1970 if (nd.ni_dvp == nd.ni_vp)
1971 vrele(nd.ni_dvp);
1972 else
1973 vput(nd.ni_dvp);
1974 vrele(nd.ni_vp);
1975 error = EEXIST;
1976 goto out;
1977 }
1978 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1979 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
1980 error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1981 out:
1982 vrele(vp);
1983 return (error);
1984 }
1985
1986 /*
1987 * Make a symbolic link.
1988 */
1989 /* ARGSUSED */
1990 int
1991 sys_symlink(struct lwp *l, const struct sys_symlink_args *uap, register_t *retval)
1992 {
1993 /* {
1994 syscallarg(const char *) path;
1995 syscallarg(const char *) link;
1996 } */
1997 struct proc *p = l->l_proc;
1998 struct vattr vattr;
1999 char *path;
2000 int error;
2001 struct nameidata nd;
2002
2003 path = PNBUF_GET();
2004 error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL);
2005 if (error)
2006 goto out;
2007 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
2008 SCARG(uap, link));
2009 if ((error = namei(&nd)) != 0)
2010 goto out;
2011 if (nd.ni_vp) {
2012 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2013 if (nd.ni_dvp == nd.ni_vp)
2014 vrele(nd.ni_dvp);
2015 else
2016 vput(nd.ni_dvp);
2017 vrele(nd.ni_vp);
2018 error = EEXIST;
2019 goto out;
2020 }
2021 VATTR_NULL(&vattr);
2022 vattr.va_type = VLNK;
2023 /* We will read cwdi->cwdi_cmask unlocked. */
2024 vattr.va_mode = ACCESSPERMS &~ p->p_cwdi->cwdi_cmask;
2025 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2026 error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
2027 if (error == 0)
2028 vput(nd.ni_vp);
2029 out:
2030 PNBUF_PUT(path);
2031 return (error);
2032 }
2033
2034 /*
2035 * Delete a whiteout from the filesystem.
2036 */
2037 /* ARGSUSED */
2038 int
2039 sys_undelete(struct lwp *l, const struct sys_undelete_args *uap, register_t *retval)
2040 {
2041 /* {
2042 syscallarg(const char *) path;
2043 } */
2044 int error;
2045 struct nameidata nd;
2046
2047 NDINIT(&nd, DELETE, LOCKPARENT | DOWHITEOUT | TRYEMULROOT,
2048 UIO_USERSPACE, SCARG(uap, path));
2049 error = namei(&nd);
2050 if (error)
2051 return (error);
2052
2053 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
2054 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2055 if (nd.ni_dvp == nd.ni_vp)
2056 vrele(nd.ni_dvp);
2057 else
2058 vput(nd.ni_dvp);
2059 if (nd.ni_vp)
2060 vrele(nd.ni_vp);
2061 return (EEXIST);
2062 }
2063 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2064 if ((error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE)) != 0)
2065 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2066 vput(nd.ni_dvp);
2067 return (error);
2068 }
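
/*
 * Sketch, assuming a union mount where the upper layer records
 * whiteouts: undelete(2) removes the whiteout so the lower-layer
 * name becomes visible again:
 *
 *	unlink("/union/etc/motd");	(whiteout created)
 *	undelete("/union/etc/motd");	(lower file reappears)
 */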
2069
2070 /*
2071 * Delete a name from the filesystem.
2072 */
2073 /* ARGSUSED */
2074 int
2075 sys_unlink(struct lwp *l, const struct sys_unlink_args *uap, register_t *retval)
2076 {
2077 /* {
2078 syscallarg(const char *) path;
2079 } */
2080
2081 return do_sys_unlink(SCARG(uap, path), UIO_USERSPACE);
2082 }
2083
2084 int
2085 do_sys_unlink(const char *arg, enum uio_seg seg)
2086 {
2087 struct vnode *vp;
2088 int error;
2089 struct nameidata nd;
2090 kauth_cred_t cred;
2091 char *path;
2092 const char *cpath;
2093
2094 VERIEXEC_PATH_GET(arg, seg, cpath, path);
2095 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, seg, cpath);
2096
2097 if ((error = namei(&nd)) != 0)
2098 goto out;
2099 vp = nd.ni_vp;
2100
2101 /*
2102 * The root of a mounted filesystem cannot be deleted.
2103 */
2104 if (vp->v_vflag & VV_ROOT) {
2105 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2106 if (nd.ni_dvp == vp)
2107 vrele(nd.ni_dvp);
2108 else
2109 vput(nd.ni_dvp);
2110 vput(vp);
2111 error = EBUSY;
2112 goto out;
2113 }
2114
2115 #if NVERIEXEC > 0
2116 /* Handle remove requests for veriexec entries. */
2117 if ((error = veriexec_removechk(curlwp, nd.ni_vp, nd.ni_dirp)) != 0) {
2118 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2119 if (nd.ni_dvp == vp)
2120 vrele(nd.ni_dvp);
2121 else
2122 vput(nd.ni_dvp);
2123 vput(vp);
2124 goto out;
2125 }
2126 #endif /* NVERIEXEC > 0 */
2127
2128 cred = kauth_cred_get();
2129 VOP_LEASE(nd.ni_dvp, cred, LEASE_WRITE);
2130 VOP_LEASE(vp, cred, LEASE_WRITE);
2131 #ifdef FILEASSOC
2132 (void)fileassoc_file_delete(vp);
2133 #endif /* FILEASSOC */
2134 error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2135 out:
2136 VERIEXEC_PATH_PUT(path);
2137 return (error);
2138 }
2139
2140 /*
2141 * Reposition read/write file offset.
2142 */
2143 int
2144 sys_lseek(struct lwp *l, const struct sys_lseek_args *uap, register_t *retval)
2145 {
2146 /* {
2147 syscallarg(int) fd;
2148 syscallarg(int) pad;
2149 syscallarg(off_t) offset;
2150 syscallarg(int) whence;
2151 } */
2152 struct proc *p = l->l_proc;
2153 kauth_cred_t cred = l->l_cred;
2154 struct filedesc *fdp = p->p_fd;
2155 struct file *fp;
2156 struct vnode *vp;
2157 struct vattr vattr;
2158 off_t newoff;
2159 int error;
2160
2161 if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
2162 return (EBADF);
2163
2164 vp = (struct vnode *)fp->f_data;
2165 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2166 error = ESPIPE;
2167 mutex_exit(&fp->f_lock);
2168 goto out;
2169 }
2170
2171 switch (SCARG(uap, whence)) {
2172 case SEEK_CUR:
2173 newoff = fp->f_offset + SCARG(uap, offset);
2174 FILE_USE(fp);
2175 break;
2176 case SEEK_END:
2177 FILE_USE(fp);
2178 error = VOP_GETATTR(vp, &vattr, cred);
2179 if (error) {
2180 FILE_UNUSE(fp, l);
2181 goto out;
2182 }
2183 newoff = SCARG(uap, offset) + vattr.va_size;
2184 break;
2185 case SEEK_SET:
2186 FILE_USE(fp);
2187 newoff = SCARG(uap, offset);
2188 break;
2189 default:
2190 mutex_exit(&fp->f_lock);
2191 error = EINVAL;
2192 goto out;
2193 }
2194 if ((error = VOP_SEEK(vp, fp->f_offset, newoff, cred)) == 0) {
2195 mutex_enter(&fp->f_lock);
2196 *(off_t *)retval = fp->f_offset = newoff;
2197 mutex_exit(&fp->f_lock);
2198 }
2199 FILE_UNUSE(fp, l);
2200 out:
2201 return (error);
2202 }
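
/*
 * Illustrative userland sketch (not part of the kernel build): the
 * SEEK_END case above computes the new offset as the requested offset
 * plus va_size obtained from VOP_GETATTR().  The fragment below shows
 * the conventional use of that behaviour to find a file's size; the
 * path name is hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	off_t size;
	int fd;

	if ((fd = open("/tmp/example.dat", O_RDONLY)) == -1)
		return 1;
	/* An offset of 0 from SEEK_END is simply the file's length. */
	size = lseek(fd, 0, SEEK_END);
	if (size == -1)
		return 1;
	printf("%lld bytes\n", (long long)size);
	close(fd);
	return 0;
}
#endif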
2203
2204 /*
2205 * Positional read system call.
2206 */
2207 int
2208 sys_pread(struct lwp *l, const struct sys_pread_args *uap, register_t *retval)
2209 {
2210 /* {
2211 syscallarg(int) fd;
2212 syscallarg(void *) buf;
2213 syscallarg(size_t) nbyte;
2214 syscallarg(off_t) offset;
2215 } */
2216 struct proc *p = l->l_proc;
2217 struct filedesc *fdp = p->p_fd;
2218 struct file *fp;
2219 struct vnode *vp;
2220 off_t offset;
2221 int error, fd = SCARG(uap, fd);
2222
2223 if ((fp = fd_getfile(fdp, fd)) == NULL)
2224 return (EBADF);
2225
2226 if ((fp->f_flag & FREAD) == 0) {
2227 mutex_exit(&fp->f_lock);
2228 return (EBADF);
2229 }
2230
2231 FILE_USE(fp);
2232
2233 vp = (struct vnode *)fp->f_data;
2234 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2235 error = ESPIPE;
2236 goto out;
2237 }
2238
2239 offset = SCARG(uap, offset);
2240
2241 /*
2242 * XXX This works because no file systems actually
2243 * XXX take any action on the seek operation.
2244 */
2245 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2246 goto out;
2247
2248 /* dofileread() will unuse the descriptor for us */
2249 return (dofileread(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2250 &offset, 0, retval));
2251
2252 out:
2253 FILE_UNUSE(fp, l);
2254 return (error);
2255 }
2256
2257 /*
2258 * Positional scatter read system call.
2259 */
2260 int
2261 sys_preadv(struct lwp *l, const struct sys_preadv_args *uap, register_t *retval)
2262 {
2263 /* {
2264 syscallarg(int) fd;
2265 syscallarg(const struct iovec *) iovp;
2266 syscallarg(int) iovcnt;
2267 syscallarg(off_t) offset;
2268 } */
2269 off_t offset = SCARG(uap, offset);
2270
2271 return do_filereadv(SCARG(uap, fd), SCARG(uap, iovp),
2272 SCARG(uap, iovcnt), &offset, 0, retval);
2273 }
2274
2275 /*
2276 * Positional write system call.
2277 */
2278 int
2279 sys_pwrite(struct lwp *l, const struct sys_pwrite_args *uap, register_t *retval)
2280 {
2281 /* {
2282 syscallarg(int) fd;
2283 syscallarg(const void *) buf;
2284 syscallarg(size_t) nbyte;
2285 syscallarg(off_t) offset;
2286 } */
2287 struct proc *p = l->l_proc;
2288 struct filedesc *fdp = p->p_fd;
2289 struct file *fp;
2290 struct vnode *vp;
2291 off_t offset;
2292 int error, fd = SCARG(uap, fd);
2293
2294 if ((fp = fd_getfile(fdp, fd)) == NULL)
2295 return (EBADF);
2296
2297 if ((fp->f_flag & FWRITE) == 0) {
2298 mutex_exit(&fp->f_lock);
2299 return (EBADF);
2300 }
2301
2302 FILE_USE(fp);
2303
2304 vp = (struct vnode *)fp->f_data;
2305 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2306 error = ESPIPE;
2307 goto out;
2308 }
2309
2310 offset = SCARG(uap, offset);
2311
2312 /*
2313 * XXX This works because no file systems actually
2314 * XXX take any action on the seek operation.
2315 */
2316 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2317 goto out;
2318
2319 /* dofilewrite() will unuse the descriptor for us */
2320 return (dofilewrite(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2321 &offset, 0, retval));
2322
2323 out:
2324 FILE_UNUSE(fp, l);
2325 return (error);
2326 }
2327
2328 /*
2329 * Positional gather write system call.
2330 */
2331 int
2332 sys_pwritev(struct lwp *l, const struct sys_pwritev_args *uap, register_t *retval)
2333 {
2334 /* {
2335 syscallarg(int) fd;
2336 syscallarg(const struct iovec *) iovp;
2337 syscallarg(int) iovcnt;
2338 syscallarg(off_t) offset;
2339 } */
2340 off_t offset = SCARG(uap, offset);
2341
2342 return do_filewritev(SCARG(uap, fd), SCARG(uap, iovp),
2343 SCARG(uap, iovcnt), &offset, 0, retval);
2344 }
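
/*
 * Illustrative userland sketch (not part of the kernel build): the four
 * positional calls above hand a local copy of the offset to
 * dofileread()/do_filereadv() and friends and never update
 * fp->f_offset, so the descriptor's seek position is left alone.  A
 * minimal demonstration with preadv(2) follows; the path name and
 * offsets are hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <sys/uio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char hdr[16], body[64];
	struct iovec iov[2];
	ssize_t n;
	int fd;

	if ((fd = open("/tmp/example.dat", O_RDONLY)) == -1)
		return 1;
	iov[0].iov_base = hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = body;
	iov[1].iov_len = sizeof(body);
	/* Scatter-read 80 bytes starting at byte 512. */
	n = preadv(fd, iov, 2, (off_t)512);
	printf("read %zd bytes, seek position still %lld\n",
	    n, (long long)lseek(fd, 0, SEEK_CUR));
	close(fd);
	return 0;
}
#endif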
2345
2346 /*
2347 * Check access permissions.
2348 */
2349 int
2350 sys_access(struct lwp *l, const struct sys_access_args *uap, register_t *retval)
2351 {
2352 /* {
2353 syscallarg(const char *) path;
2354 syscallarg(int) flags;
2355 } */
2356 kauth_cred_t cred;
2357 struct vnode *vp;
2358 int error, flags;
2359 struct nameidata nd;
2360
2361 cred = kauth_cred_dup(l->l_cred);
2362 kauth_cred_seteuid(cred, kauth_cred_getuid(l->l_cred));
2363 kauth_cred_setegid(cred, kauth_cred_getgid(l->l_cred));
2364 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2365 SCARG(uap, path));
2366 /* Override default credentials */
2367 nd.ni_cnd.cn_cred = cred;
2368 if ((error = namei(&nd)) != 0)
2369 goto out;
2370 vp = nd.ni_vp;
2371
2372 /* Flags == 0 means only check for existence. */
2373 if (SCARG(uap, flags)) {
2374 flags = 0;
2375 if (SCARG(uap, flags) & R_OK)
2376 flags |= VREAD;
2377 if (SCARG(uap, flags) & W_OK)
2378 flags |= VWRITE;
2379 if (SCARG(uap, flags) & X_OK)
2380 flags |= VEXEC;
2381
2382 error = VOP_ACCESS(vp, flags, cred);
2383 if (!error && (flags & VWRITE))
2384 error = vn_writechk(vp);
2385 }
2386 vput(vp);
2387 out:
2388 kauth_cred_free(cred);
2389 return (error);
2390 }
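
/*
 * Illustrative userland sketch (not part of the kernel build): as the
 * credential handling above shows, access(2) is evaluated against the
 * caller's real uid/gid, and a flags value of 0 (F_OK) only tests for
 * existence.  The path below is hypothetical; note the usual caveat
 * that the answer may be stale by the time the file is actually opened.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/example.conf";	/* hypothetical */

	if (access(path, F_OK) == -1)
		printf("%s does not exist\n", path);
	else if (access(path, R_OK | W_OK) == 0)
		printf("%s is readable and writable (real ids)\n", path);
	return 0;
}
#endif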
2391
2392 /*
2393 * Common code for all sys_stat functions, including compat versions.
2394 */
2395 int
2396 do_sys_stat(struct lwp *l, const char *path, unsigned int nd_flags,
2397 struct stat *sb)
2398 {
2399 int error;
2400 struct nameidata nd;
2401
2402 NDINIT(&nd, LOOKUP, nd_flags | LOCKLEAF | TRYEMULROOT,
2403 UIO_USERSPACE, path);
2404 error = namei(&nd);
2405 if (error != 0)
2406 return error;
2407 error = vn_stat(nd.ni_vp, sb, l);
2408 vput(nd.ni_vp);
2409 return error;
2410 }
2411
2412 /*
2413 * Get file status; this version follows links.
2414 */
2415 /* ARGSUSED */
2416 int
2417 sys___stat30(struct lwp *l, const struct sys___stat30_args *uap, register_t *retval)
2418 {
2419 /* {
2420 syscallarg(const char *) path;
2421 syscallarg(struct stat *) ub;
2422 } */
2423 struct stat sb;
2424 int error;
2425
2426 error = do_sys_stat(l, SCARG(uap, path), FOLLOW, &sb);
2427 if (error)
2428 return error;
2429 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2430 }
2431
2432 /*
2433 * Get file status; this version does not follow links.
2434 */
2435 /* ARGSUSED */
2436 int
2437 sys___lstat30(struct lwp *l, const struct sys___lstat30_args *uap, register_t *retval)
2438 {
2439 /* {
2440 syscallarg(const char *) path;
2441 syscallarg(struct stat *) ub;
2442 } */
2443 struct stat sb;
2444 int error;
2445
2446 error = do_sys_stat(l, SCARG(uap, path), NOFOLLOW, &sb);
2447 if (error)
2448 return error;
2449 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2450 }
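
/*
 * Illustrative userland sketch (not part of the kernel build): the only
 * difference between the two entry points above is the FOLLOW/NOFOLLOW
 * flag handed to namei(), i.e. whether a trailing symbolic link is
 * resolved.  A small comparison, with a hypothetical path:
 */
#if 0	/* example only -- not compiled into the kernel */
#include <sys/stat.h>
#include <stdio.h>

int
main(void)
{
	const char *path = "/tmp/example.link";	/* hypothetical */
	struct stat sb, lsb;

	if (stat(path, &sb) == -1 || lstat(path, &lsb) == -1)
		return 1;
	if (S_ISLNK(lsb.st_mode))
		printf("symlink of %lld bytes pointing at an object of "
		    "%lld bytes\n",
		    (long long)lsb.st_size, (long long)sb.st_size);
	else
		printf("not a symlink; both calls saw the same file\n");
	return 0;
}
#endif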
2451
2452 /*
2453 * Get configurable pathname variables.
2454 */
2455 /* ARGSUSED */
2456 int
2457 sys_pathconf(struct lwp *l, const struct sys_pathconf_args *uap, register_t *retval)
2458 {
2459 /* {
2460 syscallarg(const char *) path;
2461 syscallarg(int) name;
2462 } */
2463 int error;
2464 struct nameidata nd;
2465
2466 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2467 SCARG(uap, path));
2468 if ((error = namei(&nd)) != 0)
2469 return (error);
2470 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), retval);
2471 vput(nd.ni_vp);
2472 return (error);
2473 }
2474
2475 /*
2476 * Return target name of a symbolic link.
2477 */
2478 /* ARGSUSED */
2479 int
2480 sys_readlink(struct lwp *l, const struct sys_readlink_args *uap, register_t *retval)
2481 {
2482 /* {
2483 syscallarg(const char *) path;
2484 syscallarg(char *) buf;
2485 syscallarg(size_t) count;
2486 } */
2487 struct vnode *vp;
2488 struct iovec aiov;
2489 struct uio auio;
2490 int error;
2491 struct nameidata nd;
2492
2493 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2494 SCARG(uap, path));
2495 if ((error = namei(&nd)) != 0)
2496 return (error);
2497 vp = nd.ni_vp;
2498 if (vp->v_type != VLNK)
2499 error = EINVAL;
2500 else if (!(vp->v_mount->mnt_flag & MNT_SYMPERM) ||
2501 (error = VOP_ACCESS(vp, VREAD, l->l_cred)) == 0) {
2502 aiov.iov_base = SCARG(uap, buf);
2503 aiov.iov_len = SCARG(uap, count);
2504 auio.uio_iov = &aiov;
2505 auio.uio_iovcnt = 1;
2506 auio.uio_offset = 0;
2507 auio.uio_rw = UIO_READ;
2508 KASSERT(l == curlwp);
2509 auio.uio_vmspace = l->l_proc->p_vmspace;
2510 auio.uio_resid = SCARG(uap, count);
2511 error = VOP_READLINK(vp, &auio, l->l_cred);
2512 }
2513 vput(vp);
2514 *retval = SCARG(uap, count) - auio.uio_resid;
2515 return (error);
2516 }
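
/*
 * Illustrative userland sketch (not part of the kernel build): the
 * return value computed above is `count' minus the residual, i.e. the
 * number of bytes copied out, and no NUL terminator is appended.
 * Callers must terminate the buffer themselves, as below (the path is
 * hypothetical):
 */
#if 0	/* example only -- not compiled into the kernel */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char target[PATH_MAX];
	ssize_t n;

	n = readlink("/tmp/example.link", target, sizeof(target) - 1);
	if (n == -1)
		return 1;
	target[n] = '\0';	/* readlink(2) does not NUL-terminate */
	printf("link points to %s\n", target);
	return 0;
}
#endif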
2517
2518 /*
2519 * Change flags of a file given a path name.
2520 */
2521 /* ARGSUSED */
2522 int
2523 sys_chflags(struct lwp *l, const struct sys_chflags_args *uap, register_t *retval)
2524 {
2525 /* {
2526 syscallarg(const char *) path;
2527 syscallarg(u_long) flags;
2528 } */
2529 struct vnode *vp;
2530 int error;
2531 struct nameidata nd;
2532
2533 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2534 SCARG(uap, path));
2535 if ((error = namei(&nd)) != 0)
2536 return (error);
2537 vp = nd.ni_vp;
2538 error = change_flags(vp, SCARG(uap, flags), l);
2539 vput(vp);
2540 return (error);
2541 }
2542
2543 /*
2544 * Change flags of a file given a file descriptor.
2545 */
2546 /* ARGSUSED */
2547 int
2548 sys_fchflags(struct lwp *l, const struct sys_fchflags_args *uap, register_t *retval)
2549 {
2550 /* {
2551 syscallarg(int) fd;
2552 syscallarg(u_long) flags;
2553 } */
2554 struct proc *p = l->l_proc;
2555 struct vnode *vp;
2556 struct file *fp;
2557 int error;
2558
2559 /* getvnode() will use the descriptor for us */
2560 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2561 return (error);
2562 vp = (struct vnode *)fp->f_data;
2563 error = change_flags(vp, SCARG(uap, flags), l);
2564 VOP_UNLOCK(vp, 0);
2565 FILE_UNUSE(fp, l);
2566 return (error);
2567 }
2568
2569 /*
2570 * Change flags of a file given a path name; this version does
2571 * not follow links.
2572 */
2573 int
2574 sys_lchflags(struct lwp *l, const struct sys_lchflags_args *uap, register_t *retval)
2575 {
2576 /* {
2577 syscallarg(const char *) path;
2578 syscallarg(u_long) flags;
2579 } */
2580 struct vnode *vp;
2581 int error;
2582 struct nameidata nd;
2583
2584 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2585 SCARG(uap, path));
2586 if ((error = namei(&nd)) != 0)
2587 return (error);
2588 vp = nd.ni_vp;
2589 error = change_flags(vp, SCARG(uap, flags), l);
2590 vput(vp);
2591 return (error);
2592 }
2593
2594 /*
2595 * Common routine to change flags of a file.
2596 */
2597 int
2598 change_flags(struct vnode *vp, u_long flags, struct lwp *l)
2599 {
2600 struct vattr vattr;
2601 int error;
2602
2603 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2604 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2605 /*
2606 * Non-superusers cannot change the flags on devices, even if they
2607 * own them.
2608 */
2609 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
2610 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2611 goto out;
2612 if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2613 error = EINVAL;
2614 goto out;
2615 }
2616 }
2617 VATTR_NULL(&vattr);
2618 vattr.va_flags = flags;
2619 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2620 out:
2621 return (error);
2622 }
2623
2624 /*
2625 * Change mode of a file given path name; this version follows links.
2626 */
2627 /* ARGSUSED */
2628 int
2629 sys_chmod(struct lwp *l, const struct sys_chmod_args *uap, register_t *retval)
2630 {
2631 /* {
2632 syscallarg(const char *) path;
2633 syscallarg(int) mode;
2634 } */
2635 int error;
2636 struct nameidata nd;
2637
2638 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2639 SCARG(uap, path));
2640 if ((error = namei(&nd)) != 0)
2641 return (error);
2642
2643 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2644
2645 vrele(nd.ni_vp);
2646 return (error);
2647 }
2648
2649 /*
2650 * Change mode of a file given a file descriptor.
2651 */
2652 /* ARGSUSED */
2653 int
2654 sys_fchmod(struct lwp *l, const struct sys_fchmod_args *uap, register_t *retval)
2655 {
2656 /* {
2657 syscallarg(int) fd;
2658 syscallarg(int) mode;
2659 } */
2660 struct proc *p = l->l_proc;
2661 struct file *fp;
2662 int error;
2663
2664 /* getvnode() will use the descriptor for us */
2665 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2666 return (error);
2667
2668 error = change_mode((struct vnode *)fp->f_data, SCARG(uap, mode), l);
2669 FILE_UNUSE(fp, l);
2670 return (error);
2671 }
2672
2673 /*
2674 * Change mode of a file given path name; this version does not follow links.
2675 */
2676 /* ARGSUSED */
2677 int
2678 sys_lchmod(struct lwp *l, const struct sys_lchmod_args *uap, register_t *retval)
2679 {
2680 /* {
2681 syscallarg(const char *) path;
2682 syscallarg(int) mode;
2683 } */
2684 int error;
2685 struct nameidata nd;
2686
2687 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2688 SCARG(uap, path));
2689 if ((error = namei(&nd)) != 0)
2690 return (error);
2691
2692 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2693
2694 vrele(nd.ni_vp);
2695 return (error);
2696 }
2697
2698 /*
2699 * Common routine to set mode given a vnode.
2700 */
2701 static int
2702 change_mode(struct vnode *vp, int mode, struct lwp *l)
2703 {
2704 struct vattr vattr;
2705 int error;
2706
2707 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2708 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2709 VATTR_NULL(&vattr);
2710 vattr.va_mode = mode & ALLPERMS;
2711 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2712 VOP_UNLOCK(vp, 0);
2713 return (error);
2714 }
2715
2716 /*
2717 * Set ownership given a path name; this version follows links.
2718 */
2719 /* ARGSUSED */
2720 int
2721 sys_chown(struct lwp *l, const struct sys_chown_args *uap, register_t *retval)
2722 {
2723 /* {
2724 syscallarg(const char *) path;
2725 syscallarg(uid_t) uid;
2726 syscallarg(gid_t) gid;
2727 } */
2728 int error;
2729 struct nameidata nd;
2730
2731 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2732 SCARG(uap, path));
2733 if ((error = namei(&nd)) != 0)
2734 return (error);
2735
2736 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2737
2738 vrele(nd.ni_vp);
2739 return (error);
2740 }
2741
2742 /*
2743 * Set ownership given a path name; this version follows links.
2744  * Provides POSIX/XPG semantics.
2745 */
2746 /* ARGSUSED */
2747 int
2748 sys___posix_chown(struct lwp *l, const struct sys___posix_chown_args *uap, register_t *retval)
2749 {
2750 /* {
2751 syscallarg(const char *) path;
2752 syscallarg(uid_t) uid;
2753 syscallarg(gid_t) gid;
2754 } */
2755 int error;
2756 struct nameidata nd;
2757
2758 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2759 SCARG(uap, path));
2760 if ((error = namei(&nd)) != 0)
2761 return (error);
2762
2763 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2764
2765 vrele(nd.ni_vp);
2766 return (error);
2767 }
2768
2769 /*
2770 * Set ownership given a file descriptor.
2771 */
2772 /* ARGSUSED */
2773 int
2774 sys_fchown(struct lwp *l, const struct sys_fchown_args *uap, register_t *retval)
2775 {
2776 /* {
2777 syscallarg(int) fd;
2778 syscallarg(uid_t) uid;
2779 syscallarg(gid_t) gid;
2780 } */
2781 struct proc *p = l->l_proc;
2782 int error;
2783 struct file *fp;
2784
2785 /* getvnode() will use the descriptor for us */
2786 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2787 return (error);
2788
2789 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2790 SCARG(uap, gid), l, 0);
2791 FILE_UNUSE(fp, l);
2792 return (error);
2793 }
2794
2795 /*
2796 * Set ownership given a file descriptor, providing POSIX/XPG semantics.
2797 */
2798 /* ARGSUSED */
2799 int
2800 sys___posix_fchown(struct lwp *l, const struct sys___posix_fchown_args *uap, register_t *retval)
2801 {
2802 /* {
2803 syscallarg(int) fd;
2804 syscallarg(uid_t) uid;
2805 syscallarg(gid_t) gid;
2806 } */
2807 struct proc *p = l->l_proc;
2808 int error;
2809 struct file *fp;
2810
2811 /* getvnode() will use the descriptor for us */
2812 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2813 return (error);
2814
2815 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2816 SCARG(uap, gid), l, 1);
2817 FILE_UNUSE(fp, l);
2818 return (error);
2819 }
2820
2821 /*
2822 * Set ownership given a path name; this version does not follow links.
2823 */
2824 /* ARGSUSED */
2825 int
2826 sys_lchown(struct lwp *l, const struct sys_lchown_args *uap, register_t *retval)
2827 {
2828 /* {
2829 syscallarg(const char *) path;
2830 syscallarg(uid_t) uid;
2831 syscallarg(gid_t) gid;
2832 } */
2833 int error;
2834 struct nameidata nd;
2835
2836 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2837 SCARG(uap, path));
2838 if ((error = namei(&nd)) != 0)
2839 return (error);
2840
2841 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2842
2843 vrele(nd.ni_vp);
2844 return (error);
2845 }
2846
2847 /*
2848 * Set ownership given a path name; this version does not follow links.
2849 * Provides POSIX/XPG semantics.
2850 */
2851 /* ARGSUSED */
2852 int
2853 sys___posix_lchown(struct lwp *l, const struct sys___posix_lchown_args *uap, register_t *retval)
2854 {
2855 /* {
2856 syscallarg(const char *) path;
2857 syscallarg(uid_t) uid;
2858 syscallarg(gid_t) gid;
2859 } */
2860 int error;
2861 struct nameidata nd;
2862
2863 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2864 SCARG(uap, path));
2865 if ((error = namei(&nd)) != 0)
2866 return (error);
2867
2868 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2869
2870 vrele(nd.ni_vp);
2871 return (error);
2872 }
2873
2874 /*
2875 * Common routine to set ownership given a vnode.
2876 */
2877 static int
2878 change_owner(struct vnode *vp, uid_t uid, gid_t gid, struct lwp *l,
2879 int posix_semantics)
2880 {
2881 struct vattr vattr;
2882 mode_t newmode;
2883 int error;
2884
2885 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2886 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2887 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2888 goto out;
2889
2890 #define CHANGED(x) ((int)(x) != -1)
2891 newmode = vattr.va_mode;
2892 if (posix_semantics) {
2893 /*
2894 * POSIX/XPG semantics: if the caller is not the super-user,
2895 * clear set-user-id and set-group-id bits. Both POSIX and
2896 * the XPG consider the behaviour for calls by the super-user
2897 * implementation-defined; we leave the set-user-id and set-
2898 * group-id settings intact in that case.
2899 */
2900 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
2901 NULL) != 0)
2902 newmode &= ~(S_ISUID | S_ISGID);
2903 } else {
2904 /*
2905 * NetBSD semantics: when changing owner and/or group,
2906 * clear the respective bit(s).
2907 */
2908 if (CHANGED(uid))
2909 newmode &= ~S_ISUID;
2910 if (CHANGED(gid))
2911 newmode &= ~S_ISGID;
2912 }
2913 /* Update va_mode iff altered. */
2914 if (vattr.va_mode == newmode)
2915 newmode = VNOVAL;
2916
2917 VATTR_NULL(&vattr);
2918 vattr.va_uid = CHANGED(uid) ? uid : (uid_t)VNOVAL;
2919 vattr.va_gid = CHANGED(gid) ? gid : (gid_t)VNOVAL;
2920 vattr.va_mode = newmode;
2921 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2922 #undef CHANGED
2923
2924 out:
2925 VOP_UNLOCK(vp, 0);
2926 return (error);
2927 }
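
/*
 * Illustrative userland sketch (not part of the kernel build): with the
 * NetBSD semantics above, a chown(2)-style call by a non-superuser that
 * changes only the group clears S_ISGID (and changing the owner clears
 * S_ISUID), while the POSIX/XPG entry points clear both bits whenever a
 * non-superuser changes either id.  The fragment below merely observes
 * the effect on the mode; which kernel entry point libc's chown() maps
 * to is an environment detail not shown in this file, and the path and
 * gid are hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/example.bin";	/* hypothetical */
	struct stat before, after;

	if (stat(path, &before) == -1)
		return 1;
	/* A uid of (uid_t)-1 means "leave the owner alone", cf. CHANGED(). */
	if (chown(path, (uid_t)-1, (gid_t)100) == -1)
		return 1;
	if (stat(path, &after) == -1)
		return 1;
	printf("setgid bit: before %s, after %s\n",
	    (before.st_mode & S_ISGID) ? "set" : "clear",
	    (after.st_mode & S_ISGID) ? "set" : "clear");
	return 0;
}
#endif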
2928
2929 /*
2930 * Set the access and modification times given a path name; this
2931 * version follows links.
2932 */
2933 /* ARGSUSED */
2934 int
2935 sys_utimes(struct lwp *l, const struct sys_utimes_args *uap, register_t *retval)
2936 {
2937 /* {
2938 syscallarg(const char *) path;
2939 syscallarg(const struct timeval *) tptr;
2940 } */
2941
2942 return do_sys_utimes(l, NULL, SCARG(uap, path), FOLLOW,
2943 SCARG(uap, tptr), UIO_USERSPACE);
2944 }
2945
2946 /*
2947 * Set the access and modification times given a file descriptor.
2948 */
2949 /* ARGSUSED */
2950 int
2951 sys_futimes(struct lwp *l, const struct sys_futimes_args *uap, register_t *retval)
2952 {
2953 /* {
2954 syscallarg(int) fd;
2955 syscallarg(const struct timeval *) tptr;
2956 } */
2957 int error;
2958 struct file *fp;
2959
2960 /* getvnode() will use the descriptor for us */
2961 if ((error = getvnode(l->l_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
2962 return (error);
2963
2964 error = do_sys_utimes(l, fp->f_data, NULL, 0,
2965 SCARG(uap, tptr), UIO_USERSPACE);
2966
2967 FILE_UNUSE(fp, l);
2968 return (error);
2969 }
2970
2971 /*
2972 * Set the access and modification times given a path name; this
2973 * version does not follow links.
2974 */
2975 int
2976 sys_lutimes(struct lwp *l, const struct sys_lutimes_args *uap, register_t *retval)
2977 {
2978 /* {
2979 syscallarg(const char *) path;
2980 syscallarg(const struct timeval *) tptr;
2981 } */
2982
2983 return do_sys_utimes(l, NULL, SCARG(uap, path), NOFOLLOW,
2984 SCARG(uap, tptr), UIO_USERSPACE);
2985 }
2986
2987 /*
2988 * Common routine to set access and modification times given a vnode.
2989 */
2990 int
2991 do_sys_utimes(struct lwp *l, struct vnode *vp, const char *path, int flag,
2992 const struct timeval *tptr, enum uio_seg seg)
2993 {
2994 struct vattr vattr;
2995 struct nameidata nd;
2996 int error;
2997
2998 VATTR_NULL(&vattr);
2999 if (tptr == NULL) {
3000 nanotime(&vattr.va_atime);
3001 vattr.va_mtime = vattr.va_atime;
3002 vattr.va_vaflags |= VA_UTIMES_NULL;
3003 } else {
3004 struct timeval tv[2];
3005
3006 if (seg != UIO_SYSSPACE) {
3007 error = copyin(tptr, &tv, sizeof (tv));
3008 if (error != 0)
3009 return error;
3010 tptr = tv;
3011 }
3012 TIMEVAL_TO_TIMESPEC(tptr, &vattr.va_atime);
3013 TIMEVAL_TO_TIMESPEC(tptr + 1, &vattr.va_mtime);
3014 }
3015
3016 if (vp == NULL) {
3017 NDINIT(&nd, LOOKUP, flag | TRYEMULROOT, UIO_USERSPACE, path);
3018 if ((error = namei(&nd)) != 0)
3019 return (error);
3020 vp = nd.ni_vp;
3021 } else
3022 nd.ni_vp = NULL;
3023
3024 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3025 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3026 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3027 VOP_UNLOCK(vp, 0);
3028
3029 if (nd.ni_vp != NULL)
3030 vrele(nd.ni_vp);
3031
3032 return (error);
3033 }
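
/*
 * Illustrative userland sketch (not part of the kernel build): a NULL
 * `tptr' means "set both timestamps to the current time" (the
 * VA_UTIMES_NULL case above); otherwise element 0 of the array is the
 * access time and element 1 the modification time.  The path below is
 * hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	const char *path = "/tmp/example.log";	/* hypothetical */
	struct timeval tv[2];

	/* "touch" the file: both times are set to now by the kernel */
	if (utimes(path, NULL) == -1)
		return 1;

	/* explicitly set atime and mtime to the epoch + 1000 seconds */
	tv[0].tv_sec = 1000;	/* access time */
	tv[0].tv_usec = 0;
	tv[1].tv_sec = 1000;	/* modification time */
	tv[1].tv_usec = 0;
	if (utimes(path, tv) == -1)
		return 1;
	printf("timestamps updated\n");
	return 0;
}
#endif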
3034
3035 /*
3036 * Truncate a file given its path name.
3037 */
3038 /* ARGSUSED */
3039 int
3040 sys_truncate(struct lwp *l, const struct sys_truncate_args *uap, register_t *retval)
3041 {
3042 /* {
3043 syscallarg(const char *) path;
3044 syscallarg(int) pad;
3045 syscallarg(off_t) length;
3046 } */
3047 struct vnode *vp;
3048 struct vattr vattr;
3049 int error;
3050 struct nameidata nd;
3051
3052 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3053 SCARG(uap, path));
3054 if ((error = namei(&nd)) != 0)
3055 return (error);
3056 vp = nd.ni_vp;
3057 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3058 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3059 if (vp->v_type == VDIR)
3060 error = EISDIR;
3061 else if ((error = vn_writechk(vp)) == 0 &&
3062 (error = VOP_ACCESS(vp, VWRITE, l->l_cred)) == 0) {
3063 VATTR_NULL(&vattr);
3064 vattr.va_size = SCARG(uap, length);
3065 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3066 }
3067 vput(vp);
3068 return (error);
3069 }
3070
3071 /*
3072 * Truncate a file given a file descriptor.
3073 */
3074 /* ARGSUSED */
3075 int
3076 sys_ftruncate(struct lwp *l, const struct sys_ftruncate_args *uap, register_t *retval)
3077 {
3078 /* {
3079 syscallarg(int) fd;
3080 syscallarg(int) pad;
3081 syscallarg(off_t) length;
3082 } */
3083 struct proc *p = l->l_proc;
3084 struct vattr vattr;
3085 struct vnode *vp;
3086 struct file *fp;
3087 int error;
3088
3089 /* getvnode() will use the descriptor for us */
3090 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3091 return (error);
3092 if ((fp->f_flag & FWRITE) == 0) {
3093 error = EINVAL;
3094 goto out;
3095 }
3096 vp = (struct vnode *)fp->f_data;
3097 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3098 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3099 if (vp->v_type == VDIR)
3100 error = EISDIR;
3101 else if ((error = vn_writechk(vp)) == 0) {
3102 VATTR_NULL(&vattr);
3103 vattr.va_size = SCARG(uap, length);
3104 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
3105 }
3106 VOP_UNLOCK(vp, 0);
3107 out:
3108 FILE_UNUSE(fp, l);
3109 return (error);
3110 }
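
/*
 * Illustrative userland sketch (not part of the kernel build):
 * ftruncate(2) requires a descriptor opened for writing (the FWRITE
 * check above) and changes the size via VOP_SETATTR of va_size.  The
 * path below is hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = open("/tmp/example.dat", O_RDWR | O_CREAT, 0644);
	if (fd == -1)
		return 1;
	if (write(fd, "hello, world\n", 13) != 13)
		return 1;
	if (ftruncate(fd, 5) == -1)	/* keep only "hello" */
		return 1;
	printf("file now %lld bytes\n", (long long)lseek(fd, 0, SEEK_END));
	close(fd);
	return 0;
}
#endif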
3111
3112 /*
3113 * Sync an open file.
3114 */
3115 /* ARGSUSED */
3116 int
3117 sys_fsync(struct lwp *l, const struct sys_fsync_args *uap, register_t *retval)
3118 {
3119 /* {
3120 syscallarg(int) fd;
3121 } */
3122 struct proc *p = l->l_proc;
3123 struct vnode *vp;
3124 struct file *fp;
3125 int error;
3126
3127 /* getvnode() will use the descriptor for us */
3128 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3129 return (error);
3130 vp = (struct vnode *)fp->f_data;
3131 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3132 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT, 0, 0);
3133 if (error == 0 && bioopsp != NULL &&
3134 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3135 (*bioopsp->io_fsync)(vp, 0);
3136 VOP_UNLOCK(vp, 0);
3137 FILE_UNUSE(fp, l);
3138 return (error);
3139 }
3140
3141 /*
3142 * Sync a range of file data. API modeled after that found in AIX.
3143 *
3144 * FDATASYNC indicates that we need only save enough metadata to be able
3145 * to re-read the written data. Note we duplicate AIX's requirement that
3146 * the file be open for writing.
3147 */
3148 /* ARGSUSED */
3149 int
3150 sys_fsync_range(struct lwp *l, const struct sys_fsync_range_args *uap, register_t *retval)
3151 {
3152 /* {
3153 syscallarg(int) fd;
3154 syscallarg(int) flags;
3155 syscallarg(off_t) start;
3156 syscallarg(off_t) length;
3157 } */
3158 struct proc *p = l->l_proc;
3159 struct vnode *vp;
3160 struct file *fp;
3161 int flags, nflags;
3162 off_t s, e, len;
3163 int error;
3164
3165 /* getvnode() will use the descriptor for us */
3166 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3167 return (error);
3168
3169 if ((fp->f_flag & FWRITE) == 0) {
3170 error = EBADF;
3171 goto out;
3172 }
3173
3174 flags = SCARG(uap, flags);
3175 if (((flags & (FDATASYNC | FFILESYNC)) == 0) ||
3176 ((~flags & (FDATASYNC | FFILESYNC)) == 0)) {
3177 error = EINVAL;
3178 goto out;
3179 }
3180 	/* Now set up the flag value(s) to pass to VOP_FSYNC() */
3181 if (flags & FDATASYNC)
3182 nflags = FSYNC_DATAONLY | FSYNC_WAIT;
3183 else
3184 nflags = FSYNC_WAIT;
3185 if (flags & FDISKSYNC)
3186 nflags |= FSYNC_CACHE;
3187
3188 len = SCARG(uap, length);
3189 	/* If length == 0, we do the whole file, and s = e = 0 will do that */
3190 if (len) {
3191 s = SCARG(uap, start);
3192 e = s + len;
3193 if (e < s) {
3194 error = EINVAL;
3195 goto out;
3196 }
3197 } else {
3198 e = 0;
3199 s = 0;
3200 }
3201
3202 vp = (struct vnode *)fp->f_data;
3203 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3204 error = VOP_FSYNC(vp, fp->f_cred, nflags, s, e);
3205
3206 if (error == 0 && bioopsp != NULL &&
3207 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3208 (*bioopsp->io_fsync)(vp, nflags);
3209
3210 VOP_UNLOCK(vp, 0);
3211 out:
3212 FILE_UNUSE(fp, l);
3213 return (error);
3214 }
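
/*
 * Illustrative userland sketch (not part of the kernel build): the flag
 * checking above requires exactly one of FDATASYNC or FFILESYNC,
 * optionally combined with FDISKSYNC to flush the drive's cache, and a
 * length of 0 means "the whole file".  fsync_range() is a
 * NetBSD-specific interface declared in <unistd.h>; the descriptor
 * setup below is hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = open("/tmp/example.db", O_RDWR);	/* hypothetical */
	if (fd == -1)
		return 1;
	/*
	 * Push the data (and enough metadata to re-read it) for the
	 * first 8 KiB to stable storage, including the drive cache.
	 */
	if (fsync_range(fd, FDATASYNC | FDISKSYNC, (off_t)0,
	    (off_t)8192) == -1) {
		perror("fsync_range");
		return 1;
	}
	close(fd);
	return 0;
}
#endif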
3215
3216 /*
3217 * Sync the data of an open file.
3218 */
3219 /* ARGSUSED */
3220 int
3221 sys_fdatasync(struct lwp *l, const struct sys_fdatasync_args *uap, register_t *retval)
3222 {
3223 /* {
3224 syscallarg(int) fd;
3225 } */
3226 struct proc *p = l->l_proc;
3227 struct vnode *vp;
3228 struct file *fp;
3229 int error;
3230
3231 /* getvnode() will use the descriptor for us */
3232 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3233 return (error);
3234 if ((fp->f_flag & FWRITE) == 0) {
3235 FILE_UNUSE(fp, l);
3236 return (EBADF);
3237 }
3238 vp = (struct vnode *)fp->f_data;
3239 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3240 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT|FSYNC_DATAONLY, 0, 0);
3241 VOP_UNLOCK(vp, 0);
3242 FILE_UNUSE(fp, l);
3243 return (error);
3244 }
3245
3246 /*
3247 * Rename files, (standard) BSD semantics frontend.
3248 */
3249 /* ARGSUSED */
3250 int
3251 sys_rename(struct lwp *l, const struct sys_rename_args *uap, register_t *retval)
3252 {
3253 /* {
3254 syscallarg(const char *) from;
3255 syscallarg(const char *) to;
3256 } */
3257
3258 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 0));
3259 }
3260
3261 /*
3262 * Rename files, POSIX semantics frontend.
3263 */
3264 /* ARGSUSED */
3265 int
3266 sys___posix_rename(struct lwp *l, const struct sys___posix_rename_args *uap, register_t *retval)
3267 {
3268 /* {
3269 syscallarg(const char *) from;
3270 syscallarg(const char *) to;
3271 } */
3272
3273 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 1));
3274 }
3275
3276 /*
3277 * Rename files. Source and destination must either both be directories,
3278 * or both not be directories. If target is a directory, it must be empty.
3279 * If `from' and `to' refer to the same object, the value of the `retain'
3280 * argument is used to determine whether `from' will be
3281 *
3282  * (retain == 0) deleted, unless `from' and `to' name the same
3283  * directory entry in the file system's name space (BSD).
3284 * (retain == 1) always retained (POSIX).
3285 */
3286 int
3287 do_sys_rename(const char *from, const char *to, enum uio_seg seg, int retain)
3288 {
3289 struct vnode *tvp, *fvp, *tdvp;
3290 struct nameidata fromnd, tond;
3291 struct lwp *l = curlwp;
3292 struct proc *p;
3293 int error;
3294
3295 NDINIT(&fromnd, DELETE, LOCKPARENT | SAVESTART | TRYEMULROOT,
3296 seg, from);
3297 if ((error = namei(&fromnd)) != 0)
3298 return (error);
3299 if (fromnd.ni_dvp != fromnd.ni_vp)
3300 VOP_UNLOCK(fromnd.ni_dvp, 0);
3301 fvp = fromnd.ni_vp;
3302 NDINIT(&tond, RENAME,
3303 LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | TRYEMULROOT
3304 | (fvp->v_type == VDIR ? CREATEDIR : 0),
3305 seg, to);
3306 if ((error = namei(&tond)) != 0) {
3307 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3308 vrele(fromnd.ni_dvp);
3309 vrele(fvp);
3310 goto out1;
3311 }
3312 tdvp = tond.ni_dvp;
3313 tvp = tond.ni_vp;
3314
3315 if (tvp != NULL) {
3316 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
3317 error = ENOTDIR;
3318 goto out;
3319 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
3320 error = EISDIR;
3321 goto out;
3322 }
3323 }
3324
3325 if (fvp == tdvp)
3326 error = EINVAL;
3327
3328 /*
3329 * Source and destination refer to the same object.
3330 */
3331 if (fvp == tvp) {
3332 if (retain)
3333 error = -1;
3334 else if (fromnd.ni_dvp == tdvp &&
3335 fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
3336 !memcmp(fromnd.ni_cnd.cn_nameptr,
3337 tond.ni_cnd.cn_nameptr,
3338 fromnd.ni_cnd.cn_namelen))
3339 error = -1;
3340 }
3341
3342 #if NVERIEXEC > 0
3343 if (!error) {
3344 char *f1, *f2;
3345
3346 f1 = malloc(fromnd.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3347 		strlcpy(f1, fromnd.ni_cnd.cn_nameptr, fromnd.ni_cnd.cn_namelen + 1);
3348
3349 f2 = malloc(tond.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3350 		strlcpy(f2, tond.ni_cnd.cn_nameptr, tond.ni_cnd.cn_namelen + 1);
3351
3352 error = veriexec_renamechk(l, fvp, f1, tvp, f2);
3353
3354 free(f1, M_TEMP);
3355 free(f2, M_TEMP);
3356 }
3357 #endif /* NVERIEXEC > 0 */
3358
3359 out:
3360 p = l->l_proc;
3361 if (!error) {
3362 VOP_LEASE(tdvp, l->l_cred, LEASE_WRITE);
3363 if (fromnd.ni_dvp != tdvp)
3364 VOP_LEASE(fromnd.ni_dvp, l->l_cred, LEASE_WRITE);
3365 if (tvp) {
3366 VOP_LEASE(tvp, l->l_cred, LEASE_WRITE);
3367 }
3368 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
3369 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
3370 } else {
3371 VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd);
3372 if (tdvp == tvp)
3373 vrele(tdvp);
3374 else
3375 vput(tdvp);
3376 if (tvp)
3377 vput(tvp);
3378 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3379 vrele(fromnd.ni_dvp);
3380 vrele(fvp);
3381 }
3382 vrele(tond.ni_startdir);
3383 PNBUF_PUT(tond.ni_cnd.cn_pnbuf);
3384 out1:
3385 if (fromnd.ni_startdir)
3386 vrele(fromnd.ni_startdir);
3387 PNBUF_PUT(fromnd.ni_cnd.cn_pnbuf);
3388 return (error == -1 ? 0 : error);
3389 }
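
/*
 * Illustrative userland sketch (not part of the kernel build): the
 * `retain' distinction above only matters when source and target are
 * links to the same file.  Under the POSIX front-end the rename is a
 * successful no-op and both names survive; under the traditional BSD
 * front-end the source name is removed.  The fragment below merely
 * observes which names remain afterwards; the paths are hypothetical,
 * and which kernel front-end libc's rename() invokes is not shown in
 * this file.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = open("/tmp/a", O_WRONLY | O_CREAT, 0644);	/* hypothetical */
	if (fd == -1)
		return 1;
	close(fd);
	if (link("/tmp/a", "/tmp/b") == -1)	/* second name, same file */
		return 1;
	if (rename("/tmp/a", "/tmp/b") == -1)
		return 1;
	printf("/tmp/a %s, /tmp/b %s\n",
	    access("/tmp/a", F_OK) == 0 ? "exists" : "is gone",
	    access("/tmp/b", F_OK) == 0 ? "exists" : "is gone");
	return 0;
}
#endif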
3390
3391 /*
3392 * Make a directory file.
3393 */
3394 /* ARGSUSED */
3395 int
3396 sys_mkdir(struct lwp *l, const struct sys_mkdir_args *uap, register_t *retval)
3397 {
3398 /* {
3399 syscallarg(const char *) path;
3400 syscallarg(int) mode;
3401 } */
3402 struct proc *p = l->l_proc;
3403 struct vnode *vp;
3404 struct vattr vattr;
3405 int error;
3406 struct nameidata nd;
3407
3408 NDINIT(&nd, CREATE, LOCKPARENT | CREATEDIR | TRYEMULROOT, UIO_USERSPACE,
3409 SCARG(uap, path));
3410 if ((error = namei(&nd)) != 0)
3411 return (error);
3412 vp = nd.ni_vp;
3413 if (vp != NULL) {
3414 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3415 if (nd.ni_dvp == vp)
3416 vrele(nd.ni_dvp);
3417 else
3418 vput(nd.ni_dvp);
3419 vrele(vp);
3420 return (EEXIST);
3421 }
3422 VATTR_NULL(&vattr);
3423 vattr.va_type = VDIR;
3424 /* We will read cwdi->cwdi_cmask unlocked. */
3425 vattr.va_mode =
3426 (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_cwdi->cwdi_cmask;
3427 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3428 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
3429 if (!error)
3430 vput(nd.ni_vp);
3431 return (error);
3432 }
3433
3434 /*
3435 * Remove a directory file.
3436 */
3437 /* ARGSUSED */
3438 int
3439 sys_rmdir(struct lwp *l, const struct sys_rmdir_args *uap, register_t *retval)
3440 {
3441 /* {
3442 syscallarg(const char *) path;
3443 } */
3444 struct vnode *vp;
3445 int error;
3446 struct nameidata nd;
3447
3448 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
3449 SCARG(uap, path));
3450 if ((error = namei(&nd)) != 0)
3451 return (error);
3452 vp = nd.ni_vp;
3453 if (vp->v_type != VDIR) {
3454 error = ENOTDIR;
3455 goto out;
3456 }
3457 /*
3458 * No rmdir "." please.
3459 */
3460 if (nd.ni_dvp == vp) {
3461 error = EINVAL;
3462 goto out;
3463 }
3464 /*
3465 * The root of a mounted filesystem cannot be deleted.
3466 */
3467 if (vp->v_vflag & VV_ROOT) {
3468 error = EBUSY;
3469 goto out;
3470 }
3471 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3472 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3473 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
3474 return (error);
3475
3476 out:
3477 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3478 if (nd.ni_dvp == vp)
3479 vrele(nd.ni_dvp);
3480 else
3481 vput(nd.ni_dvp);
3482 vput(vp);
3483 return (error);
3484 }
3485
3486 /*
3487 * Read a block of directory entries in a file system independent format.
3488 */
3489 int
3490 sys___getdents30(struct lwp *l, const struct sys___getdents30_args *uap, register_t *retval)
3491 {
3492 /* {
3493 syscallarg(int) fd;
3494 syscallarg(char *) buf;
3495 syscallarg(size_t) count;
3496 } */
3497 struct proc *p = l->l_proc;
3498 struct file *fp;
3499 int error, done;
3500
3501 /* getvnode() will use the descriptor for us */
3502 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3503 return (error);
3504 if ((fp->f_flag & FREAD) == 0) {
3505 error = EBADF;
3506 goto out;
3507 }
3508 error = vn_readdir(fp, SCARG(uap, buf), UIO_USERSPACE,
3509 SCARG(uap, count), &done, l, 0, 0);
3510 ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, buf), done, error);
3511 *retval = done;
3512 out:
3513 FILE_UNUSE(fp, l);
3514 return (error);
3515 }
3516
3517 /*
3518 * Set the mode mask for creation of filesystem nodes.
3519 */
3520 int
3521 sys_umask(struct lwp *l, const struct sys_umask_args *uap, register_t *retval)
3522 {
3523 /* {
3524 syscallarg(mode_t) newmask;
3525 } */
3526 struct proc *p = l->l_proc;
3527 struct cwdinfo *cwdi;
3528
3529 /*
3530 * cwdi->cwdi_cmask will be read unlocked elsewhere. What's
3531 * important is that we serialize changes to the mask. The
3532 * rw_exit() will issue a write memory barrier on our behalf,
3533 * and force the changes out to other CPUs (as it must use an
3534 * atomic operation, draining the local CPU's store buffers).
3535 */
3536 cwdi = p->p_cwdi;
3537 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
3538 *retval = cwdi->cwdi_cmask;
3539 cwdi->cwdi_cmask = SCARG(uap, newmask) & ALLPERMS;
3540 rw_exit(&cwdi->cwdi_lock);
3541
3542 return (0);
3543 }
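
/*
 * Illustrative userland sketch (not part of the kernel build): the mask
 * stored here is the cwdi_cmask consulted (unlocked) by the creation
 * paths, e.g. the `(mode & ACCESSPERMS) &~ cwdi_cmask' computation in
 * sys_mkdir() above.  The paths below are hypothetical.
 */
#if 0	/* example only -- not compiled into the kernel */
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	mode_t old;
	int fd;

	old = umask(022);	/* returns the previous mask */
	/* requested 0666, effective mode 0666 & ~022 == 0644 */
	fd = open("/tmp/example.new", O_WRONLY | O_CREAT | O_EXCL, 0666);
	if (fd != -1)
		close(fd);
	/* requested 0777, effective mode 0777 & ~022 == 0755 */
	(void)mkdir("/tmp/example.dir", 0777);
	(void)umask(old);	/* restore the caller's mask */
	return 0;
}
#endif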
3544
3545 /*
3546  * Void all references to the file by ripping the underlying
3547  * filesystem away from the vnode.
3548 */
3549 /* ARGSUSED */
3550 int
3551 sys_revoke(struct lwp *l, const struct sys_revoke_args *uap, register_t *retval)
3552 {
3553 /* {
3554 syscallarg(const char *) path;
3555 } */
3556 struct vnode *vp;
3557 struct vattr vattr;
3558 int error;
3559 bool revoke;
3560 struct nameidata nd;
3561
3562 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3563 SCARG(uap, path));
3564 if ((error = namei(&nd)) != 0)
3565 return (error);
3566 vp = nd.ni_vp;
3567 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
3568 goto out;
3569 if (kauth_cred_geteuid(l->l_cred) != vattr.va_uid &&
3570 (error = kauth_authorize_generic(l->l_cred,
3571 KAUTH_GENERIC_ISSUSER, NULL)) != 0)
3572 goto out;
3573 simple_lock(&vp->v_interlock);
3574 revoke = (vp->v_usecount > 1 || (vp->v_iflag & (VI_ALIASED|VI_LAYER)));
3575 simple_unlock(&vp->v_interlock);
3576 if (revoke)
3577 VOP_REVOKE(vp, REVOKEALL);
3578 out:
3579 vrele(vp);
3580 return (error);
3581 }
3582
3583 /*
3584 * Convert a user file descriptor to a kernel file entry.
3585 */
3586 int
3587 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
3588 {
3589 struct vnode *vp;
3590 struct file *fp;
3591
3592 if ((fp = fd_getfile(fdp, fd)) == NULL)
3593 return (EBADF);
3594
3595 FILE_USE(fp);
3596
3597 if (fp->f_type != DTYPE_VNODE) {
3598 FILE_UNUSE(fp, NULL);
3599 return (EINVAL);
3600 }
3601
3602 vp = (struct vnode *)fp->f_data;
3603 if (vp->v_type == VBAD) {
3604 FILE_UNUSE(fp, NULL);
3605 return (EBADF);
3606 }
3607
3608 *fpp = fp;
3609 return (0);
3610 }
3611