1 /* $NetBSD: vfs_syscalls.c,v 1.337 2007/12/26 16:01:37 ad Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.337 2007/12/26 16:01:37 ad Exp $");
41
42 #include "opt_compat_netbsd.h"
43 #include "opt_compat_43.h"
44 #include "opt_fileassoc.h"
45 #include "fss.h"
46 #include "veriexec.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/filedesc.h>
52 #include <sys/kernel.h>
53 #include <sys/file.h>
54 #include <sys/stat.h>
55 #include <sys/vnode.h>
56 #include <sys/mount.h>
57 #include <sys/proc.h>
58 #include <sys/uio.h>
59 #include <sys/malloc.h>
60 #include <sys/kmem.h>
61 #include <sys/dirent.h>
62 #include <sys/sysctl.h>
63 #include <sys/syscallargs.h>
64 #include <sys/vfs_syscalls.h>
65 #include <sys/ktrace.h>
66 #ifdef FILEASSOC
67 #include <sys/fileassoc.h>
68 #endif /* FILEASSOC */
69 #include <sys/verified_exec.h>
70 #include <sys/kauth.h>
71
72 #include <miscfs/genfs/genfs.h>
73 #include <miscfs/syncfs/syncfs.h>
74
75 #ifdef COMPAT_30
76 #include "opt_nfsserver.h"
77 #include <nfs/rpcv2.h>
78 #endif
79 #include <nfs/nfsproto.h>
80 #ifdef COMPAT_30
81 #include <nfs/nfs.h>
82 #include <nfs/nfs_var.h>
83 #endif
84
85 #if NFSS > 0
86 #include <dev/fssvar.h>
87 #endif
88
89 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount struct");
90
91 static int change_dir(struct nameidata *, struct lwp *);
92 static int change_flags(struct vnode *, u_long, struct lwp *);
93 static int change_mode(struct vnode *, int, struct lwp *l);
94 static int change_owner(struct vnode *, uid_t, gid_t, struct lwp *, int);
95
96 void checkdirs(struct vnode *);
97
98 int dovfsusermount = 0;
99
100 /*
101 * Virtual File System System Calls
102 */
103
104 /*
105 * Mount a file system.
106 */
107
108 #if defined(COMPAT_09) || defined(COMPAT_43)
109 /*
110 * This table is used to maintain compatibility with 4.3BSD
111 * and NetBSD 0.9 mount syscalls. Note, the order is important!
112 *
113 * Do not modify this table. It should only contain filesystems
114 * supported by NetBSD 0.9 and 4.3BSD.
115 */
116 const char * const mountcompatnames[] = {
117 NULL, /* 0 = MOUNT_NONE */
118 MOUNT_FFS, /* 1 = MOUNT_UFS */
119 MOUNT_NFS, /* 2 */
120 MOUNT_MFS, /* 3 */
121 MOUNT_MSDOS, /* 4 */
122 MOUNT_CD9660, /* 5 = MOUNT_ISOFS */
123 MOUNT_FDESC, /* 6 */
124 MOUNT_KERNFS, /* 7 */
125 NULL, /* 8 = MOUNT_DEVFS */
126 MOUNT_AFS, /* 9 */
127 };
128 const int nmountcompatnames = sizeof(mountcompatnames) /
129 sizeof(mountcompatnames[0]);
130 #endif /* COMPAT_09 || COMPAT_43 */
131
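/*
 * Update the mount options of an already mounted file system
 * (MNT_UPDATE).  The vnode must be the root of the mount.
 */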
132 static int
133 mount_update(struct lwp *l, struct vnode *vp, const char *path, int flags,
134 void *data, size_t *data_len)
135 {
136 struct mount *mp;
137 int error = 0, saved_flags;
138
139 mp = vp->v_mount;
140 saved_flags = mp->mnt_flag;
141
142 /* We can operate only on VV_ROOT nodes. */
143 if ((vp->v_vflag & VV_ROOT) == 0) {
144 error = EINVAL;
145 goto out;
146 }
147
148 /*
149 * We only allow the filesystem to be reloaded if it
150 * is currently mounted read-only.
151 */
152 if (flags & MNT_RELOAD && !(mp->mnt_flag & MNT_RDONLY)) {
153 error = EOPNOTSUPP; /* Needs translation */
154 goto out;
155 }
156
157 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
158 KAUTH_REQ_SYSTEM_MOUNT_UPDATE, mp, KAUTH_ARG(flags), data);
159 if (error)
160 goto out;
161
162 if (vfs_busy(mp, LK_NOWAIT, 0)) {
163 error = EPERM;
164 goto out;
165 }
166
167 mp->mnt_flag &= ~MNT_OP_FLAGS;
168 mp->mnt_flag |= flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
169
170 /*
171 * Set the mount level flags.
172 */
173 if (flags & MNT_RDONLY)
174 mp->mnt_flag |= MNT_RDONLY;
175 else if (mp->mnt_flag & MNT_RDONLY)
176 mp->mnt_iflag |= IMNT_WANTRDWR;
177 mp->mnt_flag &=
178 ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
179 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
180 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP);
181 mp->mnt_flag |= flags &
182 (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
183 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
184 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
185 MNT_IGNORE);
186
187 error = VFS_MOUNT(mp, path, data, data_len);
188
189 #if defined(COMPAT_30) && defined(NFSSERVER)
190 if (error && data != NULL) {
191 int error2;
192
193 /* Update failed; let's try and see if it was an
194 * export request. */
195 error2 = nfs_update_exports_30(mp, path, data, l);
196
197 /* Only update error code if the export request was
198 * understood but some problem occurred while
199 * processing it. */
200 if (error2 != EJUSTRETURN)
201 error = error2;
202 }
203 #endif
204 if (mp->mnt_iflag & IMNT_WANTRDWR)
205 mp->mnt_flag &= ~MNT_RDONLY;
206 if (error)
207 mp->mnt_flag = saved_flags;
208 mp->mnt_flag &= ~MNT_OP_FLAGS;
209 mp->mnt_iflag &= ~IMNT_WANTRDWR;
210 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0) {
211 if (mp->mnt_syncer == NULL)
212 error = vfs_allocate_syncvnode(mp);
213 } else {
214 if (mp->mnt_syncer != NULL)
215 vfs_deallocate_syncvnode(mp);
216 }
217 vfs_unbusy(mp);
218
219 out:
220 return (error);
221 }
222
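/*
 * Resolve the file system type named by the user (or, for old
 * binaries, given as a small integer) to a set of vfs operations.
 */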
223 static int
224 mount_get_vfsops(const char *fstype, struct vfsops **vfsops)
225 {
226 char fstypename[sizeof(((struct statvfs *)NULL)->f_fstypename)];
227 int error;
228
229 /* Copy file-system type from userspace. */
230 error = copyinstr(fstype, fstypename, sizeof(fstypename), NULL);
231 if (error) {
232 #if defined(COMPAT_09) || defined(COMPAT_43)
233 /*
234 * Historically, filesystem types were identified by numbers.
235 * If we get an integer for the filesystem type instead of a
236 * string, we check to see if it matches one of the historic
237 * filesystem types.
238 */
239 u_long fsindex = (u_long)fstype;
240 if (fsindex >= nmountcompatnames ||
241 mountcompatnames[fsindex] == NULL)
242 return ENODEV;
243 strlcpy(fstypename, mountcompatnames[fsindex],
244 sizeof(fstypename));
245 #else
246 return error;
247 #endif
248 }
249
250 #ifdef COMPAT_10
251 /* Accept `ufs' as an alias for `ffs'. */
252 if (strcmp(fstypename, "ufs") == 0)
253 fstypename[0] = 'f';
254 #endif
255
256 if ((*vfsops = vfs_getopsbyname(fstypename)) == NULL)
257 return ENODEV;
258 return 0;
259 }
260
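/*
 * Mount a new file system on the directory vnode *vpp.  Locking of the
 * covered vnode is handled here; once the mount is established, *vpp is
 * cleared so that the caller will not vput() it.
 */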
261 static int
262 mount_domount(struct lwp *l, struct vnode **vpp, struct vfsops *vfsops,
263 const char *path, int flags, void *data, size_t *data_len, u_int recurse)
264 {
265 struct mount *mp = NULL;
266 struct vnode *vp = *vpp;
267 struct vattr va;
268 int error;
269
270 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
271 KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
272 if (error)
273 return error;
274
275 /* Can't make a non-dir a mount-point (from here anyway). */
276 if (vp->v_type != VDIR)
277 return ENOTDIR;
278
279 /*
280 * If the user is not root, ensure that they own the directory
281 * onto which we are attempting to mount.
282 */
283 if ((error = VOP_GETATTR(vp, &va, l->l_cred)) != 0 ||
284 (va.va_uid != kauth_cred_geteuid(l->l_cred) &&
285 (error = kauth_authorize_generic(l->l_cred,
286 KAUTH_GENERIC_ISSUSER, NULL)) != 0)) {
287 return error;
288 }
289
290 if (flags & MNT_EXPORTED)
291 return EINVAL;
292
293 if ((error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
294 return error;
295
296 /*
297 * Check that no file system is already mounted on this vnode.
298 */
299 if (vp->v_mountedhere != NULL)
300 return EBUSY;
301
302 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
303
304 mp->mnt_op = vfsops;
305
306 TAILQ_INIT(&mp->mnt_vnodelist);
307 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
308 simple_lock_init(&mp->mnt_slock);
309 (void)vfs_busy(mp, LK_NOWAIT, 0);
310
311 mp->mnt_vnodecovered = vp;
312 mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);
313 mp->mnt_unmounter = NULL;
314 mount_initspecific(mp);
315
316 /*
317 * The underlying file system may refuse the mount for
318 * various reasons. Allow the user to force it to happen.
319 *
320 * Set the mount level flags.
321 */
322 mp->mnt_flag = flags &
323 (MNT_FORCE | MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
324 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
325 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
326 MNT_IGNORE | MNT_RDONLY);
327
328 error = VFS_MOUNT(mp, path, data, data_len);
329 mp->mnt_flag &= ~MNT_OP_FLAGS;
330
331 /*
332 * Put the new filesystem on the mount list after root.
333 */
334 cache_purge(vp);
335 if (error != 0) {
336 vp->v_mountedhere = NULL;
337 mp->mnt_op->vfs_refcount--;
338 vfs_unbusy(mp);
339 vfs_destroy(mp);
340 return error;
341 }
342
343 mp->mnt_iflag &= ~IMNT_WANTRDWR;
344 vp->v_mountedhere = mp;
345 mutex_enter(&mountlist_lock);
346 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
347 mutex_exit(&mountlist_lock);
348 vn_restorerecurse(vp, recurse);
349 VOP_UNLOCK(vp, 0);
350 checkdirs(vp);
351 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
352 error = vfs_allocate_syncvnode(mp);
353 vfs_unbusy(mp);
354 (void) VFS_STATVFS(mp, &mp->mnt_stat);
355 error = VFS_START(mp, 0);
356 if (error)
357 vrele(vp);
358 *vpp = NULL;
359 return error;
360 }
361
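/*
 * Retrieve the current mount parameters of a mounted file system
 * (MNT_GETARGS).  The vnode must be the root of the mount.
 */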
362 static int
363 mount_getargs(struct lwp *l, struct vnode *vp, const char *path, int flags,
364 void *data, size_t *data_len)
365 {
366 struct mount *mp;
367 int error;
368
369 /* If MNT_GETARGS is specified, it should be the only flag. */
370 if (flags & ~MNT_GETARGS)
371 return EINVAL;
372
373 mp = vp->v_mount;
374
375 /* XXX: probably some notion of "can see" here if we want isolation. */
376 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
377 KAUTH_REQ_SYSTEM_MOUNT_GET, mp, data, NULL);
378 if (error)
379 return error;
380
381 if ((vp->v_vflag & VV_ROOT) == 0)
382 return EINVAL;
383
384 if (vfs_busy(mp, LK_NOWAIT, 0))
385 return EPERM;
386
387 mp->mnt_flag &= ~MNT_OP_FLAGS;
388 mp->mnt_flag |= MNT_GETARGS;
389 error = VFS_MOUNT(mp, path, data, data_len);
390 mp->mnt_flag &= ~MNT_OP_FLAGS;
391
392 vfs_unbusy(mp);
393 return (error);
394 }
395
396 #ifdef COMPAT_40
397 /* ARGSUSED */
398 int
399 compat_40_sys_mount(struct lwp *l, const struct compat_40_sys_mount_args *uap, register_t *retval)
400 {
401 /* {
402 syscallarg(const char *) type;
403 syscallarg(const char *) path;
404 syscallarg(int) flags;
405 syscallarg(void *) data;
406 } */
407 register_t dummy;
408
409 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
410 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE, 0, &dummy);
411 }
412 #endif
413
414 int
415 sys___mount50(struct lwp *l, const struct sys___mount50_args *uap, register_t *retval)
416 {
417 /* {
418 syscallarg(const char *) type;
419 syscallarg(const char *) path;
420 syscallarg(int) flags;
421 syscallarg(void *) data;
422 syscallarg(size_t) data_len;
423 } */
424
425 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
426 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE,
427 SCARG(uap, data_len), retval);
428 }
429
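/*
 * Common code for mount(2) and its compatibility variants: look up the
 * vnode to be covered, copy in the file system specific arguments and
 * dispatch to mount_getargs(), mount_update() or mount_domount().
 */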
430 int
431 do_sys_mount(struct lwp *l, struct vfsops *vfsops, const char *type,
432 const char *path, int flags, void *data, enum uio_seg data_seg,
433 size_t data_len, register_t *retval)
434 {
435 struct vnode *vp;
436 struct nameidata nd;
437 void *data_buf = data;
438 u_int recurse;
439 int error;
440
441 /*
442 * Get vnode to be covered
443 */
444 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
445 if ((error = namei(&nd)) != 0)
446 return (error);
447 vp = nd.ni_vp;
448
449 /*
450 * A lookup in VFS_MOUNT might result in an attempt to
451 * lock this vnode again, so make the lock recursive.
452 */
453 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
454 recurse = vn_setrecurse(vp);
455
456 if (vfsops == NULL) {
457 if (flags & (MNT_GETARGS | MNT_UPDATE))
458 vfsops = vp->v_mount->mnt_op;
459 else {
460 /* 'type' is userspace */
461 error = mount_get_vfsops(type, &vfsops);
462 if (error != 0)
463 goto done;
464 }
465 }
466
467 if (data != NULL && data_seg == UIO_USERSPACE) {
468 if (data_len == 0) {
469 /* No length supplied, use default for filesystem */
470 data_len = vfsops->vfs_min_mount_data;
471 if (data_len > VFS_MAX_MOUNT_DATA) {
472 /* maybe a force loaded old LKM */
473 error = EINVAL;
474 goto done;
475 }
476 #ifdef COMPAT_30
477 /* Hopefully a longer buffer won't make copyin() fail */
478 if (flags & MNT_UPDATE
479 && data_len < sizeof (struct mnt_export_args30))
480 data_len = sizeof (struct mnt_export_args30);
481 #endif
482 }
483 data_buf = malloc(data_len, M_TEMP, M_WAITOK);
484
485 /* NFS needs the buffer even for mnt_getargs .... */
486 error = copyin(data, data_buf, data_len);
487 if (error != 0)
488 goto done;
489 }
490
491 if (flags & MNT_GETARGS) {
492 if (data_len == 0) {
493 error = EINVAL;
494 goto done;
495 }
496 error = mount_getargs(l, vp, path, flags, data_buf, &data_len);
497 if (error != 0)
498 goto done;
499 if (data_seg == UIO_USERSPACE)
500 error = copyout(data_buf, data, data_len);
501 *retval = data_len;
502 } else if (flags & MNT_UPDATE) {
503 error = mount_update(l, vp, path, flags, data_buf, &data_len);
504 } else {
505 /* Locking is handled internally in mount_domount(). */
506 error = mount_domount(l, &vp, vfsops, path, flags, data_buf,
507 &data_len, recurse);
508 }
509
510 done:
511 if (vp != NULL) {
512 vn_restorerecurse(vp, recurse);
513 vput(vp);
514 }
515 if (data_buf != data)
516 free(data_buf, M_TEMP);
517 return (error);
518 }
519
520 /*
521 * Scan all active processes to see if any of them have a current
522 * or root directory onto which the new filesystem has just been
523 * mounted. If so, replace them with the new mount point.
524 */
525 void
526 checkdirs(struct vnode *olddp)
527 {
528 struct cwdinfo *cwdi;
529 struct vnode *newdp;
530 struct proc *p;
531
532 if (olddp->v_usecount == 1)
533 return;
534 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
535 panic("mount: lost mount");
536 mutex_enter(&proclist_lock);
537 PROCLIST_FOREACH(p, &allproc) {
538 cwdi = p->p_cwdi;
539 if (!cwdi)
540 continue;
541 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
542 if (cwdi->cwdi_cdir == olddp) {
543 vrele(cwdi->cwdi_cdir);
544 VREF(newdp);
545 cwdi->cwdi_cdir = newdp;
546 }
547 if (cwdi->cwdi_rdir == olddp) {
548 vrele(cwdi->cwdi_rdir);
549 VREF(newdp);
550 cwdi->cwdi_rdir = newdp;
551 }
552 rw_exit(&cwdi->cwdi_lock);
553 }
554 mutex_exit(&proclist_lock);
555 if (rootvnode == olddp) {
556 vrele(rootvnode);
557 VREF(newdp);
558 rootvnode = newdp;
559 }
560 vput(newdp);
561 }
562
563 /*
564 * Unmount a file system.
565 *
566 * Note: unmount takes a path to the vnode mounted on as its argument,
567 * not the special file (as it did historically).
568 */
569 /* ARGSUSED */
570 int
571 sys_unmount(struct lwp *l, const struct sys_unmount_args *uap, register_t *retval)
572 {
573 /* {
574 syscallarg(const char *) path;
575 syscallarg(int) flags;
576 } */
577 struct vnode *vp;
578 struct mount *mp;
579 int error;
580 struct nameidata nd;
581
582 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
583 SCARG(uap, path));
584 if ((error = namei(&nd)) != 0)
585 return (error);
586 vp = nd.ni_vp;
587 mp = vp->v_mount;
588
589 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
590 KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT, mp, NULL, NULL);
591 if (error) {
592 vput(vp);
593 return (error);
594 }
595
596 /*
597 * Don't allow unmounting the root file system.
598 */
599 if (mp->mnt_flag & MNT_ROOTFS) {
600 vput(vp);
601 return (EINVAL);
602 }
603
604 /*
605 * Must be the root of the filesystem
606 */
607 if ((vp->v_vflag & VV_ROOT) == 0) {
608 vput(vp);
609 return (EINVAL);
610 }
611 vput(vp);
612
613 /*
614 * XXX Freeze syncer. Must do this before locking the
615 * mount point. See dounmount() for details.
616 */
617 mutex_enter(&syncer_mutex);
618
619 if (vfs_busy(mp, 0, 0)) {
620 mutex_exit(&syncer_mutex);
621 return (EBUSY);
622 }
623
624 return (dounmount(mp, SCARG(uap, flags), l));
625 }
626
627 /*
628 * Do the actual file system unmount. File system is assumed to have been
629 * marked busy by the caller.
630 */
631 int
632 dounmount(struct mount *mp, int flags, struct lwp *l)
633 {
634 struct vnode *coveredvp;
635 int error;
636 int async;
637 int used_syncer;
638
639 #if NVERIEXEC > 0
640 error = veriexec_unmountchk(mp);
641 if (error)
642 return (error);
643 #endif /* NVERIEXEC > 0 */
644
645 mutex_enter(&mountlist_lock);
646 vfs_unbusy(mp);
647 used_syncer = (mp->mnt_syncer != NULL);
648
649 /*
650 * XXX Syncer must be frozen when we get here. This should really
651 * be done on a per-mountpoint basis, but especially the softdep
652 * code possibly called from the syncer doesn't exactly work on a
653 * per-mountpoint basis, so the softdep code would become a maze
654 * of vfs_busy() calls.
655 *
656 * The caller of dounmount() must acquire syncer_mutex because
657 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
658 * order, and we must preserve that order to avoid deadlock.
659 *
660 * So, if the file system did not use the syncer, now is
661 * the time to release the syncer_mutex.
662 */
663 if (used_syncer == 0)
664 mutex_exit(&syncer_mutex);
665
666 mp->mnt_iflag |= IMNT_UNMOUNT;
667 mp->mnt_unmounter = l;
668 mutex_exit(&mountlist_lock); /* XXX */
669 lockmgr(&mp->mnt_lock, LK_DRAIN, NULL);
670
671 async = mp->mnt_flag & MNT_ASYNC;
672 mp->mnt_flag &= ~MNT_ASYNC;
673 cache_purgevfs(mp); /* remove cache entries for this file sys */
674 if (mp->mnt_syncer != NULL)
675 vfs_deallocate_syncvnode(mp);
676 error = 0;
677 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
678 #if NFSS > 0
679 error = fss_umount_hook(mp, (flags & MNT_FORCE));
680 #endif
681 if (error == 0)
682 error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
683 }
684 if (error == 0 || (flags & MNT_FORCE))
685 error = VFS_UNMOUNT(mp, flags);
686 if (error) {
687 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
688 (void) vfs_allocate_syncvnode(mp);
689 mutex_enter(&mountlist_lock);
690 mp->mnt_iflag &= ~IMNT_UNMOUNT;
691 mp->mnt_unmounter = NULL;
692 mp->mnt_flag |= async;
693 mutex_exit(&mountlist_lock); /* XXX */
694 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_REENABLE,
695 NULL);
696 if (used_syncer)
697 mutex_exit(&syncer_mutex);
698 simple_lock(&mp->mnt_slock);
699 while (mp->mnt_wcnt > 0) {
700 wakeup(mp);
701 ltsleep(&mp->mnt_wcnt, PVFS, "mntwcnt1",
702 0, &mp->mnt_slock);
703 }
704 simple_unlock(&mp->mnt_slock);
705 return (error);
706 }
707 mutex_enter(&mountlist_lock);
708 CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
709 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP)
710 coveredvp->v_mountedhere = NULL;
711 if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
712 panic("unmount: dangling vnode");
713 mp->mnt_iflag |= IMNT_GONE;
714 mutex_exit(&mountlist_lock);
715 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
716 if (coveredvp != NULLVP)
717 vrele(coveredvp);
718 if (used_syncer)
719 mutex_exit(&syncer_mutex);
720 simple_lock(&mp->mnt_slock);
721 while (mp->mnt_wcnt > 0) {
722 wakeup(mp);
723 ltsleep(&mp->mnt_wcnt, PVFS, "mntwcnt2", 0, &mp->mnt_slock);
724 }
725 simple_unlock(&mp->mnt_slock);
726 vfs_hooks_unmount(mp);
727 vfs_delref(mp->mnt_op);
728 vfs_destroy(mp);
729 return (0);
730 }
731
732 /*
733 * Sync each mounted filesystem.
734 */
735 #ifdef DEBUG
736 int syncprt = 0;
737 struct ctldebug debug0 = { "syncprt", &syncprt };
738 #endif
739
740 /* ARGSUSED */
741 int
742 sys_sync(struct lwp *l, const void *v, register_t *retval)
743 {
744 struct mount *mp, *nmp;
745 int asyncflag;
746
747 if (l == NULL)
748 l = &lwp0;
749
750 mutex_enter(&mountlist_lock);
751 for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
752 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
753 nmp = mp->mnt_list.cqe_prev;
754 continue;
755 }
756 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
757 asyncflag = mp->mnt_flag & MNT_ASYNC;
758 mp->mnt_flag &= ~MNT_ASYNC;
759 /* XXXSMP hack, sync is slow. */
760 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
761 KERNEL_LOCK(1, NULL);
762 }
763 VFS_SYNC(mp, MNT_NOWAIT, l->l_cred);
764 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
765 KERNEL_UNLOCK_ONE(NULL);
766 }
767 if (asyncflag)
768 mp->mnt_flag |= MNT_ASYNC;
769 }
770 mutex_enter(&mountlist_lock);
771 nmp = mp->mnt_list.cqe_prev;
772 vfs_unbusy(mp);
773
774 }
775 mutex_exit(&mountlist_lock);
776 #ifdef DEBUG
777 if (syncprt)
778 vfs_bufstats();
779 #endif /* DEBUG */
780 return (0);
781 }
782
783 /*
784 * Change filesystem quotas.
785 */
786 /* ARGSUSED */
787 int
788 sys_quotactl(struct lwp *l, const struct sys_quotactl_args *uap, register_t *retval)
789 {
790 /* {
791 syscallarg(const char *) path;
792 syscallarg(int) cmd;
793 syscallarg(int) uid;
794 syscallarg(void *) arg;
795 } */
796 struct mount *mp;
797 int error;
798 struct nameidata nd;
799
800 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
801 SCARG(uap, path));
802 if ((error = namei(&nd)) != 0)
803 return (error);
804 mp = nd.ni_vp->v_mount;
805 error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
806 SCARG(uap, arg));
807 vrele(nd.ni_vp);
808 return (error);
809 }
810
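/*
 * Fill *sp with statistics for the mount point mp and sanitize
 * f_mntonname relative to the caller's root directory, so that a
 * chrooted process only sees mount points below its root.
 */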
811 int
812 dostatvfs(struct mount *mp, struct statvfs *sp, struct lwp *l, int flags,
813 int root)
814 {
815 struct cwdinfo *cwdi = l->l_proc->p_cwdi;
816 int error = 0;
817
818 /*
819 * If MNT_NOWAIT or MNT_LAZY is specified, do not
820 * refresh the fsstat cache; only MNT_WAIT (or a zero
821 * flags argument) forces the cache to be refreshed.
822 */
823 if (flags == MNT_NOWAIT || flags == MNT_LAZY ||
824 (flags != MNT_WAIT && flags != 0)) {
825 memcpy(sp, &mp->mnt_stat, sizeof(*sp));
826 goto done;
827 }
828
829 /* Get the filesystem stats now */
830 memset(sp, 0, sizeof(*sp));
831 if ((error = VFS_STATVFS(mp, sp)) != 0) {
832 return error;
833 }
834
835 if (cwdi->cwdi_rdir == NULL)
836 (void)memcpy(&mp->mnt_stat, sp, sizeof(mp->mnt_stat));
837 done:
838 if (cwdi->cwdi_rdir != NULL) {
839 size_t len;
840 char *bp;
841 char *path = PNBUF_GET();
842
843 bp = path + MAXPATHLEN;
844 *--bp = '\0';
845 rw_enter(&cwdi->cwdi_lock, RW_READER);
846 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp, path,
847 MAXPATHLEN / 2, 0, l);
848 rw_exit(&cwdi->cwdi_lock);
849 if (error) {
850 PNBUF_PUT(path);
851 return error;
852 }
853 len = strlen(bp);
854 /*
855 * for mount points that are below our root, we can see
856 * them, so we fix up the pathname and return them. The
857 * rest we cannot see, so we don't allow viewing the
858 * data.
859 */
860 if (strncmp(bp, sp->f_mntonname, len) == 0) {
861 strlcpy(sp->f_mntonname, &sp->f_mntonname[len],
862 sizeof(sp->f_mntonname));
863 if (sp->f_mntonname[0] == '\0')
864 (void)strlcpy(sp->f_mntonname, "/",
865 sizeof(sp->f_mntonname));
866 } else {
867 if (root)
868 (void)strlcpy(sp->f_mntonname, "/",
869 sizeof(sp->f_mntonname));
870 else
871 error = EPERM;
872 }
873 PNBUF_PUT(path);
874 }
875 sp->f_flag = mp->mnt_flag & MNT_VISFLAGMASK;
876 return error;
877 }
878
879 /*
880 * Get filesystem statistics by path.
881 */
882 int
883 do_sys_pstatvfs(struct lwp *l, const char *path, int flags, struct statvfs *sb)
884 {
885 struct mount *mp;
886 int error;
887 struct nameidata nd;
888
889 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
890 if ((error = namei(&nd)) != 0)
891 return error;
892 mp = nd.ni_vp->v_mount;
893 error = dostatvfs(mp, sb, l, flags, 1);
894 vrele(nd.ni_vp);
895 return error;
896 }
897
898 /* ARGSUSED */
899 int
900 sys_statvfs1(struct lwp *l, const struct sys_statvfs1_args *uap, register_t *retval)
901 {
902 /* {
903 syscallarg(const char *) path;
904 syscallarg(struct statvfs *) buf;
905 syscallarg(int) flags;
906 } */
907 struct statvfs *sb;
908 int error;
909
910 sb = STATVFSBUF_GET();
911 error = do_sys_pstatvfs(l, SCARG(uap, path), SCARG(uap, flags), sb);
912 if (error == 0)
913 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
914 STATVFSBUF_PUT(sb);
915 return error;
916 }
917
918 /*
919 * Get filesystem statistics by fd.
920 */
921 int
922 do_sys_fstatvfs(struct lwp *l, int fd, int flags, struct statvfs *sb)
923 {
924 struct proc *p = l->l_proc;
925 struct file *fp;
926 struct mount *mp;
927 int error;
928
929 /* getvnode() will use the descriptor for us */
930 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
931 return (error);
932 mp = ((struct vnode *)fp->f_data)->v_mount;
933 error = dostatvfs(mp, sb, l, flags, 1);
934 FILE_UNUSE(fp, l);
935 return error;
936 }
937
938 /* ARGSUSED */
939 int
940 sys_fstatvfs1(struct lwp *l, const struct sys_fstatvfs1_args *uap, register_t *retval)
941 {
942 /* {
943 syscallarg(int) fd;
944 syscallarg(struct statvfs *) buf;
945 syscallarg(int) flags;
946 } */
947 struct statvfs *sb;
948 int error;
949
950 sb = STATVFSBUF_GET();
951 error = do_sys_fstatvfs(l, SCARG(uap, fd), SCARG(uap, flags), sb);
952 if (error == 0)
953 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
954 STATVFSBUF_PUT(sb);
955 return error;
956 }
957
958
959 /*
960 * Get statistics on all filesystems.
961 */
962 int
963 do_sys_getvfsstat(struct lwp *l, void *sfsp, size_t bufsize, int flags,
964 int (*copyfn)(const void *, void *, size_t), size_t entry_sz,
965 register_t *retval)
966 {
967 int root = 0;
968 struct proc *p = l->l_proc;
969 struct mount *mp, *nmp;
970 struct statvfs *sb;
971 size_t count, maxcount;
972 int error = 0;
973
974 sb = STATVFSBUF_GET();
975 maxcount = bufsize / entry_sz;
976 mutex_enter(&mountlist_lock);
977 count = 0;
978 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
979 mp = nmp) {
980 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
981 nmp = CIRCLEQ_NEXT(mp, mnt_list);
982 continue;
983 }
984 if (sfsp && count < maxcount) {
985 error = dostatvfs(mp, sb, l, flags, 0);
986 if (error) {
987 mutex_enter(&mountlist_lock);
988 nmp = CIRCLEQ_NEXT(mp, mnt_list);
989 vfs_unbusy(mp);
990 continue;
991 }
992 error = copyfn(sb, sfsp, entry_sz);
993 if (error) {
994 vfs_unbusy(mp);
995 goto out;
996 }
997 sfsp = (char *)sfsp + entry_sz;
998 root |= strcmp(sb->f_mntonname, "/") == 0;
999 }
1000 count++;
1001 mutex_enter(&mountlist_lock);
1002 nmp = CIRCLEQ_NEXT(mp, mnt_list);
1003 vfs_unbusy(mp);
1004 }
1005
1006 mutex_exit(&mountlist_lock);
1007 if (root == 0 && p->p_cwdi->cwdi_rdir) {
1008 /*
1009 * fake a root entry
1010 */
1011 error = dostatvfs(p->p_cwdi->cwdi_rdir->v_mount,
1012 sb, l, flags, 1);
1013 if (error != 0)
1014 goto out;
1015 if (sfsp)
1016 error = copyfn(sb, sfsp, entry_sz);
1017 count++;
1018 }
1019 if (sfsp && count > maxcount)
1020 *retval = maxcount;
1021 else
1022 *retval = count;
1023 out:
1024 STATVFSBUF_PUT(sb);
1025 return error;
1026 }
1027
1028 int
1029 sys_getvfsstat(struct lwp *l, const struct sys_getvfsstat_args *uap, register_t *retval)
1030 {
1031 /* {
1032 syscallarg(struct statvfs *) buf;
1033 syscallarg(size_t) bufsize;
1034 syscallarg(int) flags;
1035 } */
1036
1037 return do_sys_getvfsstat(l, SCARG(uap, buf), SCARG(uap, bufsize),
1038 SCARG(uap, flags), copyout, sizeof (struct statvfs), retval);
1039 }
1040
1041 /*
1042 * Change current working directory to a given file descriptor.
1043 */
1044 /* ARGSUSED */
1045 int
1046 sys_fchdir(struct lwp *l, const struct sys_fchdir_args *uap, register_t *retval)
1047 {
1048 /* {
1049 syscallarg(int) fd;
1050 } */
1051 struct proc *p = l->l_proc;
1052 struct filedesc *fdp = p->p_fd;
1053 struct cwdinfo *cwdi;
1054 struct vnode *vp, *tdp;
1055 struct mount *mp;
1056 struct file *fp;
1057 int error;
1058
1059 /* getvnode() will use the descriptor for us */
1060 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1061 return (error);
1062 vp = (struct vnode *)fp->f_data;
1063
1064 VREF(vp);
1065 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1066 if (vp->v_type != VDIR)
1067 error = ENOTDIR;
1068 else
1069 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1070 if (error) {
1071 vput(vp);
1072 goto out;
1073 }
1074 while ((mp = vp->v_mountedhere) != NULL) {
1075 if (vfs_busy(mp, 0, 0))
1076 continue;
1077
1078 vput(vp);
1079 error = VFS_ROOT(mp, &tdp);
1080 vfs_unbusy(mp);
1081 if (error)
1082 goto out;
1083 vp = tdp;
1084 }
1085 VOP_UNLOCK(vp, 0);
1086
1087 /*
1088 * Disallow changing to a directory not under the process's
1089 * current root directory (if there is one).
1090 */
1091 cwdi = p->p_cwdi;
1092 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1093 if (cwdi->cwdi_rdir && !vn_isunder(vp, NULL, l)) {
1094 vrele(vp);
1095 error = EPERM; /* operation not permitted */
1096 } else {
1097 vrele(cwdi->cwdi_cdir);
1098 cwdi->cwdi_cdir = vp;
1099 }
1100 rw_exit(&cwdi->cwdi_lock);
1101
1102 out:
1103 FILE_UNUSE(fp, l);
1104 return (error);
1105 }
1106
1107 /*
1108 * Change this process's notion of the root directory to a given file
1109 * descriptor.
1110 */
1111 int
1112 sys_fchroot(struct lwp *l, const struct sys_fchroot_args *uap, register_t *retval)
1113 {
1114 struct proc *p = l->l_proc;
1115 struct filedesc *fdp = p->p_fd;
1116 struct cwdinfo *cwdi;
1117 struct vnode *vp;
1118 struct file *fp;
1119 int error;
1120
1121 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1122 KAUTH_REQ_SYSTEM_CHROOT_FCHROOT, NULL, NULL, NULL)) != 0)
1123 return error;
1124 /* getvnode() will use the descriptor for us */
1125 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1126 return error;
1127 vp = (struct vnode *) fp->f_data;
1128 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1129 if (vp->v_type != VDIR)
1130 error = ENOTDIR;
1131 else
1132 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1133 VOP_UNLOCK(vp, 0);
1134 if (error)
1135 goto out;
1136 VREF(vp);
1137
1138 /*
1139 * Prevent escaping from the chroot by making sure the current
1140 * working directory is under the new root: silently chdir to
1141 * the new root if it is not already there.
1142 */
1143 cwdi = p->p_cwdi;
1144 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1145 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1146 /*
1147 * XXX would be more failsafe to change directory to a
1148 * deadfs node here instead
1149 */
1150 vrele(cwdi->cwdi_cdir);
1151 VREF(vp);
1152 cwdi->cwdi_cdir = vp;
1153 }
1154
1155 if (cwdi->cwdi_rdir != NULL)
1156 vrele(cwdi->cwdi_rdir);
1157 cwdi->cwdi_rdir = vp;
1158 rw_exit(&cwdi->cwdi_lock);
1159
1160 out:
1161 FILE_UNUSE(fp, l);
1162 return (error);
1163 }
1164
1165 /*
1166 * Change current working directory (``.'').
1167 */
1168 /* ARGSUSED */
1169 int
1170 sys_chdir(struct lwp *l, const struct sys_chdir_args *uap, register_t *retval)
1171 {
1172 /* {
1173 syscallarg(const char *) path;
1174 } */
1175 struct proc *p = l->l_proc;
1176 struct cwdinfo *cwdi;
1177 int error;
1178 struct nameidata nd;
1179
1180 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1181 SCARG(uap, path));
1182 if ((error = change_dir(&nd, l)) != 0)
1183 return (error);
1184 cwdi = p->p_cwdi;
1185 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1186 vrele(cwdi->cwdi_cdir);
1187 cwdi->cwdi_cdir = nd.ni_vp;
1188 rw_exit(&cwdi->cwdi_lock);
1189 return (0);
1190 }
1191
1192 /*
1193 * Change notion of root (``/'') directory.
1194 */
1195 /* ARGSUSED */
1196 int
1197 sys_chroot(struct lwp *l, const struct sys_chroot_args *uap, register_t *retval)
1198 {
1199 /* {
1200 syscallarg(const char *) path;
1201 } */
1202 struct proc *p = l->l_proc;
1203 struct cwdinfo *cwdi;
1204 struct vnode *vp;
1205 int error;
1206 struct nameidata nd;
1207
1208 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1209 KAUTH_REQ_SYSTEM_CHROOT_CHROOT, NULL, NULL, NULL)) != 0)
1210 return (error);
1211 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1212 SCARG(uap, path));
1213 if ((error = change_dir(&nd, l)) != 0)
1214 return (error);
1215
1216 cwdi = p->p_cwdi;
1217 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1218 if (cwdi->cwdi_rdir != NULL)
1219 vrele(cwdi->cwdi_rdir);
1220 vp = nd.ni_vp;
1221 cwdi->cwdi_rdir = vp;
1222
1223 /*
1224 * Prevent escaping from the chroot by making sure the current
1225 * working directory is under the new root: silently chdir to
1226 * the new root if it is not already there.
1227 */
1228 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1229 /*
1230 * XXX would be more failsafe to change directory to a
1231 * deadfs node here instead
1232 */
1233 vrele(cwdi->cwdi_cdir);
1234 VREF(vp);
1235 cwdi->cwdi_cdir = vp;
1236 }
1237 rw_exit(&cwdi->cwdi_lock);
1238
1239 return (0);
1240 }
1241
1242 /*
1243 * Common routine for chroot and chdir.
1244 */
1245 static int
1246 change_dir(struct nameidata *ndp, struct lwp *l)
1247 {
1248 struct vnode *vp;
1249 int error;
1250
1251 if ((error = namei(ndp)) != 0)
1252 return (error);
1253 vp = ndp->ni_vp;
1254 if (vp->v_type != VDIR)
1255 error = ENOTDIR;
1256 else
1257 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1258
1259 if (error)
1260 vput(vp);
1261 else
1262 VOP_UNLOCK(vp, 0);
1263 return (error);
1264 }
1265
1266 /*
1267 * Check permissions, allocate an open file structure,
1268 * and call the device open routine if any.
1269 */
1270 int
1271 sys_open(struct lwp *l, const struct sys_open_args *uap, register_t *retval)
1272 {
1273 /* {
1274 syscallarg(const char *) path;
1275 syscallarg(int) flags;
1276 syscallarg(int) mode;
1277 } */
1278 struct proc *p = l->l_proc;
1279 struct cwdinfo *cwdi = p->p_cwdi;
1280 struct filedesc *fdp = p->p_fd;
1281 struct file *fp;
1282 struct vnode *vp;
1283 int flags, cmode;
1284 int type, indx, error;
1285 struct flock lf;
1286 struct nameidata nd;
1287
1288 flags = FFLAGS(SCARG(uap, flags));
1289 if ((flags & (FREAD | FWRITE)) == 0)
1290 return (EINVAL);
1291 /* falloc() will use the file descriptor for us */
1292 if ((error = falloc(l, &fp, &indx)) != 0)
1293 return (error);
1294 /* We're going to read cwdi->cwdi_cmask unlocked here. */
1295 cmode = ((SCARG(uap, mode) &~ cwdi->cwdi_cmask) & ALLPERMS) &~ S_ISTXT;
1296 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1297 SCARG(uap, path));
1298 l->l_dupfd = -indx - 1; /* XXX check for fdopen */
1299 if ((error = vn_open(&nd, flags, cmode)) != 0) {
1300 rw_enter(&fdp->fd_lock, RW_WRITER);
1301 FILE_UNUSE(fp, l);
1302 fdp->fd_ofiles[indx] = NULL;
1303 rw_exit(&fdp->fd_lock);
1304 ffree(fp);
1305 if ((error == EDUPFD || error == EMOVEFD) &&
1306 l->l_dupfd >= 0 && /* XXX from fdopen */
1307 (error =
1308 dupfdopen(l, indx, l->l_dupfd, flags, error)) == 0) {
1309 *retval = indx;
1310 return (0);
1311 }
1312 if (error == ERESTART)
1313 error = EINTR;
1314 fdremove(fdp, indx);
1315 return (error);
1316 }
1317
1318 l->l_dupfd = 0;
1319 vp = nd.ni_vp;
1320 fp->f_flag = flags & FMASK;
1321 fp->f_type = DTYPE_VNODE;
1322 fp->f_ops = &vnops;
1323 fp->f_data = vp;
1324 if (flags & (O_EXLOCK | O_SHLOCK)) {
1325 lf.l_whence = SEEK_SET;
1326 lf.l_start = 0;
1327 lf.l_len = 0;
1328 if (flags & O_EXLOCK)
1329 lf.l_type = F_WRLCK;
1330 else
1331 lf.l_type = F_RDLCK;
1332 type = F_FLOCK;
1333 if ((flags & FNONBLOCK) == 0)
1334 type |= F_WAIT;
1335 VOP_UNLOCK(vp, 0);
1336 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1337 if (error) {
1338 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1339 FILE_UNUSE(fp, l);
1340 ffree(fp);
1341 fdremove(fdp, indx);
1342 return (error);
1343 }
1344 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1345 fp->f_flag |= FHASLOCK;
1346 }
1347 VOP_UNLOCK(vp, 0);
1348 *retval = indx;
1349 FILE_SET_MATURE(fp);
1350 FILE_UNUSE(fp, l);
1351 return (0);
1352 }
1353
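/*
 * Free a file handle allocated by vfs_composefh_alloc() or
 * vfs_copyinfh_alloc().
 */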
1354 static void
1355 vfs__fhfree(fhandle_t *fhp)
1356 {
1357 size_t fhsize;
1358
1359 if (fhp == NULL) {
1360 return;
1361 }
1362 fhsize = FHANDLE_SIZE(fhp);
1363 kmem_free(fhp, fhsize);
1364 }
1365
1366 /*
1367 * vfs_composefh: compose a filehandle.
1368 */
1369
1370 int
1371 vfs_composefh(struct vnode *vp, fhandle_t *fhp, size_t *fh_size)
1372 {
1373 struct mount *mp;
1374 struct fid *fidp;
1375 int error;
1376 size_t needfhsize;
1377 size_t fidsize;
1378
1379 mp = vp->v_mount;
1380 fidp = NULL;
1381 if (*fh_size < FHANDLE_SIZE_MIN) {
1382 fidsize = 0;
1383 } else {
1384 fidsize = *fh_size - offsetof(fhandle_t, fh_fid);
1385 if (fhp != NULL) {
1386 memset(fhp, 0, *fh_size);
1387 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1388 fidp = &fhp->fh_fid;
1389 }
1390 }
1391 error = VFS_VPTOFH(vp, fidp, &fidsize);
1392 needfhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1393 if (error == 0 && *fh_size < needfhsize) {
1394 error = E2BIG;
1395 }
1396 *fh_size = needfhsize;
1397 return error;
1398 }
1399
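/*
 * vfs_composefh_alloc: compose a filehandle, allocating a buffer of
 * the size required by the file system; the caller releases it with
 * vfs_composefh_free().
 */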
1400 int
1401 vfs_composefh_alloc(struct vnode *vp, fhandle_t **fhpp)
1402 {
1403 struct mount *mp;
1404 fhandle_t *fhp;
1405 size_t fhsize;
1406 size_t fidsize;
1407 int error;
1408
1409 *fhpp = NULL;
1410 mp = vp->v_mount;
1411 fidsize = 0;
1412 error = VFS_VPTOFH(vp, NULL, &fidsize);
1413 KASSERT(error != 0);
1414 if (error != E2BIG) {
1415 goto out;
1416 }
1417 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1418 fhp = kmem_zalloc(fhsize, KM_SLEEP);
1419 if (fhp == NULL) {
1420 error = ENOMEM;
1421 goto out;
1422 }
1423 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1424 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize);
1425 if (error == 0) {
1426 KASSERT((FHANDLE_SIZE(fhp) == fhsize &&
1427 FHANDLE_FILEID(fhp)->fid_len == fidsize));
1428 *fhpp = fhp;
1429 } else {
1430 kmem_free(fhp, fhsize);
1431 }
1432 out:
1433 return error;
1434 }
1435
1436 void
1437 vfs_composefh_free(fhandle_t *fhp)
1438 {
1439
1440 vfs__fhfree(fhp);
1441 }
1442
1443 /*
1444 * vfs_fhtovp: lookup a vnode by a filehandle.
1445 */
1446
1447 int
1448 vfs_fhtovp(fhandle_t *fhp, struct vnode **vpp)
1449 {
1450 struct mount *mp;
1451 int error;
1452
1453 *vpp = NULL;
1454 mp = vfs_getvfs(FHANDLE_FSID(fhp));
1455 if (mp == NULL) {
1456 error = ESTALE;
1457 goto out;
1458 }
1459 if (mp->mnt_op->vfs_fhtovp == NULL) {
1460 error = EOPNOTSUPP;
1461 goto out;
1462 }
1463 error = VFS_FHTOVP(mp, FHANDLE_FILEID(fhp), vpp);
1464 out:
1465 return error;
1466 }
1467
1468 /*
1469 * vfs_copyinfh_alloc: allocate and copyin a filehandle, given
1470 * the needed size.
1471 */
1472
1473 int
1474 vfs_copyinfh_alloc(const void *ufhp, size_t fhsize, fhandle_t **fhpp)
1475 {
1476 fhandle_t *fhp;
1477 int error;
1478
1479 *fhpp = NULL;
1480 if (fhsize > FHANDLE_SIZE_MAX) {
1481 return EINVAL;
1482 }
1483 if (fhsize < FHANDLE_SIZE_MIN) {
1484 return EINVAL;
1485 }
1486 again:
1487 fhp = kmem_alloc(fhsize, KM_SLEEP);
1488 if (fhp == NULL) {
1489 return ENOMEM;
1490 }
1491 error = copyin(ufhp, fhp, fhsize);
1492 if (error == 0) {
1493 /* XXX this check shouldn't be here */
1494 if (FHANDLE_SIZE(fhp) == fhsize) {
1495 *fhpp = fhp;
1496 return 0;
1497 } else if (fhsize == NFSX_V2FH && FHANDLE_SIZE(fhp) < fhsize) {
1498 /*
1499 * a kludge for nfsv2 padded handles.
1500 */
1501 size_t sz;
1502
1503 sz = FHANDLE_SIZE(fhp);
1504 kmem_free(fhp, fhsize);
1505 fhsize = sz;
1506 goto again;
1507 } else {
1508 /*
1509 * userland told us the wrong size.
1510 */
1511 error = EINVAL;
1512 }
1513 }
1514 kmem_free(fhp, fhsize);
1515 return error;
1516 }
1517
1518 void
1519 vfs_copyinfh_free(fhandle_t *fhp)
1520 {
1521
1522 vfs__fhfree(fhp);
1523 }
1524
1525 /*
1526 * Get file handle system call
1527 */
1528 int
1529 sys___getfh30(struct lwp *l, const struct sys___getfh30_args *uap, register_t *retval)
1530 {
1531 /* {
1532 syscallarg(char *) fname;
1533 syscallarg(fhandle_t *) fhp;
1534 syscallarg(size_t *) fh_size;
1535 } */
1536 struct vnode *vp;
1537 fhandle_t *fh;
1538 int error;
1539 struct nameidata nd;
1540 size_t sz;
1541 size_t usz;
1542
1543 /*
1544 * Must be super user
1545 */
1546 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1547 0, NULL, NULL, NULL);
1548 if (error)
1549 return (error);
1550 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1551 SCARG(uap, fname));
1552 error = namei(&nd);
1553 if (error)
1554 return (error);
1555 vp = nd.ni_vp;
1556 error = vfs_composefh_alloc(vp, &fh);
1557 vput(vp);
1558 if (error != 0) {
1559 goto out;
1560 }
1561 error = copyin(SCARG(uap, fh_size), &usz, sizeof(size_t));
1562 if (error != 0) {
1563 goto out;
1564 }
1565 sz = FHANDLE_SIZE(fh);
1566 error = copyout(&sz, SCARG(uap, fh_size), sizeof(size_t));
1567 if (error != 0) {
1568 goto out;
1569 }
1570 if (usz >= sz) {
1571 error = copyout(fh, SCARG(uap, fhp), sz);
1572 } else {
1573 error = E2BIG;
1574 }
1575 out:
1576 vfs_composefh_free(fh);
1577 return (error);
1578 }
1579
1580 /*
1581 * Open a file given a file handle.
1582 *
1583 * Check permissions, allocate an open file structure,
1584 * and call the device open routine if any.
1585 */
1586
1587 int
1588 dofhopen(struct lwp *l, const void *ufhp, size_t fhsize, int oflags,
1589 register_t *retval)
1590 {
1591 struct filedesc *fdp = l->l_proc->p_fd;
1592 struct file *fp;
1593 struct vnode *vp = NULL;
1594 kauth_cred_t cred = l->l_cred;
1595 struct file *nfp;
1596 int type, indx, error=0;
1597 struct flock lf;
1598 struct vattr va;
1599 fhandle_t *fh;
1600 int flags;
1601
1602 /*
1603 * Must be super user
1604 */
1605 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1606 0, NULL, NULL, NULL)))
1607 return (error);
1608
1609 flags = FFLAGS(oflags);
1610 if ((flags & (FREAD | FWRITE)) == 0)
1611 return (EINVAL);
1612 if ((flags & O_CREAT))
1613 return (EINVAL);
1614 /* falloc() will use the file descriptor for us */
1615 if ((error = falloc(l, &nfp, &indx)) != 0)
1616 return (error);
1617 fp = nfp;
1618 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1619 if (error != 0) {
1620 goto bad;
1621 }
1622 error = vfs_fhtovp(fh, &vp);
1623 if (error != 0) {
1624 goto bad;
1625 }
1626
1627 /* Now do an effective vn_open */
1628
1629 if (vp->v_type == VSOCK) {
1630 error = EOPNOTSUPP;
1631 goto bad;
1632 }
1633 error = vn_openchk(vp, cred, flags);
1634 if (error != 0)
1635 goto bad;
1636 if (flags & O_TRUNC) {
1637 VOP_UNLOCK(vp, 0); /* XXX */
1638 VOP_LEASE(vp, cred, LEASE_WRITE);
1639 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
1640 VATTR_NULL(&va);
1641 va.va_size = 0;
1642 error = VOP_SETATTR(vp, &va, cred);
1643 if (error)
1644 goto bad;
1645 }
1646 if ((error = VOP_OPEN(vp, flags, cred)) != 0)
1647 goto bad;
1648 if (flags & FWRITE)
1649 vp->v_writecount++;
1650
1651 /* done with modified vn_open, now finish what sys_open does. */
1652
1653 fp->f_flag = flags & FMASK;
1654 fp->f_type = DTYPE_VNODE;
1655 fp->f_ops = &vnops;
1656 fp->f_data = vp;
1657 if (flags & (O_EXLOCK | O_SHLOCK)) {
1658 lf.l_whence = SEEK_SET;
1659 lf.l_start = 0;
1660 lf.l_len = 0;
1661 if (flags & O_EXLOCK)
1662 lf.l_type = F_WRLCK;
1663 else
1664 lf.l_type = F_RDLCK;
1665 type = F_FLOCK;
1666 if ((flags & FNONBLOCK) == 0)
1667 type |= F_WAIT;
1668 VOP_UNLOCK(vp, 0);
1669 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1670 if (error) {
1671 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1672 FILE_UNUSE(fp, l);
1673 ffree(fp);
1674 fdremove(fdp, indx);
1675 return (error);
1676 }
1677 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1678 fp->f_flag |= FHASLOCK;
1679 }
1680 VOP_UNLOCK(vp, 0);
1681 *retval = indx;
1682 FILE_SET_MATURE(fp);
1683 FILE_UNUSE(fp, l);
1684 vfs_copyinfh_free(fh);
1685 return (0);
1686
1687 bad:
1688 FILE_UNUSE(fp, l);
1689 ffree(fp);
1690 fdremove(fdp, indx);
1691 if (vp != NULL)
1692 vput(vp);
1693 vfs_copyinfh_free(fh);
1694 return (error);
1695 }
1696
1697 int
1698 sys___fhopen40(struct lwp *l, const struct sys___fhopen40_args *uap, register_t *retval)
1699 {
1700 /* {
1701 syscallarg(const void *) fhp;
1702 syscallarg(size_t) fh_size;
1703 syscallarg(int) flags;
1704 } */
1705
1706 return dofhopen(l, SCARG(uap, fhp), SCARG(uap, fh_size),
1707 SCARG(uap, flags), retval);
1708 }
1709
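/*
 * Common code for the fhstat(2) family: convert a user-supplied file
 * handle to a vnode and stat it.  Restricted to the super-user.
 */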
1710 int
1711 do_fhstat(struct lwp *l, const void *ufhp, size_t fhsize, struct stat *sb)
1712 {
1713 int error;
1714 fhandle_t *fh;
1715 struct vnode *vp;
1716
1717 /*
1718 * Must be super user
1719 */
1720 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1721 0, NULL, NULL, NULL)))
1722 return (error);
1723
1724 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1725 if (error != 0)
1726 return error;
1727
1728 error = vfs_fhtovp(fh, &vp);
1729 vfs_copyinfh_free(fh);
1730 if (error != 0)
1731 return error;
1732
1733 error = vn_stat(vp, sb, l);
1734 vput(vp);
1735 return error;
1736 }
1737
1738
1739 /* ARGSUSED */
1740 int
1741 sys___fhstat40(struct lwp *l, const struct sys___fhstat40_args *uap, register_t *retval)
1742 {
1743 /* {
1744 syscallarg(const void *) fhp;
1745 syscallarg(size_t) fh_size;
1746 syscallarg(struct stat *) sb;
1747 } */
1748 struct stat sb;
1749 int error;
1750
1751 error = do_fhstat(l, SCARG(uap, fhp), SCARG(uap, fh_size), &sb);
1752 if (error)
1753 return error;
1754 return copyout(&sb, SCARG(uap, sb), sizeof(sb));
1755 }
1756
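/*
 * Common code for the fhstatvfs(2) family: convert a user-supplied file
 * handle to a vnode and return statistics for the file system it
 * resides on.
 */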
1757 int
1758 do_fhstatvfs(struct lwp *l, const void *ufhp, size_t fhsize, struct statvfs *sb,
1759 int flags)
1760 {
1761 fhandle_t *fh;
1762 struct mount *mp;
1763 struct vnode *vp;
1764 int error;
1765
1766 /*
1767 * Must be super user
1768 */
1769 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1770 0, NULL, NULL, NULL)))
1771 return error;
1772
1773 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1774 if (error != 0)
1775 return error;
1776
1777 error = vfs_fhtovp(fh, &vp);
1778 vfs_copyinfh_free(fh);
1779 if (error != 0)
1780 return error;
1781
1782 mp = vp->v_mount;
1783 error = dostatvfs(mp, sb, l, flags, 1);
1784 vput(vp);
1785 return error;
1786 }
1787
1788 /* ARGSUSED */
1789 int
1790 sys___fhstatvfs140(struct lwp *l, const struct sys___fhstatvfs140_args *uap, register_t *retval)
1791 {
1792 /* {
1793 syscallarg(const void *) fhp;
1794 syscallarg(size_t) fh_size;
1795 syscallarg(struct statvfs *) buf;
1796 syscallarg(int) flags;
1797 } */
1798 struct statvfs *sb = STATVFSBUF_GET();
1799 int error;
1800
1801 error = do_fhstatvfs(l, SCARG(uap, fhp), SCARG(uap, fh_size), sb,
1802 SCARG(uap, flags));
1803 if (error == 0)
1804 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
1805 STATVFSBUF_PUT(sb);
1806 return error;
1807 }
1808
1809 /*
1810 * Create a special file.
1811 */
1812 /* ARGSUSED */
1813 int
1814 sys_mknod(struct lwp *l, const struct sys_mknod_args *uap, register_t *retval)
1815 {
1816 /* {
1817 syscallarg(const char *) path;
1818 syscallarg(int) mode;
1819 syscallarg(int) dev;
1820 } */
1821 struct proc *p = l->l_proc;
1822 struct vnode *vp;
1823 struct vattr vattr;
1824 int error, optype;
1825 struct nameidata nd;
1826 char *path;
1827 const char *cpath;
1828 enum uio_seg seg = UIO_USERSPACE;
1829
1830 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MKNOD,
1831 0, NULL, NULL, NULL)) != 0)
1832 return (error);
1833
1834 optype = VOP_MKNOD_DESCOFFSET;
1835
1836 VERIEXEC_PATH_GET(SCARG(uap, path), seg, cpath, path);
1837 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, seg, cpath);
1838
1839 if ((error = namei(&nd)) != 0)
1840 goto out;
1841 vp = nd.ni_vp;
1842 if (vp != NULL)
1843 error = EEXIST;
1844 else {
1845 VATTR_NULL(&vattr);
1846 /* We will read cwdi->cwdi_cmask unlocked. */
1847 vattr.va_mode =
1848 (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1849 vattr.va_rdev = SCARG(uap, dev);
1850
1851 switch (SCARG(uap, mode) & S_IFMT) {
1852 case S_IFMT: /* used by badsect to flag bad sectors */
1853 vattr.va_type = VBAD;
1854 break;
1855 case S_IFCHR:
1856 vattr.va_type = VCHR;
1857 break;
1858 case S_IFBLK:
1859 vattr.va_type = VBLK;
1860 break;
1861 case S_IFWHT:
1862 optype = VOP_WHITEOUT_DESCOFFSET;
1863 break;
1864 case S_IFREG:
1865 #if NVERIEXEC > 0
1866 error = veriexec_openchk(l, nd.ni_vp, nd.ni_dirp,
1867 O_CREAT);
1868 #endif /* NVERIEXEC > 0 */
1869 vattr.va_type = VREG;
1870 vattr.va_rdev = VNOVAL;
1871 optype = VOP_CREATE_DESCOFFSET;
1872 break;
1873 default:
1874 error = EINVAL;
1875 break;
1876 }
1877 }
1878 if (!error) {
1879 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1880 switch (optype) {
1881 case VOP_WHITEOUT_DESCOFFSET:
1882 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1883 if (error)
1884 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1885 vput(nd.ni_dvp);
1886 break;
1887
1888 case VOP_MKNOD_DESCOFFSET:
1889 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1890 &nd.ni_cnd, &vattr);
1891 if (error == 0)
1892 vput(nd.ni_vp);
1893 break;
1894
1895 case VOP_CREATE_DESCOFFSET:
1896 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
1897 &nd.ni_cnd, &vattr);
1898 if (error == 0)
1899 vput(nd.ni_vp);
1900 break;
1901 }
1902 } else {
1903 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1904 if (nd.ni_dvp == vp)
1905 vrele(nd.ni_dvp);
1906 else
1907 vput(nd.ni_dvp);
1908 if (vp)
1909 vrele(vp);
1910 }
1911 out:
1912 VERIEXEC_PATH_PUT(path);
1913 return (error);
1914 }
1915
1916 /*
1917 * Create a named pipe.
1918 */
1919 /* ARGSUSED */
1920 int
1921 sys_mkfifo(struct lwp *l, const struct sys_mkfifo_args *uap, register_t *retval)
1922 {
1923 /* {
1924 syscallarg(const char *) path;
1925 syscallarg(int) mode;
1926 } */
1927 struct proc *p = l->l_proc;
1928 struct vattr vattr;
1929 int error;
1930 struct nameidata nd;
1931
1932 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1933 SCARG(uap, path));
1934 if ((error = namei(&nd)) != 0)
1935 return (error);
1936 if (nd.ni_vp != NULL) {
1937 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1938 if (nd.ni_dvp == nd.ni_vp)
1939 vrele(nd.ni_dvp);
1940 else
1941 vput(nd.ni_dvp);
1942 vrele(nd.ni_vp);
1943 return (EEXIST);
1944 }
1945 VATTR_NULL(&vattr);
1946 vattr.va_type = VFIFO;
1947 /* We will read cwdi->cwdi_cmask unlocked. */
1948 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1949 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1950 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1951 if (error == 0)
1952 vput(nd.ni_vp);
1953 return (error);
1954 }
1955
1956 /*
1957 * Make a hard file link.
1958 */
1959 /* ARGSUSED */
1960 int
1961 sys_link(struct lwp *l, const struct sys_link_args *uap, register_t *retval)
1962 {
1963 /* {
1964 syscallarg(const char *) path;
1965 syscallarg(const char *) link;
1966 } */
1967 struct vnode *vp;
1968 struct nameidata nd;
1969 int error;
1970
1971 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1972 SCARG(uap, path));
1973 if ((error = namei(&nd)) != 0)
1974 return (error);
1975 vp = nd.ni_vp;
1976 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1977 SCARG(uap, link));
1978 if ((error = namei(&nd)) != 0)
1979 goto out;
1980 if (nd.ni_vp) {
1981 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1982 if (nd.ni_dvp == nd.ni_vp)
1983 vrele(nd.ni_dvp);
1984 else
1985 vput(nd.ni_dvp);
1986 vrele(nd.ni_vp);
1987 error = EEXIST;
1988 goto out;
1989 }
1990 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1991 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
1992 error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1993 out:
1994 vrele(vp);
1995 return (error);
1996 }
1997
1998 /*
1999 * Make a symbolic link.
2000 */
2001 /* ARGSUSED */
2002 int
2003 sys_symlink(struct lwp *l, const struct sys_symlink_args *uap, register_t *retval)
2004 {
2005 /* {
2006 syscallarg(const char *) path;
2007 syscallarg(const char *) link;
2008 } */
2009 struct proc *p = l->l_proc;
2010 struct vattr vattr;
2011 char *path;
2012 int error;
2013 struct nameidata nd;
2014
2015 path = PNBUF_GET();
2016 error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL);
2017 if (error)
2018 goto out;
2019 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
2020 SCARG(uap, link));
2021 if ((error = namei(&nd)) != 0)
2022 goto out;
2023 if (nd.ni_vp) {
2024 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2025 if (nd.ni_dvp == nd.ni_vp)
2026 vrele(nd.ni_dvp);
2027 else
2028 vput(nd.ni_dvp);
2029 vrele(nd.ni_vp);
2030 error = EEXIST;
2031 goto out;
2032 }
2033 VATTR_NULL(&vattr);
2034 vattr.va_type = VLNK;
2035 /* We will read cwdi->cwdi_cmask unlocked. */
2036 vattr.va_mode = ACCESSPERMS &~ p->p_cwdi->cwdi_cmask;
2037 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2038 error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
2039 if (error == 0)
2040 vput(nd.ni_vp);
2041 out:
2042 PNBUF_PUT(path);
2043 return (error);
2044 }
2045
2046 /*
2047 * Delete a whiteout from the filesystem.
2048 */
2049 /* ARGSUSED */
2050 int
2051 sys_undelete(struct lwp *l, const struct sys_undelete_args *uap, register_t *retval)
2052 {
2053 /* {
2054 syscallarg(const char *) path;
2055 } */
2056 int error;
2057 struct nameidata nd;
2058
2059 NDINIT(&nd, DELETE, LOCKPARENT | DOWHITEOUT | TRYEMULROOT,
2060 UIO_USERSPACE, SCARG(uap, path));
2061 error = namei(&nd);
2062 if (error)
2063 return (error);
2064
2065 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
2066 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2067 if (nd.ni_dvp == nd.ni_vp)
2068 vrele(nd.ni_dvp);
2069 else
2070 vput(nd.ni_dvp);
2071 if (nd.ni_vp)
2072 vrele(nd.ni_vp);
2073 return (EEXIST);
2074 }
2075 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2076 if ((error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE)) != 0)
2077 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2078 vput(nd.ni_dvp);
2079 return (error);
2080 }
2081
2082 /*
2083 * Delete a name from the filesystem.
2084 */
2085 /* ARGSUSED */
2086 int
2087 sys_unlink(struct lwp *l, const struct sys_unlink_args *uap, register_t *retval)
2088 {
2089 /* {
2090 syscallarg(const char *) path;
2091 } */
2092
2093 return do_sys_unlink(SCARG(uap, path), UIO_USERSPACE);
2094 }
2095
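/*
 * Common code for unlink(2).  The root of a mounted file system cannot
 * be removed; veriexec and fileassoc entries for the file are handled
 * before VOP_REMOVE() is called.
 */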
2096 int
2097 do_sys_unlink(const char *arg, enum uio_seg seg)
2098 {
2099 struct vnode *vp;
2100 int error;
2101 struct nameidata nd;
2102 kauth_cred_t cred;
2103 char *path;
2104 const char *cpath;
2105
2106 VERIEXEC_PATH_GET(arg, seg, cpath, path);
2107 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, seg, cpath);
2108
2109 if ((error = namei(&nd)) != 0)
2110 goto out;
2111 vp = nd.ni_vp;
2112
2113 /*
2114 * The root of a mounted filesystem cannot be deleted.
2115 */
2116 if (vp->v_vflag & VV_ROOT) {
2117 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2118 if (nd.ni_dvp == vp)
2119 vrele(nd.ni_dvp);
2120 else
2121 vput(nd.ni_dvp);
2122 vput(vp);
2123 error = EBUSY;
2124 goto out;
2125 }
2126
2127 #if NVERIEXEC > 0
2128 /* Handle remove requests for veriexec entries. */
2129 if ((error = veriexec_removechk(curlwp, nd.ni_vp, nd.ni_dirp)) != 0) {
2130 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2131 if (nd.ni_dvp == vp)
2132 vrele(nd.ni_dvp);
2133 else
2134 vput(nd.ni_dvp);
2135 vput(vp);
2136 goto out;
2137 }
2138 #endif /* NVERIEXEC > 0 */
2139
2140 cred = kauth_cred_get();
2141 VOP_LEASE(nd.ni_dvp, cred, LEASE_WRITE);
2142 VOP_LEASE(vp, cred, LEASE_WRITE);
2143 #ifdef FILEASSOC
2144 (void)fileassoc_file_delete(vp);
2145 #endif /* FILEASSOC */
2146 error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2147 out:
2148 VERIEXEC_PATH_PUT(path);
2149 return (error);
2150 }
2151
2152 /*
2153 * Reposition read/write file offset.
2154 */
2155 int
2156 sys_lseek(struct lwp *l, const struct sys_lseek_args *uap, register_t *retval)
2157 {
2158 /* {
2159 syscallarg(int) fd;
2160 syscallarg(int) pad;
2161 syscallarg(off_t) offset;
2162 syscallarg(int) whence;
2163 } */
2164 struct proc *p = l->l_proc;
2165 kauth_cred_t cred = l->l_cred;
2166 struct filedesc *fdp = p->p_fd;
2167 struct file *fp;
2168 struct vnode *vp;
2169 struct vattr vattr;
2170 off_t newoff;
2171 int error;
2172
2173 if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
2174 return (EBADF);
2175
2176 vp = (struct vnode *)fp->f_data;
2177 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2178 error = ESPIPE;
2179 mutex_exit(&fp->f_lock);
2180 goto out;
2181 }
2182
2183 switch (SCARG(uap, whence)) {
2184 case SEEK_CUR:
2185 newoff = fp->f_offset + SCARG(uap, offset);
2186 FILE_USE(fp);
2187 break;
2188 case SEEK_END:
2189 FILE_USE(fp);
2190 error = VOP_GETATTR(vp, &vattr, cred);
2191 if (error) {
2192 FILE_UNUSE(fp, l);
2193 goto out;
2194 }
2195 newoff = SCARG(uap, offset) + vattr.va_size;
2196 break;
2197 case SEEK_SET:
2198 FILE_USE(fp);
2199 newoff = SCARG(uap, offset);
2200 break;
2201 default:
2202 mutex_exit(&fp->f_lock);
2203 error = EINVAL;
2204 goto out;
2205 }
2206 if ((error = VOP_SEEK(vp, fp->f_offset, newoff, cred)) == 0) {
2207 mutex_enter(&fp->f_lock);
2208 *(off_t *)retval = fp->f_offset = newoff;
2209 mutex_exit(&fp->f_lock);
2210 }
2211 FILE_UNUSE(fp, l);
2212 out:
2213 return (error);
2214 }
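/*
 * Illustrative userland sketch of the three whence cases handled above
 * (the descriptor `fd' is hypothetical):
 *
 *	#include <unistd.h>
 *
 *	off_t end, cur;
 *
 *	end = lseek(fd, 0, SEEK_END);	offset just past the last byte
 *	(void)lseek(fd, 0, SEEK_SET);	rewind to the beginning
 *	cur = lseek(fd, 0, SEEK_CUR);	query the current offset
 */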
2215
2216 /*
2217 * Positional read system call.
2218 */
2219 int
2220 sys_pread(struct lwp *l, const struct sys_pread_args *uap, register_t *retval)
2221 {
2222 /* {
2223 syscallarg(int) fd;
2224 syscallarg(void *) buf;
2225 syscallarg(size_t) nbyte;
2226 syscallarg(off_t) offset;
2227 } */
2228 struct proc *p = l->l_proc;
2229 struct filedesc *fdp = p->p_fd;
2230 struct file *fp;
2231 struct vnode *vp;
2232 off_t offset;
2233 int error, fd = SCARG(uap, fd);
2234
2235 if ((fp = fd_getfile(fdp, fd)) == NULL)
2236 return (EBADF);
2237
2238 if ((fp->f_flag & FREAD) == 0) {
2239 mutex_exit(&fp->f_lock);
2240 return (EBADF);
2241 }
2242
2243 FILE_USE(fp);
2244
2245 vp = (struct vnode *)fp->f_data;
2246 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2247 error = ESPIPE;
2248 goto out;
2249 }
2250
2251 offset = SCARG(uap, offset);
2252
2253 /*
2254 * XXX This works because no file systems actually
2255 * XXX take any action on the seek operation.
2256 */
2257 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2258 goto out;
2259
2260 /* dofileread() will unuse the descriptor for us */
2261 return (dofileread(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2262 &offset, 0, retval));
2263
2264 out:
2265 FILE_UNUSE(fp, l);
2266 return (error);
2267 }
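/*
 * Illustrative userland sketch (descriptor and offset are hypothetical):
 * pread(2) reads at an explicit offset and, unlike read(2), leaves the
 * descriptor's file offset untouched, which is why only the local copy of
 * `offset' is handed to dofileread() above.
 *
 *	#include <unistd.h>
 *
 *	char buf[512];
 *	ssize_t n = pread(fd, buf, sizeof(buf), (off_t)4096);
 */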
2268
2269 /*
2270 * Positional scatter read system call.
2271 */
2272 int
2273 sys_preadv(struct lwp *l, const struct sys_preadv_args *uap, register_t *retval)
2274 {
2275 /* {
2276 syscallarg(int) fd;
2277 syscallarg(const struct iovec *) iovp;
2278 syscallarg(int) iovcnt;
2279 syscallarg(off_t) offset;
2280 } */
2281 off_t offset = SCARG(uap, offset);
2282
2283 return do_filereadv(SCARG(uap, fd), SCARG(uap, iovp),
2284 SCARG(uap, iovcnt), &offset, 0, retval);
2285 }
2286
2287 /*
2288 * Positional write system call.
2289 */
2290 int
2291 sys_pwrite(struct lwp *l, const struct sys_pwrite_args *uap, register_t *retval)
2292 {
2293 /* {
2294 syscallarg(int) fd;
2295 syscallarg(const void *) buf;
2296 syscallarg(size_t) nbyte;
2297 syscallarg(off_t) offset;
2298 } */
2299 struct proc *p = l->l_proc;
2300 struct filedesc *fdp = p->p_fd;
2301 struct file *fp;
2302 struct vnode *vp;
2303 off_t offset;
2304 int error, fd = SCARG(uap, fd);
2305
2306 if ((fp = fd_getfile(fdp, fd)) == NULL)
2307 return (EBADF);
2308
2309 if ((fp->f_flag & FWRITE) == 0) {
2310 mutex_exit(&fp->f_lock);
2311 return (EBADF);
2312 }
2313
2314 FILE_USE(fp);
2315
2316 vp = (struct vnode *)fp->f_data;
2317 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2318 error = ESPIPE;
2319 goto out;
2320 }
2321
2322 offset = SCARG(uap, offset);
2323
2324 /*
2325 * XXX This works because no file systems actually
2326 * XXX take any action on the seek operation.
2327 */
2328 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2329 goto out;
2330
2331 /* dofilewrite() will unuse the descriptor for us */
2332 return (dofilewrite(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2333 &offset, 0, retval));
2334
2335 out:
2336 FILE_UNUSE(fp, l);
2337 return (error);
2338 }
2339
2340 /*
2341 * Positional gather write system call.
2342 */
2343 int
2344 sys_pwritev(struct lwp *l, const struct sys_pwritev_args *uap, register_t *retval)
2345 {
2346 /* {
2347 syscallarg(int) fd;
2348 syscallarg(const struct iovec *) iovp;
2349 syscallarg(int) iovcnt;
2350 syscallarg(off_t) offset;
2351 } */
2352 off_t offset = SCARG(uap, offset);
2353
2354 return do_filewritev(SCARG(uap, fd), SCARG(uap, iovp),
2355 SCARG(uap, iovcnt), &offset, 0, retval);
2356 }
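/*
 * Illustrative userland sketch (descriptor and buffers are hypothetical):
 * pwritev(2) gathers several buffers into a single write at a fixed offset,
 * again without moving the descriptor's file offset.
 *
 *	#include <sys/uio.h>
 *
 *	char hdr[16], body[4096];
 *	struct iovec iov[2];
 *	ssize_t n;
 *
 *	iov[0].iov_base = hdr;  iov[0].iov_len = sizeof(hdr);
 *	iov[1].iov_base = body; iov[1].iov_len = sizeof(body);
 *	n = pwritev(fd, iov, 2, (off_t)0);
 */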
2357
2358 /*
2359 * Check access permissions.
2360 */
2361 int
2362 sys_access(struct lwp *l, const struct sys_access_args *uap, register_t *retval)
2363 {
2364 /* {
2365 syscallarg(const char *) path;
2366 syscallarg(int) flags;
2367 } */
2368 kauth_cred_t cred;
2369 struct vnode *vp;
2370 int error, flags;
2371 struct nameidata nd;
2372
2373 cred = kauth_cred_dup(l->l_cred);
2374 kauth_cred_seteuid(cred, kauth_cred_getuid(l->l_cred));
2375 kauth_cred_setegid(cred, kauth_cred_getgid(l->l_cred));
2376 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2377 SCARG(uap, path));
2378 /* Override default credentials */
2379 nd.ni_cnd.cn_cred = cred;
2380 if ((error = namei(&nd)) != 0)
2381 goto out;
2382 vp = nd.ni_vp;
2383
2384 /* Flags == 0 means only check for existence. */
2385 if (SCARG(uap, flags)) {
2386 flags = 0;
2387 if (SCARG(uap, flags) & R_OK)
2388 flags |= VREAD;
2389 if (SCARG(uap, flags) & W_OK)
2390 flags |= VWRITE;
2391 if (SCARG(uap, flags) & X_OK)
2392 flags |= VEXEC;
2393
2394 error = VOP_ACCESS(vp, flags, cred);
2395 if (!error && (flags & VWRITE))
2396 error = vn_writechk(vp);
2397 }
2398 vput(vp);
2399 out:
2400 kauth_cred_free(cred);
2401 return (error);
2402 }
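/*
 * Illustrative userland sketch (the path is hypothetical): because of the
 * credential swap above, access(2) answers for the caller's real uid/gid
 * rather than the effective ones, and a flags value of 0 (F_OK) only tests
 * that the name can be resolved.
 *
 *	#include <unistd.h>
 *
 *	if (access("/etc/example.conf", R_OK | W_OK) == 0)
 *		the real ids may both read and write the file
 *	if (access("/etc/example.conf", F_OK) == 0)
 *		the file exists
 */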
2403
2404 /*
2405 * Common code for all sys_stat functions, including compat versions.
2406 */
2407 int
2408 do_sys_stat(struct lwp *l, const char *path, unsigned int nd_flags,
2409 struct stat *sb)
2410 {
2411 int error;
2412 struct nameidata nd;
2413
2414 NDINIT(&nd, LOOKUP, nd_flags | LOCKLEAF | TRYEMULROOT,
2415 UIO_USERSPACE, path);
2416 error = namei(&nd);
2417 if (error != 0)
2418 return error;
2419 error = vn_stat(nd.ni_vp, sb, l);
2420 vput(nd.ni_vp);
2421 return error;
2422 }
2423
2424 /*
2425 * Get file status; this version follows links.
2426 */
2427 /* ARGSUSED */
2428 int
2429 sys___stat30(struct lwp *l, const struct sys___stat30_args *uap, register_t *retval)
2430 {
2431 /* {
2432 syscallarg(const char *) path;
2433 syscallarg(struct stat *) ub;
2434 } */
2435 struct stat sb;
2436 int error;
2437
2438 error = do_sys_stat(l, SCARG(uap, path), FOLLOW, &sb);
2439 if (error)
2440 return error;
2441 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2442 }
2443
2444 /*
2445 * Get file status; this version does not follow links.
2446 */
2447 /* ARGSUSED */
2448 int
2449 sys___lstat30(struct lwp *l, const struct sys___lstat30_args *uap, register_t *retval)
2450 {
2451 /* {
2452 syscallarg(const char *) path;
2453 syscallarg(struct stat *) ub;
2454 } */
2455 struct stat sb;
2456 int error;
2457
2458 error = do_sys_stat(l, SCARG(uap, path), NOFOLLOW, &sb);
2459 if (error)
2460 return error;
2461 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2462 }
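/*
 * Illustrative userland sketch ("/tmp/link" is a hypothetical symbolic
 * link): stat(2) follows a trailing symlink while lstat(2) reports on the
 * link itself, matching the FOLLOW/NOFOLLOW flags passed to do_sys_stat()
 * above.
 *
 *	#include <sys/stat.h>
 *
 *	struct stat sb;
 *
 *	if (lstat("/tmp/link", &sb) == 0 && S_ISLNK(sb.st_mode))
 *		sb describes the link, not its target
 */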
2463
2464 /*
2465 * Get configurable pathname variables.
2466 */
2467 /* ARGSUSED */
2468 int
2469 sys_pathconf(struct lwp *l, const struct sys_pathconf_args *uap, register_t *retval)
2470 {
2471 /* {
2472 syscallarg(const char *) path;
2473 syscallarg(int) name;
2474 } */
2475 int error;
2476 struct nameidata nd;
2477
2478 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2479 SCARG(uap, path));
2480 if ((error = namei(&nd)) != 0)
2481 return (error);
2482 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), retval);
2483 vput(nd.ni_vp);
2484 return (error);
2485 }
2486
2487 /*
2488 * Return target name of a symbolic link.
2489 */
2490 /* ARGSUSED */
2491 int
2492 sys_readlink(struct lwp *l, const struct sys_readlink_args *uap, register_t *retval)
2493 {
2494 /* {
2495 syscallarg(const char *) path;
2496 syscallarg(char *) buf;
2497 syscallarg(size_t) count;
2498 } */
2499 struct vnode *vp;
2500 struct iovec aiov;
2501 struct uio auio;
2502 int error;
2503 struct nameidata nd;
2504
2505 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2506 SCARG(uap, path));
2507 if ((error = namei(&nd)) != 0)
2508 return (error);
2509 vp = nd.ni_vp;
2510 if (vp->v_type != VLNK)
2511 error = EINVAL;
2512 else if (!(vp->v_mount->mnt_flag & MNT_SYMPERM) ||
2513 (error = VOP_ACCESS(vp, VREAD, l->l_cred)) == 0) {
2514 aiov.iov_base = SCARG(uap, buf);
2515 aiov.iov_len = SCARG(uap, count);
2516 auio.uio_iov = &aiov;
2517 auio.uio_iovcnt = 1;
2518 auio.uio_offset = 0;
2519 auio.uio_rw = UIO_READ;
2520 KASSERT(l == curlwp);
2521 auio.uio_vmspace = l->l_proc->p_vmspace;
2522 auio.uio_resid = SCARG(uap, count);
2523 error = VOP_READLINK(vp, &auio, l->l_cred);
2524 }
2525 vput(vp);
2526 	if (error == 0)		/* auio is initialized only on the VOP_READLINK path */
		*retval = SCARG(uap, count) - auio.uio_resid;
2527 return (error);
2528 }
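/*
 * Illustrative userland sketch ("/tmp/link" is hypothetical): the return
 * value is the number of bytes placed in the buffer (count - uio_resid
 * above) and the result is not NUL-terminated, so callers terminate it
 * themselves.
 *
 *	#include <unistd.h>
 *
 *	char buf[1024];
 *	ssize_t n = readlink("/tmp/link", buf, sizeof(buf) - 1);
 *
 *	if (n != -1)
 *		buf[n] = '\0';
 */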
2529
2530 /*
2531 * Change flags of a file given a path name.
2532 */
2533 /* ARGSUSED */
2534 int
2535 sys_chflags(struct lwp *l, const struct sys_chflags_args *uap, register_t *retval)
2536 {
2537 /* {
2538 syscallarg(const char *) path;
2539 syscallarg(u_long) flags;
2540 } */
2541 struct vnode *vp;
2542 int error;
2543 struct nameidata nd;
2544
2545 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2546 SCARG(uap, path));
2547 if ((error = namei(&nd)) != 0)
2548 return (error);
2549 vp = nd.ni_vp;
2550 error = change_flags(vp, SCARG(uap, flags), l);
2551 vput(vp);
2552 return (error);
2553 }
2554
2555 /*
2556 * Change flags of a file given a file descriptor.
2557 */
2558 /* ARGSUSED */
2559 int
2560 sys_fchflags(struct lwp *l, const struct sys_fchflags_args *uap, register_t *retval)
2561 {
2562 /* {
2563 syscallarg(int) fd;
2564 syscallarg(u_long) flags;
2565 } */
2566 struct proc *p = l->l_proc;
2567 struct vnode *vp;
2568 struct file *fp;
2569 int error;
2570
2571 /* getvnode() will use the descriptor for us */
2572 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2573 return (error);
2574 vp = (struct vnode *)fp->f_data;
2575 error = change_flags(vp, SCARG(uap, flags), l);
2576 VOP_UNLOCK(vp, 0);
2577 FILE_UNUSE(fp, l);
2578 return (error);
2579 }
2580
2581 /*
2582 * Change flags of a file given a path name; this version does
2583 * not follow links.
2584 */
2585 int
2586 sys_lchflags(struct lwp *l, const struct sys_lchflags_args *uap, register_t *retval)
2587 {
2588 /* {
2589 syscallarg(const char *) path;
2590 syscallarg(u_long) flags;
2591 } */
2592 struct vnode *vp;
2593 int error;
2594 struct nameidata nd;
2595
2596 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2597 SCARG(uap, path));
2598 if ((error = namei(&nd)) != 0)
2599 return (error);
2600 vp = nd.ni_vp;
2601 error = change_flags(vp, SCARG(uap, flags), l);
2602 vput(vp);
2603 return (error);
2604 }
2605
2606 /*
2607 * Common routine to change flags of a file.
2608 */
2609 int
2610 change_flags(struct vnode *vp, u_long flags, struct lwp *l)
2611 {
2612 struct vattr vattr;
2613 int error;
2614
2615 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2616 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2617 /*
2618 * Non-superusers cannot change the flags on devices, even if they
2619 * own them.
2620 */
2621 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
2622 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2623 goto out;
2624 if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2625 error = EINVAL;
2626 goto out;
2627 }
2628 }
2629 VATTR_NULL(&vattr);
2630 vattr.va_flags = flags;
2631 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2632 out:
2633 return (error);
2634 }
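/*
 * Illustrative userland sketch (the path is hypothetical): chflags(2),
 * lchflags(2) and fchflags(2) all funnel into change_flags() above.  For
 * example, marking a scratch file as not to be backed up by dump(8):
 *
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	(void)chflags("/var/tmp/scratch", UF_NODUMP);
 */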
2635
2636 /*
2637 * Change mode of a file given a path name; this version follows links.
2638 */
2639 /* ARGSUSED */
2640 int
2641 sys_chmod(struct lwp *l, const struct sys_chmod_args *uap, register_t *retval)
2642 {
2643 /* {
2644 syscallarg(const char *) path;
2645 syscallarg(int) mode;
2646 } */
2647 int error;
2648 struct nameidata nd;
2649
2650 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2651 SCARG(uap, path));
2652 if ((error = namei(&nd)) != 0)
2653 return (error);
2654
2655 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2656
2657 vrele(nd.ni_vp);
2658 return (error);
2659 }
2660
2661 /*
2662 * Change mode of a file given a file descriptor.
2663 */
2664 /* ARGSUSED */
2665 int
2666 sys_fchmod(struct lwp *l, const struct sys_fchmod_args *uap, register_t *retval)
2667 {
2668 /* {
2669 syscallarg(int) fd;
2670 syscallarg(int) mode;
2671 } */
2672 struct proc *p = l->l_proc;
2673 struct file *fp;
2674 int error;
2675
2676 /* getvnode() will use the descriptor for us */
2677 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2678 return (error);
2679
2680 error = change_mode((struct vnode *)fp->f_data, SCARG(uap, mode), l);
2681 FILE_UNUSE(fp, l);
2682 return (error);
2683 }
2684
2685 /*
2686 * Change mode of a file given a path name; this version does not follow links.
2687 */
2688 /* ARGSUSED */
2689 int
2690 sys_lchmod(struct lwp *l, const struct sys_lchmod_args *uap, register_t *retval)
2691 {
2692 /* {
2693 syscallarg(const char *) path;
2694 syscallarg(int) mode;
2695 } */
2696 int error;
2697 struct nameidata nd;
2698
2699 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2700 SCARG(uap, path));
2701 if ((error = namei(&nd)) != 0)
2702 return (error);
2703
2704 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2705
2706 vrele(nd.ni_vp);
2707 return (error);
2708 }
2709
2710 /*
2711 * Common routine to set mode given a vnode.
2712 */
2713 static int
2714 change_mode(struct vnode *vp, int mode, struct lwp *l)
2715 {
2716 struct vattr vattr;
2717 int error;
2718
2719 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2720 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2721 VATTR_NULL(&vattr);
2722 vattr.va_mode = mode & ALLPERMS;
2723 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2724 VOP_UNLOCK(vp, 0);
2725 return (error);
2726 }
2727
2728 /*
2729 * Set ownership given a path name; this version follows links.
2730 */
2731 /* ARGSUSED */
2732 int
2733 sys_chown(struct lwp *l, const struct sys_chown_args *uap, register_t *retval)
2734 {
2735 /* {
2736 syscallarg(const char *) path;
2737 syscallarg(uid_t) uid;
2738 syscallarg(gid_t) gid;
2739 } */
2740 int error;
2741 struct nameidata nd;
2742
2743 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2744 SCARG(uap, path));
2745 if ((error = namei(&nd)) != 0)
2746 return (error);
2747
2748 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2749
2750 vrele(nd.ni_vp);
2751 return (error);
2752 }
2753
2754 /*
2755 * Set ownership given a path name; this version follows links.
2756 * Provides POSIX semantics.
2757 */
2758 /* ARGSUSED */
2759 int
2760 sys___posix_chown(struct lwp *l, const struct sys___posix_chown_args *uap, register_t *retval)
2761 {
2762 /* {
2763 syscallarg(const char *) path;
2764 syscallarg(uid_t) uid;
2765 syscallarg(gid_t) gid;
2766 } */
2767 int error;
2768 struct nameidata nd;
2769
2770 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2771 SCARG(uap, path));
2772 if ((error = namei(&nd)) != 0)
2773 return (error);
2774
2775 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2776
2777 vrele(nd.ni_vp);
2778 return (error);
2779 }
2780
2781 /*
2782 * Set ownership given a file descriptor.
2783 */
2784 /* ARGSUSED */
2785 int
2786 sys_fchown(struct lwp *l, const struct sys_fchown_args *uap, register_t *retval)
2787 {
2788 /* {
2789 syscallarg(int) fd;
2790 syscallarg(uid_t) uid;
2791 syscallarg(gid_t) gid;
2792 } */
2793 struct proc *p = l->l_proc;
2794 int error;
2795 struct file *fp;
2796
2797 /* getvnode() will use the descriptor for us */
2798 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2799 return (error);
2800
2801 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2802 SCARG(uap, gid), l, 0);
2803 FILE_UNUSE(fp, l);
2804 return (error);
2805 }
2806
2807 /*
2808 * Set ownership given a file descriptor, providing POSIX/XPG semantics.
2809 */
2810 /* ARGSUSED */
2811 int
2812 sys___posix_fchown(struct lwp *l, const struct sys___posix_fchown_args *uap, register_t *retval)
2813 {
2814 /* {
2815 syscallarg(int) fd;
2816 syscallarg(uid_t) uid;
2817 syscallarg(gid_t) gid;
2818 } */
2819 struct proc *p = l->l_proc;
2820 int error;
2821 struct file *fp;
2822
2823 /* getvnode() will use the descriptor for us */
2824 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2825 return (error);
2826
2827 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2828 SCARG(uap, gid), l, 1);
2829 FILE_UNUSE(fp, l);
2830 return (error);
2831 }
2832
2833 /*
2834 * Set ownership given a path name; this version does not follow links.
2835 */
2836 /* ARGSUSED */
2837 int
2838 sys_lchown(struct lwp *l, const struct sys_lchown_args *uap, register_t *retval)
2839 {
2840 /* {
2841 syscallarg(const char *) path;
2842 syscallarg(uid_t) uid;
2843 syscallarg(gid_t) gid;
2844 } */
2845 int error;
2846 struct nameidata nd;
2847
2848 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2849 SCARG(uap, path));
2850 if ((error = namei(&nd)) != 0)
2851 return (error);
2852
2853 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2854
2855 vrele(nd.ni_vp);
2856 return (error);
2857 }
2858
2859 /*
2860 * Set ownership given a path name; this version does not follow links.
2861 * Provides POSIX/XPG semantics.
2862 */
2863 /* ARGSUSED */
2864 int
2865 sys___posix_lchown(struct lwp *l, const struct sys___posix_lchown_args *uap, register_t *retval)
2866 {
2867 /* {
2868 syscallarg(const char *) path;
2869 syscallarg(uid_t) uid;
2870 syscallarg(gid_t) gid;
2871 } */
2872 int error;
2873 struct nameidata nd;
2874
2875 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2876 SCARG(uap, path));
2877 if ((error = namei(&nd)) != 0)
2878 return (error);
2879
2880 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2881
2882 vrele(nd.ni_vp);
2883 return (error);
2884 }
2885
2886 /*
2887 * Common routine to set ownership given a vnode.
2888 */
2889 static int
2890 change_owner(struct vnode *vp, uid_t uid, gid_t gid, struct lwp *l,
2891 int posix_semantics)
2892 {
2893 struct vattr vattr;
2894 mode_t newmode;
2895 int error;
2896
2897 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2898 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2899 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2900 goto out;
2901
2902 #define CHANGED(x) ((int)(x) != -1)
2903 newmode = vattr.va_mode;
2904 if (posix_semantics) {
2905 /*
2906 * POSIX/XPG semantics: if the caller is not the super-user,
2907 * clear set-user-id and set-group-id bits. Both POSIX and
2908 * the XPG consider the behaviour for calls by the super-user
2909 * implementation-defined; we leave the set-user-id and set-
2910 * group-id settings intact in that case.
2911 */
2912 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
2913 NULL) != 0)
2914 newmode &= ~(S_ISUID | S_ISGID);
2915 } else {
2916 /*
2917 * NetBSD semantics: when changing owner and/or group,
2918 * clear the respective bit(s).
2919 */
2920 if (CHANGED(uid))
2921 newmode &= ~S_ISUID;
2922 if (CHANGED(gid))
2923 newmode &= ~S_ISGID;
2924 }
2925 /* Update va_mode iff altered. */
2926 if (vattr.va_mode == newmode)
2927 newmode = VNOVAL;
2928
2929 VATTR_NULL(&vattr);
2930 vattr.va_uid = CHANGED(uid) ? uid : (uid_t)VNOVAL;
2931 vattr.va_gid = CHANGED(gid) ? gid : (gid_t)VNOVAL;
2932 vattr.va_mode = newmode;
2933 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2934 #undef CHANGED
2935
2936 out:
2937 VOP_UNLOCK(vp, 0);
2938 return (error);
2939 }
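/*
 * Illustrative userland sketch (path and group id are hypothetical): both
 * the BSD and POSIX entry points above treat (uid_t)-1 and (gid_t)-1 as
 * "leave this field unchanged", which is what the CHANGED() test keys on;
 * the two variants differ only in when the set-user-id and set-group-id
 * bits are cleared.
 *
 *	#include <unistd.h>
 *
 *	(void)chown("/home/example/file", (uid_t)-1, (gid_t)100);
 *
 * This changes only the group and leaves the owner alone.
 */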
2940
2941 /*
2942 * Set the access and modification times given a path name; this
2943 * version follows links.
2944 */
2945 /* ARGSUSED */
2946 int
2947 sys_utimes(struct lwp *l, const struct sys_utimes_args *uap, register_t *retval)
2948 {
2949 /* {
2950 syscallarg(const char *) path;
2951 syscallarg(const struct timeval *) tptr;
2952 } */
2953
2954 return do_sys_utimes(l, NULL, SCARG(uap, path), FOLLOW,
2955 SCARG(uap, tptr), UIO_USERSPACE);
2956 }
2957
2958 /*
2959 * Set the access and modification times given a file descriptor.
2960 */
2961 /* ARGSUSED */
2962 int
2963 sys_futimes(struct lwp *l, const struct sys_futimes_args *uap, register_t *retval)
2964 {
2965 /* {
2966 syscallarg(int) fd;
2967 syscallarg(const struct timeval *) tptr;
2968 } */
2969 int error;
2970 struct file *fp;
2971
2972 /* getvnode() will use the descriptor for us */
2973 if ((error = getvnode(l->l_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
2974 return (error);
2975
2976 error = do_sys_utimes(l, fp->f_data, NULL, 0,
2977 SCARG(uap, tptr), UIO_USERSPACE);
2978
2979 FILE_UNUSE(fp, l);
2980 return (error);
2981 }
2982
2983 /*
2984 * Set the access and modification times given a path name; this
2985 * version does not follow links.
2986 */
2987 int
2988 sys_lutimes(struct lwp *l, const struct sys_lutimes_args *uap, register_t *retval)
2989 {
2990 /* {
2991 syscallarg(const char *) path;
2992 syscallarg(const struct timeval *) tptr;
2993 } */
2994
2995 return do_sys_utimes(l, NULL, SCARG(uap, path), NOFOLLOW,
2996 SCARG(uap, tptr), UIO_USERSPACE);
2997 }
2998
2999 /*
3000 * Common routine to set access and modification times given a vnode.
3001 */
3002 int
3003 do_sys_utimes(struct lwp *l, struct vnode *vp, const char *path, int flag,
3004 const struct timeval *tptr, enum uio_seg seg)
3005 {
3006 struct vattr vattr;
3007 struct nameidata nd;
3008 int error;
3009
3010 VATTR_NULL(&vattr);
3011 if (tptr == NULL) {
3012 nanotime(&vattr.va_atime);
3013 vattr.va_mtime = vattr.va_atime;
3014 vattr.va_vaflags |= VA_UTIMES_NULL;
3015 } else {
3016 struct timeval tv[2];
3017
3018 if (seg != UIO_SYSSPACE) {
3019 error = copyin(tptr, &tv, sizeof (tv));
3020 if (error != 0)
3021 return error;
3022 tptr = tv;
3023 }
3024 TIMEVAL_TO_TIMESPEC(tptr, &vattr.va_atime);
3025 TIMEVAL_TO_TIMESPEC(tptr + 1, &vattr.va_mtime);
3026 }
3027
3028 if (vp == NULL) {
3029 NDINIT(&nd, LOOKUP, flag | TRYEMULROOT, UIO_USERSPACE, path);
3030 if ((error = namei(&nd)) != 0)
3031 return (error);
3032 vp = nd.ni_vp;
3033 } else
3034 nd.ni_vp = NULL;
3035
3036 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3037 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3038 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3039 VOP_UNLOCK(vp, 0);
3040
3041 if (nd.ni_vp != NULL)
3042 vrele(nd.ni_vp);
3043
3044 return (error);
3045 }
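/*
 * Illustrative userland sketch (the path is hypothetical): a NULL times
 * pointer takes the VA_UTIMES_NULL branch above and stamps both times with
 * the current time; an explicit array supplies the access time first, then
 * the modification time.
 *
 *	#include <sys/time.h>
 *
 *	struct timeval tv[2] = { { 0, 0 }, { 0, 0 } };
 *
 *	(void)utimes("/tmp/example", NULL);	"touch" to the current time
 *	(void)utimes("/tmp/example", tv);	set atime and mtime to the epoch
 */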
3046
3047 /*
3048 * Truncate a file given its path name.
3049 */
3050 /* ARGSUSED */
3051 int
3052 sys_truncate(struct lwp *l, const struct sys_truncate_args *uap, register_t *retval)
3053 {
3054 /* {
3055 syscallarg(const char *) path;
3056 syscallarg(int) pad;
3057 syscallarg(off_t) length;
3058 } */
3059 struct vnode *vp;
3060 struct vattr vattr;
3061 int error;
3062 struct nameidata nd;
3063
3064 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3065 SCARG(uap, path));
3066 if ((error = namei(&nd)) != 0)
3067 return (error);
3068 vp = nd.ni_vp;
3069 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3070 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3071 if (vp->v_type == VDIR)
3072 error = EISDIR;
3073 else if ((error = vn_writechk(vp)) == 0 &&
3074 (error = VOP_ACCESS(vp, VWRITE, l->l_cred)) == 0) {
3075 VATTR_NULL(&vattr);
3076 vattr.va_size = SCARG(uap, length);
3077 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3078 }
3079 vput(vp);
3080 return (error);
3081 }
3082
3083 /*
3084 * Truncate a file given a file descriptor.
3085 */
3086 /* ARGSUSED */
3087 int
3088 sys_ftruncate(struct lwp *l, const struct sys_ftruncate_args *uap, register_t *retval)
3089 {
3090 /* {
3091 syscallarg(int) fd;
3092 syscallarg(int) pad;
3093 syscallarg(off_t) length;
3094 } */
3095 struct proc *p = l->l_proc;
3096 struct vattr vattr;
3097 struct vnode *vp;
3098 struct file *fp;
3099 int error;
3100
3101 /* getvnode() will use the descriptor for us */
3102 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3103 return (error);
3104 if ((fp->f_flag & FWRITE) == 0) {
3105 error = EINVAL;
3106 goto out;
3107 }
3108 vp = (struct vnode *)fp->f_data;
3109 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3110 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3111 if (vp->v_type == VDIR)
3112 error = EISDIR;
3113 else if ((error = vn_writechk(vp)) == 0) {
3114 VATTR_NULL(&vattr);
3115 vattr.va_size = SCARG(uap, length);
3116 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
3117 }
3118 VOP_UNLOCK(vp, 0);
3119 out:
3120 FILE_UNUSE(fp, l);
3121 return (error);
3122 }
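/*
 * Illustrative userland sketch (`fd' is hypothetical): ftruncate(2) needs a
 * descriptor that was opened for writing (the FWRITE check above) and can
 * both shrink and extend the file to the requested length.
 *
 *	#include <unistd.h>
 *
 *	(void)ftruncate(fd, (off_t)0);		discard the file's contents
 *	(void)ftruncate(fd, (off_t)1048576);	extend (or cut) it to 1 MB
 */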
3123
3124 /*
3125 * Sync an open file.
3126 */
3127 /* ARGSUSED */
3128 int
3129 sys_fsync(struct lwp *l, const struct sys_fsync_args *uap, register_t *retval)
3130 {
3131 /* {
3132 syscallarg(int) fd;
3133 } */
3134 struct proc *p = l->l_proc;
3135 struct vnode *vp;
3136 struct file *fp;
3137 int error;
3138
3139 /* getvnode() will use the descriptor for us */
3140 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3141 return (error);
3142 vp = (struct vnode *)fp->f_data;
3143 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3144 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT, 0, 0);
3145 if (error == 0 && bioopsp != NULL &&
3146 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3147 (*bioopsp->io_fsync)(vp, 0);
3148 VOP_UNLOCK(vp, 0);
3149 FILE_UNUSE(fp, l);
3150 return (error);
3151 }
3152
3153 /*
3154 * Sync a range of file data. API modeled after that found in AIX.
3155 *
3156 * FDATASYNC indicates that we need only save enough metadata to be able
3157 * to re-read the written data. Note we duplicate AIX's requirement that
3158 * the file be open for writing.
3159 */
3160 /* ARGSUSED */
3161 int
3162 sys_fsync_range(struct lwp *l, const struct sys_fsync_range_args *uap, register_t *retval)
3163 {
3164 /* {
3165 syscallarg(int) fd;
3166 syscallarg(int) flags;
3167 syscallarg(off_t) start;
3168 syscallarg(off_t) length;
3169 } */
3170 struct proc *p = l->l_proc;
3171 struct vnode *vp;
3172 struct file *fp;
3173 int flags, nflags;
3174 off_t s, e, len;
3175 int error;
3176
3177 /* getvnode() will use the descriptor for us */
3178 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3179 return (error);
3180
3181 if ((fp->f_flag & FWRITE) == 0) {
3182 error = EBADF;
3183 goto out;
3184 }
3185
3186 flags = SCARG(uap, flags);
3187 if (((flags & (FDATASYNC | FFILESYNC)) == 0) ||
3188 ((~flags & (FDATASYNC | FFILESYNC)) == 0)) {
3189 error = EINVAL;
3190 goto out;
3191 }
3192 	/* Now set up the flag value(s) to pass to VOP_FSYNC() */
3193 if (flags & FDATASYNC)
3194 nflags = FSYNC_DATAONLY | FSYNC_WAIT;
3195 else
3196 nflags = FSYNC_WAIT;
3197 if (flags & FDISKSYNC)
3198 nflags |= FSYNC_CACHE;
3199
3200 len = SCARG(uap, length);
3201 	/* If length == 0, we do the whole file, and s = e = 0 will do that */
3202 if (len) {
3203 s = SCARG(uap, start);
3204 e = s + len;
3205 if (e < s) {
3206 error = EINVAL;
3207 goto out;
3208 }
3209 } else {
3210 e = 0;
3211 s = 0;
3212 }
3213
3214 vp = (struct vnode *)fp->f_data;
3215 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3216 error = VOP_FSYNC(vp, fp->f_cred, nflags, s, e);
3217
3218 if (error == 0 && bioopsp != NULL &&
3219 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3220 (*bioopsp->io_fsync)(vp, nflags);
3221
3222 VOP_UNLOCK(vp, 0);
3223 out:
3224 FILE_UNUSE(fp, l);
3225 return (error);
3226 }
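/*
 * Illustrative userland sketch (`fd' is hypothetical): exactly one of
 * FFILESYNC or FDATASYNC must be supplied (the EINVAL check above), and
 * FDISKSYNC may be or'ed in to flush the drive's write cache as well; a
 * length of 0 covers the whole file.
 *
 *	#include <unistd.h>
 *
 *	if (fsync_range(fd, FDATASYNC | FDISKSYNC, (off_t)0, (off_t)0) == -1)
 *		the data may not have reached stable storage
 */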
3227
3228 /*
3229 * Sync the data of an open file.
3230 */
3231 /* ARGSUSED */
3232 int
3233 sys_fdatasync(struct lwp *l, const struct sys_fdatasync_args *uap, register_t *retval)
3234 {
3235 /* {
3236 syscallarg(int) fd;
3237 } */
3238 struct proc *p = l->l_proc;
3239 struct vnode *vp;
3240 struct file *fp;
3241 int error;
3242
3243 /* getvnode() will use the descriptor for us */
3244 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3245 return (error);
3246 if ((fp->f_flag & FWRITE) == 0) {
3247 FILE_UNUSE(fp, l);
3248 return (EBADF);
3249 }
3250 vp = (struct vnode *)fp->f_data;
3251 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3252 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT|FSYNC_DATAONLY, 0, 0);
3253 VOP_UNLOCK(vp, 0);
3254 FILE_UNUSE(fp, l);
3255 return (error);
3256 }
3257
3258 /*
3259 * Rename files, (standard) BSD semantics frontend.
3260 */
3261 /* ARGSUSED */
3262 int
3263 sys_rename(struct lwp *l, const struct sys_rename_args *uap, register_t *retval)
3264 {
3265 /* {
3266 syscallarg(const char *) from;
3267 syscallarg(const char *) to;
3268 } */
3269
3270 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 0));
3271 }
3272
3273 /*
3274 * Rename files, POSIX semantics frontend.
3275 */
3276 /* ARGSUSED */
3277 int
3278 sys___posix_rename(struct lwp *l, const struct sys___posix_rename_args *uap, register_t *retval)
3279 {
3280 /* {
3281 syscallarg(const char *) from;
3282 syscallarg(const char *) to;
3283 } */
3284
3285 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 1));
3286 }
3287
3288 /*
3289 * Rename files. Source and destination must either both be directories,
3290 * or both not be directories. If target is a directory, it must be empty.
3291 * If `from' and `to' refer to the same object, the value of the `retain'
3292 * argument is used to determine whether `from' will be
3293 *
3294 * (retain == 0) deleted unless `from' and `to' refer to the same
3295 * object in the file system's name space (BSD).
3296 * (retain == 1) always retained (POSIX).
3297 */
3298 int
3299 do_sys_rename(const char *from, const char *to, enum uio_seg seg, int retain)
3300 {
3301 struct vnode *tvp, *fvp, *tdvp;
3302 struct nameidata fromnd, tond;
3303 struct lwp *l = curlwp;
3304 struct proc *p;
3305 int error;
3306
3307 NDINIT(&fromnd, DELETE, LOCKPARENT | SAVESTART | TRYEMULROOT,
3308 seg, from);
3309 if ((error = namei(&fromnd)) != 0)
3310 return (error);
3311 if (fromnd.ni_dvp != fromnd.ni_vp)
3312 VOP_UNLOCK(fromnd.ni_dvp, 0);
3313 fvp = fromnd.ni_vp;
3314 NDINIT(&tond, RENAME,
3315 LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | TRYEMULROOT
3316 | (fvp->v_type == VDIR ? CREATEDIR : 0),
3317 seg, to);
3318 if ((error = namei(&tond)) != 0) {
3319 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3320 vrele(fromnd.ni_dvp);
3321 vrele(fvp);
3322 goto out1;
3323 }
3324 tdvp = tond.ni_dvp;
3325 tvp = tond.ni_vp;
3326
3327 if (tvp != NULL) {
3328 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
3329 error = ENOTDIR;
3330 goto out;
3331 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
3332 error = EISDIR;
3333 goto out;
3334 }
3335 }
3336
3337 if (fvp == tdvp)
3338 error = EINVAL;
3339
3340 /*
3341 * Source and destination refer to the same object.
3342 */
3343 if (fvp == tvp) {
3344 if (retain)
3345 error = -1;
3346 else if (fromnd.ni_dvp == tdvp &&
3347 fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
3348 !memcmp(fromnd.ni_cnd.cn_nameptr,
3349 tond.ni_cnd.cn_nameptr,
3350 fromnd.ni_cnd.cn_namelen))
3351 error = -1;
3352 }
3353
3354 #if NVERIEXEC > 0
3355 if (!error) {
3356 char *f1, *f2;
3357
3358 f1 = malloc(fromnd.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3359 		strlcpy(f1, fromnd.ni_cnd.cn_nameptr, fromnd.ni_cnd.cn_namelen + 1);
3360 
3361 		f2 = malloc(tond.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3362 		strlcpy(f2, tond.ni_cnd.cn_nameptr, tond.ni_cnd.cn_namelen + 1);
3363
3364 error = veriexec_renamechk(l, fvp, f1, tvp, f2);
3365
3366 free(f1, M_TEMP);
3367 free(f2, M_TEMP);
3368 }
3369 #endif /* NVERIEXEC > 0 */
3370
3371 out:
3372 p = l->l_proc;
3373 if (!error) {
3374 VOP_LEASE(tdvp, l->l_cred, LEASE_WRITE);
3375 if (fromnd.ni_dvp != tdvp)
3376 VOP_LEASE(fromnd.ni_dvp, l->l_cred, LEASE_WRITE);
3377 if (tvp) {
3378 VOP_LEASE(tvp, l->l_cred, LEASE_WRITE);
3379 }
3380 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
3381 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
3382 } else {
3383 VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd);
3384 if (tdvp == tvp)
3385 vrele(tdvp);
3386 else
3387 vput(tdvp);
3388 if (tvp)
3389 vput(tvp);
3390 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3391 vrele(fromnd.ni_dvp);
3392 vrele(fvp);
3393 }
3394 vrele(tond.ni_startdir);
3395 PNBUF_PUT(tond.ni_cnd.cn_pnbuf);
3396 out1:
3397 if (fromnd.ni_startdir)
3398 vrele(fromnd.ni_startdir);
3399 PNBUF_PUT(fromnd.ni_cnd.cn_pnbuf);
3400 return (error == -1 ? 0 : error);
3401 }
3402
3403 /*
3404 * Make a directory file.
3405 */
3406 /* ARGSUSED */
3407 int
3408 sys_mkdir(struct lwp *l, const struct sys_mkdir_args *uap, register_t *retval)
3409 {
3410 /* {
3411 syscallarg(const char *) path;
3412 syscallarg(int) mode;
3413 } */
3414 struct proc *p = l->l_proc;
3415 struct vnode *vp;
3416 struct vattr vattr;
3417 int error;
3418 struct nameidata nd;
3419
3420 NDINIT(&nd, CREATE, LOCKPARENT | CREATEDIR | TRYEMULROOT, UIO_USERSPACE,
3421 SCARG(uap, path));
3422 if ((error = namei(&nd)) != 0)
3423 return (error);
3424 vp = nd.ni_vp;
3425 if (vp != NULL) {
3426 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3427 if (nd.ni_dvp == vp)
3428 vrele(nd.ni_dvp);
3429 else
3430 vput(nd.ni_dvp);
3431 vrele(vp);
3432 return (EEXIST);
3433 }
3434 VATTR_NULL(&vattr);
3435 vattr.va_type = VDIR;
3436 /* We will read cwdi->cwdi_cmask unlocked. */
3437 vattr.va_mode =
3438 (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_cwdi->cwdi_cmask;
3439 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3440 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
3441 if (!error)
3442 vput(nd.ni_vp);
3443 return (error);
3444 }
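/*
 * Illustrative userland sketch (the path is hypothetical): the requested
 * mode is masked with the process umask above (cwdi_cmask), so with the
 * customary mask of 022 a request for 0777 creates the directory as 0755.
 *
 *	#include <sys/stat.h>
 *
 *	(void)umask(022);
 *	(void)mkdir("/tmp/newdir", 0777);	created with mode 0755
 */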
3445
3446 /*
3447 * Remove a directory file.
3448 */
3449 /* ARGSUSED */
3450 int
3451 sys_rmdir(struct lwp *l, const struct sys_rmdir_args *uap, register_t *retval)
3452 {
3453 /* {
3454 syscallarg(const char *) path;
3455 } */
3456 struct vnode *vp;
3457 int error;
3458 struct nameidata nd;
3459
3460 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
3461 SCARG(uap, path));
3462 if ((error = namei(&nd)) != 0)
3463 return (error);
3464 vp = nd.ni_vp;
3465 if (vp->v_type != VDIR) {
3466 error = ENOTDIR;
3467 goto out;
3468 }
3469 /*
3470 * No rmdir "." please.
3471 */
3472 if (nd.ni_dvp == vp) {
3473 error = EINVAL;
3474 goto out;
3475 }
3476 /*
3477 * The root of a mounted filesystem cannot be deleted.
3478 */
3479 if (vp->v_vflag & VV_ROOT) {
3480 error = EBUSY;
3481 goto out;
3482 }
3483 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3484 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3485 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
3486 return (error);
3487
3488 out:
3489 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3490 if (nd.ni_dvp == vp)
3491 vrele(nd.ni_dvp);
3492 else
3493 vput(nd.ni_dvp);
3494 vput(vp);
3495 return (error);
3496 }
3497
3498 /*
3499 * Read a block of directory entries in a file system independent format.
3500 */
3501 int
3502 sys___getdents30(struct lwp *l, const struct sys___getdents30_args *uap, register_t *retval)
3503 {
3504 /* {
3505 syscallarg(int) fd;
3506 syscallarg(char *) buf;
3507 syscallarg(size_t) count;
3508 } */
3509 struct proc *p = l->l_proc;
3510 struct file *fp;
3511 int error, done;
3512
3513 /* getvnode() will use the descriptor for us */
3514 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3515 return (error);
3516 if ((fp->f_flag & FREAD) == 0) {
3517 error = EBADF;
3518 goto out;
3519 }
3520 error = vn_readdir(fp, SCARG(uap, buf), UIO_USERSPACE,
3521 SCARG(uap, count), &done, l, 0, 0);
3522 ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, buf), done, error);
3523 *retval = done;
3524 out:
3525 FILE_UNUSE(fp, l);
3526 return (error);
3527 }
3528
3529 /*
3530 * Set the mode mask for creation of filesystem nodes.
3531 */
3532 int
3533 sys_umask(struct lwp *l, const struct sys_umask_args *uap, register_t *retval)
3534 {
3535 /* {
3536 syscallarg(mode_t) newmask;
3537 } */
3538 struct proc *p = l->l_proc;
3539 struct cwdinfo *cwdi;
3540
3541 /*
3542 * cwdi->cwdi_cmask will be read unlocked elsewhere. What's
3543 * important is that we serialize changes to the mask. The
3544 * rw_exit() will issue a write memory barrier on our behalf,
3545 * and force the changes out to other CPUs (as it must use an
3546 * atomic operation, draining the local CPU's store buffers).
3547 */
3548 cwdi = p->p_cwdi;
3549 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
3550 *retval = cwdi->cwdi_cmask;
3551 cwdi->cwdi_cmask = SCARG(uap, newmask) & ALLPERMS;
3552 rw_exit(&cwdi->cwdi_lock);
3553
3554 return (0);
3555 }
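/*
 * Illustrative userland sketch: umask(2) always succeeds and returns the
 * previous mask, so reading the current value without disturbing it takes
 * two calls.
 *
 *	#include <sys/stat.h>
 *
 *	mode_t old = umask(0);	read (and momentarily clear) the mask
 *	(void)umask(old);	restore it
 */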
3556
3557 /*
3558 * Void all references to the file by ripping the underlying filesystem
3559 * away from the vnode.
3560 */
3561 /* ARGSUSED */
3562 int
3563 sys_revoke(struct lwp *l, const struct sys_revoke_args *uap, register_t *retval)
3564 {
3565 /* {
3566 syscallarg(const char *) path;
3567 } */
3568 struct vnode *vp;
3569 struct vattr vattr;
3570 int error;
3571 bool revoke;
3572 struct nameidata nd;
3573
3574 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3575 SCARG(uap, path));
3576 if ((error = namei(&nd)) != 0)
3577 return (error);
3578 vp = nd.ni_vp;
3579 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
3580 goto out;
3581 if (kauth_cred_geteuid(l->l_cred) != vattr.va_uid &&
3582 (error = kauth_authorize_generic(l->l_cred,
3583 KAUTH_GENERIC_ISSUSER, NULL)) != 0)
3584 goto out;
3585 simple_lock(&vp->v_interlock);
3586 revoke = (vp->v_usecount > 1 || (vp->v_iflag & (VI_ALIASED|VI_LAYER)));
3587 simple_unlock(&vp->v_interlock);
3588 if (revoke)
3589 VOP_REVOKE(vp, REVOKEALL);
3590 out:
3591 vrele(vp);
3592 return (error);
3593 }
3594
3595 /*
3596 * Convert a user file descriptor to a kernel file entry.
3597 */
3598 int
3599 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
3600 {
3601 struct vnode *vp;
3602 struct file *fp;
3603
3604 if ((fp = fd_getfile(fdp, fd)) == NULL)
3605 return (EBADF);
3606
3607 FILE_USE(fp);
3608
3609 if (fp->f_type != DTYPE_VNODE) {
3610 FILE_UNUSE(fp, NULL);
3611 return (EINVAL);
3612 }
3613
3614 vp = (struct vnode *)fp->f_data;
3615 if (vp->v_type == VBAD) {
3616 FILE_UNUSE(fp, NULL);
3617 return (EBADF);
3618 }
3619
3620 *fpp = fp;
3621 return (0);
3622 }
3623