1 /*	$NetBSD: vfs_syscalls.c,v 1.335 2007/12/20 23:03:13 dsl Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.335 2007/12/20 23:03:13 dsl Exp $");
41
42 #include "opt_compat_netbsd.h"
43 #include "opt_compat_43.h"
44 #include "opt_fileassoc.h"
45 #include "fss.h"
46 #include "veriexec.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/filedesc.h>
52 #include <sys/kernel.h>
53 #include <sys/file.h>
54 #include <sys/stat.h>
55 #include <sys/vnode.h>
56 #include <sys/mount.h>
57 #include <sys/proc.h>
58 #include <sys/uio.h>
59 #include <sys/malloc.h>
60 #include <sys/kmem.h>
61 #include <sys/dirent.h>
62 #include <sys/sysctl.h>
63 #include <sys/syscallargs.h>
64 #include <sys/vfs_syscalls.h>
65 #include <sys/ktrace.h>
66 #ifdef FILEASSOC
67 #include <sys/fileassoc.h>
68 #endif /* FILEASSOC */
69 #include <sys/verified_exec.h>
70 #include <sys/kauth.h>
71
72 #include <miscfs/genfs/genfs.h>
73 #include <miscfs/syncfs/syncfs.h>
74
75 #ifdef COMPAT_30
76 #include "opt_nfsserver.h"
77 #include <nfs/rpcv2.h>
78 #endif
79 #include <nfs/nfsproto.h>
80 #ifdef COMPAT_30
81 #include <nfs/nfs.h>
82 #include <nfs/nfs_var.h>
83 #endif
84
85 #if NFSS > 0
86 #include <dev/fssvar.h>
87 #endif
88
89 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount struct");
90
91 static int change_dir(struct nameidata *, struct lwp *);
92 static int change_flags(struct vnode *, u_long, struct lwp *);
93 static int change_mode(struct vnode *, int, struct lwp *l);
94 static int change_owner(struct vnode *, uid_t, gid_t, struct lwp *, int);
95 static int rename_files(const char *, const char *, struct lwp *, int);
96
97 void checkdirs(struct vnode *);
98
99 int dovfsusermount = 0;
100
101 /*
102 * Virtual File System System Calls
103 */
104
105 /*
106 * Mount a file system.
107 */
108
109 #if defined(COMPAT_09) || defined(COMPAT_43)
110 /*
111 * This table is used to maintain compatibility with 4.3BSD
112 * and NetBSD 0.9 mount syscalls. Note, the order is important!
113 *
114 * Do not modify this table. It should only contain filesystems
115 * supported by NetBSD 0.9 and 4.3BSD.
116 */
117 const char * const mountcompatnames[] = {
118 NULL, /* 0 = MOUNT_NONE */
119 MOUNT_FFS, /* 1 = MOUNT_UFS */
120 MOUNT_NFS, /* 2 */
121 MOUNT_MFS, /* 3 */
122 MOUNT_MSDOS, /* 4 */
123 MOUNT_CD9660, /* 5 = MOUNT_ISOFS */
124 MOUNT_FDESC, /* 6 */
125 MOUNT_KERNFS, /* 7 */
126 NULL, /* 8 = MOUNT_DEVFS */
127 MOUNT_AFS, /* 9 */
128 };
129 const int nmountcompatnames = sizeof(mountcompatnames) /
130 sizeof(mountcompatnames[0]);
131 #endif /* COMPAT_09 || COMPAT_43 */
132
133 static int
134 mount_update(struct lwp *l, struct vnode *vp, const char *path, int flags,
135 void *data, size_t *data_len)
136 {
137 struct mount *mp;
138 int error = 0, saved_flags;
139
140 mp = vp->v_mount;
141 saved_flags = mp->mnt_flag;
142
143 /* We can operate only on VV_ROOT nodes. */
144 if ((vp->v_vflag & VV_ROOT) == 0) {
145 error = EINVAL;
146 goto out;
147 }
148
149 /*
150 * We only allow the filesystem to be reloaded if it
151 * is currently mounted read-only.
152 */
153 if (flags & MNT_RELOAD && !(mp->mnt_flag & MNT_RDONLY)) {
154 error = EOPNOTSUPP; /* Needs translation */
155 goto out;
156 }
157
158 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
159 KAUTH_REQ_SYSTEM_MOUNT_UPDATE, mp, KAUTH_ARG(flags), data);
160 if (error)
161 goto out;
162
163 if (vfs_busy(mp, LK_NOWAIT, 0)) {
164 error = EPERM;
165 goto out;
166 }
167
168 mp->mnt_flag &= ~MNT_OP_FLAGS;
169 mp->mnt_flag |= flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
170
171 /*
172 * Set the mount level flags.
173 */
174 if (flags & MNT_RDONLY)
175 mp->mnt_flag |= MNT_RDONLY;
176 else if (mp->mnt_flag & MNT_RDONLY)
177 mp->mnt_iflag |= IMNT_WANTRDWR;
178 mp->mnt_flag &=
179 ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
180 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
181 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP);
182 mp->mnt_flag |= flags &
183 (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
184 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
185 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
186 MNT_IGNORE);
187
188 error = VFS_MOUNT(mp, path, data, data_len);
189
190 #if defined(COMPAT_30) && defined(NFSSERVER)
191 if (error && data != NULL) {
192 int error2;
193
194 /* Update failed; let's try and see if it was an
195 * export request. */
196 error2 = nfs_update_exports_30(mp, path, data, l);
197
198 /* Only update error code if the export request was
199 * understood but some problem occurred while
200 * processing it. */
201 if (error2 != EJUSTRETURN)
202 error = error2;
203 }
204 #endif
205 if (mp->mnt_iflag & IMNT_WANTRDWR)
206 mp->mnt_flag &= ~MNT_RDONLY;
207 if (error)
208 mp->mnt_flag = saved_flags;
209 mp->mnt_flag &= ~MNT_OP_FLAGS;
210 mp->mnt_iflag &= ~IMNT_WANTRDWR;
211 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0) {
212 if (mp->mnt_syncer == NULL)
213 error = vfs_allocate_syncvnode(mp);
214 } else {
215 if (mp->mnt_syncer != NULL)
216 vfs_deallocate_syncvnode(mp);
217 }
218 vfs_unbusy(mp);
219
220 out:
221 return (error);
222 }
223
224 static int
225 mount_get_vfsops(const char *fstype, struct vfsops **vfsops)
226 {
227 char fstypename[sizeof(((struct statvfs *)NULL)->f_fstypename)];
228 int error;
229
230 /* Copy file-system type from userspace. */
231 error = copyinstr(fstype, fstypename, sizeof(fstypename), NULL);
232 if (error) {
233 #if defined(COMPAT_09) || defined(COMPAT_43)
234 /*
235 * Historically, filesystem types were identified by numbers.
236 * If we get an integer for the filesystem type instead of a
237 * string, we check to see if it matches one of the historic
238 * filesystem types.
239 */
240 u_long fsindex = (u_long)fstype;
241 if (fsindex >= nmountcompatnames ||
242 mountcompatnames[fsindex] == NULL)
243 return ENODEV;
244 strlcpy(fstypename, mountcompatnames[fsindex],
245 sizeof(fstypename));
246 #else
247 return error;
248 #endif
249 }
250
251 #ifdef COMPAT_10
252 /* Accept `ufs' as an alias for `ffs'. */
253 if (strcmp(fstypename, "ufs") == 0)
254 fstypename[0] = 'f';
255 #endif
256
257 if ((*vfsops = vfs_getopsbyname(fstypename)) == NULL)
258 return ENODEV;
259 return 0;
260 }
261
262 static int
263 mount_domount(struct lwp *l, struct vnode **vpp, struct vfsops *vfsops,
264 const char *path, int flags, void *data, size_t *data_len)
265 {
266 struct mount *mp = NULL;
267 struct vnode *vp = *vpp;
268 struct vattr va;
269 int error;
270
271 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
272 KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
273 if (error)
274 return error;
275
276 /* Can't make a non-dir a mount-point (from here anyway). */
277 if (vp->v_type != VDIR)
278 return ENOTDIR;
279
280 /*
281 * If the user is not root, ensure that they own the directory
282 * onto which we are attempting to mount.
283 */
284 if ((error = VOP_GETATTR(vp, &va, l->l_cred)) != 0 ||
285 (va.va_uid != kauth_cred_geteuid(l->l_cred) &&
286 (error = kauth_authorize_generic(l->l_cred,
287 KAUTH_GENERIC_ISSUSER, NULL)) != 0)) {
288 return error;
289 }
290
291 if (flags & MNT_EXPORTED)
292 return EINVAL;
293
294 if ((error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
295 return error;
296
297 /*
298 * Check if a file-system is not already mounted on this vnode.
299 */
300 if (vp->v_mountedhere != NULL)
301 return EBUSY;
302
303 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
304
305 mp->mnt_op = vfsops;
306
307 TAILQ_INIT(&mp->mnt_vnodelist);
308 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
309 simple_lock_init(&mp->mnt_slock);
310 (void)vfs_busy(mp, LK_NOWAIT, 0);
311
312 mp->mnt_vnodecovered = vp;
313 mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);
314 mp->mnt_unmounter = NULL;
315 mount_initspecific(mp);
316
317 /*
318 * The underlying file system may refuse the mount for
319 * various reasons. Allow the user to force it to happen.
320 *
321 * Set the mount level flags.
322 */
323 mp->mnt_flag = flags &
324 (MNT_FORCE | MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
325 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
326 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
327 MNT_IGNORE | MNT_RDONLY);
328
329 error = VFS_MOUNT(mp, path, data, data_len);
330 mp->mnt_flag &= ~MNT_OP_FLAGS;
331
332 /*
333 * Put the new filesystem on the mount list after root.
334 */
335 cache_purge(vp);
336 if (error != 0) {
337 vp->v_mountedhere = NULL;
338 mp->mnt_op->vfs_refcount--;
339 vfs_unbusy(mp);
340 vfs_destroy(mp);
341 return error;
342 }
343
344 mp->mnt_iflag &= ~IMNT_WANTRDWR;
345 vp->v_mountedhere = mp;
346 mutex_enter(&mountlist_lock);
347 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
348 mutex_exit(&mountlist_lock);
349 VOP_UNLOCK(vp, 0);
350 checkdirs(vp);
351 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
352 error = vfs_allocate_syncvnode(mp);
353 vfs_unbusy(mp);
354 (void) VFS_STATVFS(mp, &mp->mnt_stat);
355 error = VFS_START(mp, 0);
356 if (error)
357 vrele(vp);
358 *vpp = NULL;
359 return error;
360 }
361
362 static int
363 mount_getargs(struct lwp *l, struct vnode *vp, const char *path, int flags,
364 void *data, size_t *data_len)
365 {
366 struct mount *mp;
367 int error;
368
369 /* If MNT_GETARGS is specified, it should be the only flag. */
370 if (flags & ~MNT_GETARGS)
371 return EINVAL;
372
373 mp = vp->v_mount;
374
375 /* XXX: probably some notion of "can see" here if we want isolation. */
376 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
377 KAUTH_REQ_SYSTEM_MOUNT_GET, mp, data, NULL);
378 if (error)
379 return error;
380
381 if ((vp->v_vflag & VV_ROOT) == 0)
382 return EINVAL;
383
384 if (vfs_busy(mp, LK_NOWAIT, 0))
385 return EPERM;
386
387 mp->mnt_flag &= ~MNT_OP_FLAGS;
388 mp->mnt_flag |= MNT_GETARGS;
389 error = VFS_MOUNT(mp, path, data, data_len);
390 mp->mnt_flag &= ~MNT_OP_FLAGS;
391
392 vfs_unbusy(mp);
393 return (error);
394 }
395
396 #ifdef COMPAT_40
397 /* ARGSUSED */
398 int
399 compat_40_sys_mount(struct lwp *l, const struct compat_40_sys_mount_args *uap, register_t *retval)
400 {
401 /* {
402 syscallarg(const char *) type;
403 syscallarg(const char *) path;
404 syscallarg(int) flags;
405 syscallarg(void *) data;
406 } */
407 register_t dummy;
408
409 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
410 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE, 0, &dummy);
411 }
412 #endif
413
414 int
415 sys___mount50(struct lwp *l, const struct sys___mount50_args *uap, register_t *retval)
416 {
417 /* {
418 syscallarg(const char *) type;
419 syscallarg(const char *) path;
420 syscallarg(int) flags;
421 syscallarg(void *) data;
422 syscallarg(size_t) data_len;
423 } */
424
425 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
426 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE,
427 SCARG(uap, data_len), retval);
428 }
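/*
 * Editor's note -- an illustrative sketch, not part of the original source:
 * how the sys___mount50() entry point above is typically reached from
 * userland through the mount(2) wrapper on systems where that wrapper takes
 * a data_len argument.  The device, mount point and ffs arguments below are
 * made-up examples.
 *
 *	#include <sys/param.h>
 *	#include <sys/mount.h>
 *	#include <ufs/ufs/ufsmount.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	static char fspec[] = "/dev/wd0e";	-- hypothetical device
 *	struct ufs_args args = { .fspec = fspec };
 *
 *	if (mount(MOUNT_FFS, "/mnt", MNT_RDONLY, &args, sizeof(args)) == -1)
 *		err(EXIT_FAILURE, "mount");
 */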
429
430 int
431 do_sys_mount(struct lwp *l, struct vfsops *vfsops, const char *type,
432 const char *path, int flags, void *data, enum uio_seg data_seg,
433 size_t data_len, register_t *retval)
434 {
435 struct vnode *vp;
436 struct nameidata nd;
437 void *data_buf = data;
438 int error;
439
440 /*
441 * Get vnode to be covered
442 */
443 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
444 if ((error = namei(&nd)) != 0)
445 return (error);
446 vp = nd.ni_vp;
447
448 /*
449 * A lookup in VFS_MOUNT might result in an attempt to
450 * lock this vnode again, so make the lock recursive.
451 */
452 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_SETRECURSE);
453
454 if (vfsops == NULL) {
455 if (flags & (MNT_GETARGS | MNT_UPDATE))
456 vfsops = vp->v_mount->mnt_op;
457 else {
458 /* 'type' is userspace */
459 error = mount_get_vfsops(type, &vfsops);
460 if (error != 0)
461 goto done;
462 }
463 }
464
465 if (data != NULL && data_seg == UIO_USERSPACE) {
466 if (data_len == 0) {
467 /* No length supplied, use default for filesystem */
468 data_len = vfsops->vfs_min_mount_data;
469 if (data_len > VFS_MAX_MOUNT_DATA) {
470 /* maybe a force loaded old LKM */
471 error = EINVAL;
472 goto done;
473 }
474 #ifdef COMPAT_30
475 /* Hopefully a longer buffer won't make copyin() fail */
476 if (flags & MNT_UPDATE
477 && data_len < sizeof (struct mnt_export_args30))
478 data_len = sizeof (struct mnt_export_args30);
479 #endif
480 }
481 data_buf = malloc(data_len, M_TEMP, M_WAITOK);
482
483 /* NFS needs the buffer even for mnt_getargs .... */
484 error = copyin(data, data_buf, data_len);
485 if (error != 0)
486 goto done;
487 }
488
489 if (flags & MNT_GETARGS) {
490 if (data_len == 0) {
491 error = EINVAL;
492 goto done;
493 }
494 error = mount_getargs(l, vp, path, flags, data_buf, &data_len);
495 if (error != 0)
496 goto done;
497 if (data_seg == UIO_USERSPACE)
498 error = copyout(data_buf, data, data_len);
499 *retval = data_len;
500 } else if (flags & MNT_UPDATE) {
501 error = mount_update(l, vp, path, flags, data_buf, &data_len);
502 } else {
503 /* Locking is handled internally in mount_domount(). */
504 error = mount_domount(l, &vp, vfsops, path, flags, data_buf,
505 &data_len);
506 }
507
508 done:
509 if (vp)
510 vput(vp);
511 if (data_buf != data)
512 free(data_buf, M_TEMP);
513 return (error);
514 }
515
516 /*
517 * Scan all active processes to see if any of them have a current
518 * or root directory onto which the new filesystem has just been
519 * mounted. If so, replace them with the new mount point.
520 */
521 void
522 checkdirs(struct vnode *olddp)
523 {
524 struct cwdinfo *cwdi;
525 struct vnode *newdp;
526 struct proc *p;
527
528 if (olddp->v_usecount == 1)
529 return;
530 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
531 panic("mount: lost mount");
532 mutex_enter(&proclist_lock);
533 PROCLIST_FOREACH(p, &allproc) {
534 cwdi = p->p_cwdi;
535 if (!cwdi)
536 continue;
537 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
538 if (cwdi->cwdi_cdir == olddp) {
539 vrele(cwdi->cwdi_cdir);
540 VREF(newdp);
541 cwdi->cwdi_cdir = newdp;
542 }
543 if (cwdi->cwdi_rdir == olddp) {
544 vrele(cwdi->cwdi_rdir);
545 VREF(newdp);
546 cwdi->cwdi_rdir = newdp;
547 }
548 rw_exit(&cwdi->cwdi_lock);
549 }
550 mutex_exit(&proclist_lock);
551 if (rootvnode == olddp) {
552 vrele(rootvnode);
553 VREF(newdp);
554 rootvnode = newdp;
555 }
556 vput(newdp);
557 }
558
559 /*
560 * Unmount a file system.
561 *
562  * Note: unmount takes a path to the vnode mounted on as its argument,
563  * not the special file (as it did in earlier versions).
564 */
565 /* ARGSUSED */
566 int
567 sys_unmount(struct lwp *l, const struct sys_unmount_args *uap, register_t *retval)
568 {
569 /* {
570 syscallarg(const char *) path;
571 syscallarg(int) flags;
572 } */
573 struct vnode *vp;
574 struct mount *mp;
575 int error;
576 struct nameidata nd;
577
578 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
579 SCARG(uap, path));
580 if ((error = namei(&nd)) != 0)
581 return (error);
582 vp = nd.ni_vp;
583 mp = vp->v_mount;
584
585 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
586 KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT, mp, NULL, NULL);
587 if (error) {
588 vput(vp);
589 return (error);
590 }
591
592 /*
593 * Don't allow unmounting the root file system.
594 */
595 if (mp->mnt_flag & MNT_ROOTFS) {
596 vput(vp);
597 return (EINVAL);
598 }
599
600 /*
601 * Must be the root of the filesystem
602 */
603 if ((vp->v_vflag & VV_ROOT) == 0) {
604 vput(vp);
605 return (EINVAL);
606 }
607 vput(vp);
608
609 /*
610 * XXX Freeze syncer. Must do this before locking the
611 * mount point. See dounmount() for details.
612 */
613 mutex_enter(&syncer_mutex);
614
615 if (vfs_busy(mp, 0, 0)) {
616 mutex_exit(&syncer_mutex);
617 return (EBUSY);
618 }
619
620 return (dounmount(mp, SCARG(uap, flags), l));
621 }
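/*
 * Editor's note (illustrative, not part of the original source): from
 * userland this path is reached via unmount(2), e.g.
 *
 *	if (unmount("/mnt", 0) == -1)
 *		err(EXIT_FAILURE, "unmount");
 *
 * Passing MNT_FORCE in the flags requests a forced unmount, which
 * dounmount() below will still attempt even if the preceding VFS_SYNC()
 * fails.
 */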
622
623 /*
624 * Do the actual file system unmount. File system is assumed to have been
625 * marked busy by the caller.
626 */
627 int
628 dounmount(struct mount *mp, int flags, struct lwp *l)
629 {
630 struct vnode *coveredvp;
631 int error;
632 int async;
633 int used_syncer;
634
635 #if NVERIEXEC > 0
636 error = veriexec_unmountchk(mp);
637 if (error)
638 return (error);
639 #endif /* NVERIEXEC > 0 */
640
641 mutex_enter(&mountlist_lock);
642 vfs_unbusy(mp);
643 used_syncer = (mp->mnt_syncer != NULL);
644
645 /*
646 * XXX Syncer must be frozen when we get here. This should really
647 * be done on a per-mountpoint basis, but especially the softdep
648 * code possibly called from the syncer doesn't exactly work on a
649 * per-mountpoint basis, so the softdep code would become a maze
650 * of vfs_busy() calls.
651 *
652 * The caller of dounmount() must acquire syncer_mutex because
653 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
654 * order, and we must preserve that order to avoid deadlock.
655 *
656 * So, if the file system did not use the syncer, now is
657 * the time to release the syncer_mutex.
658 */
659 if (used_syncer == 0)
660 mutex_exit(&syncer_mutex);
661
662 mp->mnt_iflag |= IMNT_UNMOUNT;
663 mp->mnt_unmounter = l;
664 mutex_exit(&mountlist_lock); /* XXX */
665 lockmgr(&mp->mnt_lock, LK_DRAIN, NULL);
666
667 async = mp->mnt_flag & MNT_ASYNC;
668 mp->mnt_flag &= ~MNT_ASYNC;
669 cache_purgevfs(mp); /* remove cache entries for this file sys */
670 if (mp->mnt_syncer != NULL)
671 vfs_deallocate_syncvnode(mp);
672 error = 0;
673 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
674 #if NFSS > 0
675 error = fss_umount_hook(mp, (flags & MNT_FORCE));
676 #endif
677 if (error == 0)
678 error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
679 }
680 if (error == 0 || (flags & MNT_FORCE))
681 error = VFS_UNMOUNT(mp, flags);
682 if (error) {
683 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
684 (void) vfs_allocate_syncvnode(mp);
685 mutex_enter(&mountlist_lock);
686 mp->mnt_iflag &= ~IMNT_UNMOUNT;
687 mp->mnt_unmounter = NULL;
688 mp->mnt_flag |= async;
689 mutex_exit(&mountlist_lock); /* XXX */
690 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_REENABLE,
691 NULL);
692 if (used_syncer)
693 mutex_exit(&syncer_mutex);
694 simple_lock(&mp->mnt_slock);
695 while (mp->mnt_wcnt > 0) {
696 wakeup(mp);
697 ltsleep(&mp->mnt_wcnt, PVFS, "mntwcnt1",
698 0, &mp->mnt_slock);
699 }
700 simple_unlock(&mp->mnt_slock);
701 return (error);
702 }
703 mutex_enter(&mountlist_lock);
704 CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
705 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP)
706 coveredvp->v_mountedhere = NULL;
707 if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
708 panic("unmount: dangling vnode");
709 mp->mnt_iflag |= IMNT_GONE;
710 mutex_exit(&mountlist_lock);
711 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
712 if (coveredvp != NULLVP)
713 vrele(coveredvp);
714 if (used_syncer)
715 mutex_exit(&syncer_mutex);
716 simple_lock(&mp->mnt_slock);
717 while (mp->mnt_wcnt > 0) {
718 wakeup(mp);
719 ltsleep(&mp->mnt_wcnt, PVFS, "mntwcnt2", 0, &mp->mnt_slock);
720 }
721 simple_unlock(&mp->mnt_slock);
722 vfs_hooks_unmount(mp);
723 vfs_delref(mp->mnt_op);
724 vfs_destroy(mp);
725 return (0);
726 }
727
728 /*
729 * Sync each mounted filesystem.
730 */
731 #ifdef DEBUG
732 int syncprt = 0;
733 struct ctldebug debug0 = { "syncprt", &syncprt };
734 #endif
735
736 /* ARGSUSED */
737 int
738 sys_sync(struct lwp *l, const void *v, register_t *retval)
739 {
740 struct mount *mp, *nmp;
741 int asyncflag;
742
743 if (l == NULL)
744 l = &lwp0;
745
746 mutex_enter(&mountlist_lock);
747 for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
748 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
749 nmp = mp->mnt_list.cqe_prev;
750 continue;
751 }
752 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
753 asyncflag = mp->mnt_flag & MNT_ASYNC;
754 mp->mnt_flag &= ~MNT_ASYNC;
755 VFS_SYNC(mp, MNT_NOWAIT, l->l_cred);
756 if (asyncflag)
757 mp->mnt_flag |= MNT_ASYNC;
758 }
759 mutex_enter(&mountlist_lock);
760 nmp = mp->mnt_list.cqe_prev;
761 vfs_unbusy(mp);
762
763 }
764 mutex_exit(&mountlist_lock);
765 #ifdef DEBUG
766 if (syncprt)
767 vfs_bufstats();
768 #endif /* DEBUG */
769 return (0);
770 }
771
772 /*
773 * Change filesystem quotas.
774 */
775 /* ARGSUSED */
776 int
777 sys_quotactl(struct lwp *l, const struct sys_quotactl_args *uap, register_t *retval)
778 {
779 /* {
780 syscallarg(const char *) path;
781 syscallarg(int) cmd;
782 syscallarg(int) uid;
783 syscallarg(void *) arg;
784 } */
785 struct mount *mp;
786 int error;
787 struct nameidata nd;
788
789 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
790 SCARG(uap, path));
791 if ((error = namei(&nd)) != 0)
792 return (error);
793 mp = nd.ni_vp->v_mount;
794 error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
795 SCARG(uap, arg));
796 vrele(nd.ni_vp);
797 return (error);
798 }
799
800 int
801 dostatvfs(struct mount *mp, struct statvfs *sp, struct lwp *l, int flags,
802 int root)
803 {
804 struct cwdinfo *cwdi = l->l_proc->p_cwdi;
805 int error = 0;
806
807 /*
808 	 * If MNT_NOWAIT, MNT_LAZY or any flag value other than MNT_WAIT
809 	 * and zero is given, do not refresh the fsstat cache but return the
810 	 * cached statistics; only MNT_WAIT (or 0) forces a fresh VFS_STATVFS().
811 */
812 if (flags == MNT_NOWAIT || flags == MNT_LAZY ||
813 (flags != MNT_WAIT && flags != 0)) {
814 memcpy(sp, &mp->mnt_stat, sizeof(*sp));
815 goto done;
816 }
817
818 /* Get the filesystem stats now */
819 memset(sp, 0, sizeof(*sp));
820 if ((error = VFS_STATVFS(mp, sp)) != 0) {
821 return error;
822 }
823
824 if (cwdi->cwdi_rdir == NULL)
825 (void)memcpy(&mp->mnt_stat, sp, sizeof(mp->mnt_stat));
826 done:
827 if (cwdi->cwdi_rdir != NULL) {
828 size_t len;
829 char *bp;
830 char *path = PNBUF_GET();
831
832 bp = path + MAXPATHLEN;
833 *--bp = '\0';
834 rw_enter(&cwdi->cwdi_lock, RW_READER);
835 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp, path,
836 MAXPATHLEN / 2, 0, l);
837 rw_exit(&cwdi->cwdi_lock);
838 if (error) {
839 PNBUF_PUT(path);
840 return error;
841 }
842 len = strlen(bp);
843 /*
844 * for mount points that are below our root, we can see
845 * them, so we fix up the pathname and return them. The
846 * rest we cannot see, so we don't allow viewing the
847 * data.
848 */
849 if (strncmp(bp, sp->f_mntonname, len) == 0) {
850 strlcpy(sp->f_mntonname, &sp->f_mntonname[len],
851 sizeof(sp->f_mntonname));
852 if (sp->f_mntonname[0] == '\0')
853 (void)strlcpy(sp->f_mntonname, "/",
854 sizeof(sp->f_mntonname));
855 } else {
856 if (root)
857 (void)strlcpy(sp->f_mntonname, "/",
858 sizeof(sp->f_mntonname));
859 else
860 error = EPERM;
861 }
862 PNBUF_PUT(path);
863 }
864 sp->f_flag = mp->mnt_flag & MNT_VISFLAGMASK;
865 return error;
866 }
867
868 /*
869 * Get filesystem statistics by path.
870 */
871 int
872 do_sys_pstatvfs(struct lwp *l, const char *path, int flags, struct statvfs *sb)
873 {
874 struct mount *mp;
875 int error;
876 struct nameidata nd;
877
878 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
879 if ((error = namei(&nd)) != 0)
880 return error;
881 mp = nd.ni_vp->v_mount;
882 error = dostatvfs(mp, sb, l, flags, 1);
883 vrele(nd.ni_vp);
884 return error;
885 }
886
887 /* ARGSUSED */
888 int
889 sys_statvfs1(struct lwp *l, const struct sys_statvfs1_args *uap, register_t *retval)
890 {
891 /* {
892 syscallarg(const char *) path;
893 syscallarg(struct statvfs *) buf;
894 syscallarg(int) flags;
895 } */
896 struct statvfs *sb;
897 int error;
898
899 sb = STATVFSBUF_GET();
900 error = do_sys_pstatvfs(l, SCARG(uap, path), SCARG(uap, flags), sb);
901 if (error == 0)
902 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
903 STATVFSBUF_PUT(sb);
904 return error;
905 }
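/*
 * Editor's note (illustrative, not part of the original source): userland
 * usage of statvfs1(2).  ST_WAIT/ST_NOWAIT are the user-visible spellings
 * of the MNT_WAIT/MNT_NOWAIT flags interpreted by dostatvfs() above.
 *
 *	#include <sys/statvfs.h>
 *
 *	struct statvfs sv;
 *	if (statvfs1("/usr", &sv, ST_WAIT) == -1)
 *		err(EXIT_FAILURE, "statvfs1");
 *	printf("%s mounted on %s\n", sv.f_mntfromname, sv.f_mntonname);
 */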
906
907 /*
908 * Get filesystem statistics by fd.
909 */
910 int
911 do_sys_fstatvfs(struct lwp *l, int fd, int flags, struct statvfs *sb)
912 {
913 struct proc *p = l->l_proc;
914 struct file *fp;
915 struct mount *mp;
916 int error;
917
918 /* getvnode() will use the descriptor for us */
919 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
920 return (error);
921 mp = ((struct vnode *)fp->f_data)->v_mount;
922 error = dostatvfs(mp, sb, l, flags, 1);
923 FILE_UNUSE(fp, l);
924 return error;
925 }
926
927 /* ARGSUSED */
928 int
929 sys_fstatvfs1(struct lwp *l, const struct sys_fstatvfs1_args *uap, register_t *retval)
930 {
931 /* {
932 syscallarg(int) fd;
933 syscallarg(struct statvfs *) buf;
934 syscallarg(int) flags;
935 } */
936 struct statvfs *sb;
937 int error;
938
939 sb = STATVFSBUF_GET();
940 error = do_sys_fstatvfs(l, SCARG(uap, fd), SCARG(uap, flags), sb);
941 if (error == 0)
942 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
943 STATVFSBUF_PUT(sb);
944 return error;
945 }
946
947
948 /*
949 * Get statistics on all filesystems.
950 */
951 int
952 do_sys_getvfsstat(struct lwp *l, void *sfsp, size_t bufsize, int flags,
953 int (*copyfn)(const void *, void *, size_t), size_t entry_sz,
954 register_t *retval)
955 {
956 int root = 0;
957 struct proc *p = l->l_proc;
958 struct mount *mp, *nmp;
959 struct statvfs *sb;
960 size_t count, maxcount;
961 int error = 0;
962
963 sb = STATVFSBUF_GET();
964 maxcount = bufsize / entry_sz;
965 mutex_enter(&mountlist_lock);
966 count = 0;
967 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
968 mp = nmp) {
969 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
970 nmp = CIRCLEQ_NEXT(mp, mnt_list);
971 continue;
972 }
973 if (sfsp && count < maxcount) {
974 error = dostatvfs(mp, sb, l, flags, 0);
975 if (error) {
976 mutex_enter(&mountlist_lock);
977 nmp = CIRCLEQ_NEXT(mp, mnt_list);
978 vfs_unbusy(mp);
979 continue;
980 }
981 error = copyfn(sb, sfsp, entry_sz);
982 if (error) {
983 vfs_unbusy(mp);
984 goto out;
985 }
986 sfsp = (char *)sfsp + entry_sz;
987 root |= strcmp(sb->f_mntonname, "/") == 0;
988 }
989 count++;
990 mutex_enter(&mountlist_lock);
991 nmp = CIRCLEQ_NEXT(mp, mnt_list);
992 vfs_unbusy(mp);
993 }
994
995 mutex_exit(&mountlist_lock);
996 if (root == 0 && p->p_cwdi->cwdi_rdir) {
997 /*
998 * fake a root entry
999 */
1000 error = dostatvfs(p->p_cwdi->cwdi_rdir->v_mount,
1001 sb, l, flags, 1);
1002 if (error != 0)
1003 goto out;
1004 if (sfsp)
1005 error = copyfn(sb, sfsp, entry_sz);
1006 count++;
1007 }
1008 if (sfsp && count > maxcount)
1009 *retval = maxcount;
1010 else
1011 *retval = count;
1012 out:
1013 STATVFSBUF_PUT(sb);
1014 return error;
1015 }
1016
1017 int
1018 sys_getvfsstat(struct lwp *l, const struct sys_getvfsstat_args *uap, register_t *retval)
1019 {
1020 /* {
1021 syscallarg(struct statvfs *) buf;
1022 syscallarg(size_t) bufsize;
1023 syscallarg(int) flags;
1024 } */
1025
1026 return do_sys_getvfsstat(l, SCARG(uap, buf), SCARG(uap, bufsize),
1027 SCARG(uap, flags), copyout, sizeof (struct statvfs), retval);
1028 }
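/*
 * Editor's note (illustrative, not part of the original source): the usual
 * userland pattern for getvfsstat(2) is to call it first with a NULL buffer
 * to learn the number of mounted file systems (do_sys_getvfsstat() above
 * only counts entries in that case), then again with a buffer of that size:
 *
 *	int n = getvfsstat(NULL, 0, ST_WAIT);
 *	struct statvfs *sv = calloc(n, sizeof(*sv));
 *	n = getvfsstat(sv, n * sizeof(*sv), ST_WAIT);
 *
 * Error handling is omitted here for brevity.
 */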
1029
1030 /*
1031 * Change current working directory to a given file descriptor.
1032 */
1033 /* ARGSUSED */
1034 int
1035 sys_fchdir(struct lwp *l, const struct sys_fchdir_args *uap, register_t *retval)
1036 {
1037 /* {
1038 syscallarg(int) fd;
1039 } */
1040 struct proc *p = l->l_proc;
1041 struct filedesc *fdp = p->p_fd;
1042 struct cwdinfo *cwdi;
1043 struct vnode *vp, *tdp;
1044 struct mount *mp;
1045 struct file *fp;
1046 int error;
1047
1048 /* getvnode() will use the descriptor for us */
1049 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1050 return (error);
1051 vp = (struct vnode *)fp->f_data;
1052
1053 VREF(vp);
1054 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1055 if (vp->v_type != VDIR)
1056 error = ENOTDIR;
1057 else
1058 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1059 if (error) {
1060 vput(vp);
1061 goto out;
1062 }
1063 while ((mp = vp->v_mountedhere) != NULL) {
1064 if (vfs_busy(mp, 0, 0))
1065 continue;
1066
1067 vput(vp);
1068 error = VFS_ROOT(mp, &tdp);
1069 vfs_unbusy(mp);
1070 if (error)
1071 goto out;
1072 vp = tdp;
1073 }
1074 VOP_UNLOCK(vp, 0);
1075
1076 /*
1077 * Disallow changing to a directory not under the process's
1078 * current root directory (if there is one).
1079 */
1080 cwdi = p->p_cwdi;
1081 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1082 if (cwdi->cwdi_rdir && !vn_isunder(vp, NULL, l)) {
1083 vrele(vp);
1084 error = EPERM; /* operation not permitted */
1085 } else {
1086 vrele(cwdi->cwdi_cdir);
1087 cwdi->cwdi_cdir = vp;
1088 }
1089 rw_exit(&cwdi->cwdi_lock);
1090
1091 out:
1092 FILE_UNUSE(fp, l);
1093 return (error);
1094 }
1095
1096 /*
1097 * Change this process's notion of the root directory to a given file
1098 * descriptor.
1099 */
1100 int
1101 sys_fchroot(struct lwp *l, const struct sys_fchroot_args *uap, register_t *retval)
1102 {
1103 struct proc *p = l->l_proc;
1104 struct filedesc *fdp = p->p_fd;
1105 struct cwdinfo *cwdi;
1106 struct vnode *vp;
1107 struct file *fp;
1108 int error;
1109
1110 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1111 KAUTH_REQ_SYSTEM_CHROOT_FCHROOT, NULL, NULL, NULL)) != 0)
1112 return error;
1113 /* getvnode() will use the descriptor for us */
1114 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1115 return error;
1116 vp = (struct vnode *) fp->f_data;
1117 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1118 if (vp->v_type != VDIR)
1119 error = ENOTDIR;
1120 else
1121 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1122 VOP_UNLOCK(vp, 0);
1123 if (error)
1124 goto out;
1125 VREF(vp);
1126
1127 /*
1128 	 * Prevent escaping from the chroot (possible when the new root lies
1129 	 * beneath the working directory) by silently chdir'ing to the new
1130 	 * root unless the current directory is already beneath it.
1131 */
1132 cwdi = p->p_cwdi;
1133 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1134 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1135 /*
1136 * XXX would be more failsafe to change directory to a
1137 * deadfs node here instead
1138 */
1139 vrele(cwdi->cwdi_cdir);
1140 VREF(vp);
1141 cwdi->cwdi_cdir = vp;
1142 }
1143
1144 if (cwdi->cwdi_rdir != NULL)
1145 vrele(cwdi->cwdi_rdir);
1146 cwdi->cwdi_rdir = vp;
1147 rw_exit(&cwdi->cwdi_lock);
1148
1149 out:
1150 FILE_UNUSE(fp, l);
1151 return (error);
1152 }
1153
1154 /*
1155 * Change current working directory (``.'').
1156 */
1157 /* ARGSUSED */
1158 int
1159 sys_chdir(struct lwp *l, const struct sys_chdir_args *uap, register_t *retval)
1160 {
1161 /* {
1162 syscallarg(const char *) path;
1163 } */
1164 struct proc *p = l->l_proc;
1165 struct cwdinfo *cwdi;
1166 int error;
1167 struct nameidata nd;
1168
1169 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1170 SCARG(uap, path));
1171 if ((error = change_dir(&nd, l)) != 0)
1172 return (error);
1173 cwdi = p->p_cwdi;
1174 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1175 vrele(cwdi->cwdi_cdir);
1176 cwdi->cwdi_cdir = nd.ni_vp;
1177 rw_exit(&cwdi->cwdi_lock);
1178 return (0);
1179 }
1180
1181 /*
1182 * Change notion of root (``/'') directory.
1183 */
1184 /* ARGSUSED */
1185 int
1186 sys_chroot(struct lwp *l, const struct sys_chroot_args *uap, register_t *retval)
1187 {
1188 /* {
1189 syscallarg(const char *) path;
1190 } */
1191 struct proc *p = l->l_proc;
1192 struct cwdinfo *cwdi;
1193 struct vnode *vp;
1194 int error;
1195 struct nameidata nd;
1196
1197 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1198 KAUTH_REQ_SYSTEM_CHROOT_CHROOT, NULL, NULL, NULL)) != 0)
1199 return (error);
1200 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1201 SCARG(uap, path));
1202 if ((error = change_dir(&nd, l)) != 0)
1203 return (error);
1204
1205 cwdi = p->p_cwdi;
1206 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1207 if (cwdi->cwdi_rdir != NULL)
1208 vrele(cwdi->cwdi_rdir);
1209 vp = nd.ni_vp;
1210 cwdi->cwdi_rdir = vp;
1211
1212 /*
1213 	 * Prevent escaping from the chroot (possible when the new root lies
1214 	 * beneath the working directory) by silently chdir'ing to the new
1215 	 * root unless the current directory is already beneath it.
1216 */
1217 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1218 /*
1219 * XXX would be more failsafe to change directory to a
1220 * deadfs node here instead
1221 */
1222 vrele(cwdi->cwdi_cdir);
1223 VREF(vp);
1224 cwdi->cwdi_cdir = vp;
1225 }
1226 rw_exit(&cwdi->cwdi_lock);
1227
1228 return (0);
1229 }
1230
1231 /*
1232 * Common routine for chroot and chdir.
1233 */
1234 static int
1235 change_dir(struct nameidata *ndp, struct lwp *l)
1236 {
1237 struct vnode *vp;
1238 int error;
1239
1240 if ((error = namei(ndp)) != 0)
1241 return (error);
1242 vp = ndp->ni_vp;
1243 if (vp->v_type != VDIR)
1244 error = ENOTDIR;
1245 else
1246 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1247
1248 if (error)
1249 vput(vp);
1250 else
1251 VOP_UNLOCK(vp, 0);
1252 return (error);
1253 }
1254
1255 /*
1256 * Check permissions, allocate an open file structure,
1257 * and call the device open routine if any.
1258 */
1259 int
1260 sys_open(struct lwp *l, const struct sys_open_args *uap, register_t *retval)
1261 {
1262 /* {
1263 syscallarg(const char *) path;
1264 syscallarg(int) flags;
1265 syscallarg(int) mode;
1266 } */
1267 struct proc *p = l->l_proc;
1268 struct cwdinfo *cwdi = p->p_cwdi;
1269 struct filedesc *fdp = p->p_fd;
1270 struct file *fp;
1271 struct vnode *vp;
1272 int flags, cmode;
1273 int type, indx, error;
1274 struct flock lf;
1275 struct nameidata nd;
1276
1277 flags = FFLAGS(SCARG(uap, flags));
1278 if ((flags & (FREAD | FWRITE)) == 0)
1279 return (EINVAL);
1280 /* falloc() will use the file descriptor for us */
1281 if ((error = falloc(l, &fp, &indx)) != 0)
1282 return (error);
1283 /* We're going to read cwdi->cwdi_cmask unlocked here. */
1284 cmode = ((SCARG(uap, mode) &~ cwdi->cwdi_cmask) & ALLPERMS) &~ S_ISTXT;
1285 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1286 SCARG(uap, path));
1287 l->l_dupfd = -indx - 1; /* XXX check for fdopen */
1288 if ((error = vn_open(&nd, flags, cmode)) != 0) {
1289 rw_enter(&fdp->fd_lock, RW_WRITER);
1290 FILE_UNUSE(fp, l);
1291 fdp->fd_ofiles[indx] = NULL;
1292 rw_exit(&fdp->fd_lock);
1293 ffree(fp);
1294 if ((error == EDUPFD || error == EMOVEFD) &&
1295 l->l_dupfd >= 0 && /* XXX from fdopen */
1296 (error =
1297 dupfdopen(l, indx, l->l_dupfd, flags, error)) == 0) {
1298 *retval = indx;
1299 return (0);
1300 }
1301 if (error == ERESTART)
1302 error = EINTR;
1303 fdremove(fdp, indx);
1304 return (error);
1305 }
1306
1307 l->l_dupfd = 0;
1308 vp = nd.ni_vp;
1309 fp->f_flag = flags & FMASK;
1310 fp->f_type = DTYPE_VNODE;
1311 fp->f_ops = &vnops;
1312 fp->f_data = vp;
1313 if (flags & (O_EXLOCK | O_SHLOCK)) {
1314 lf.l_whence = SEEK_SET;
1315 lf.l_start = 0;
1316 lf.l_len = 0;
1317 if (flags & O_EXLOCK)
1318 lf.l_type = F_WRLCK;
1319 else
1320 lf.l_type = F_RDLCK;
1321 type = F_FLOCK;
1322 if ((flags & FNONBLOCK) == 0)
1323 type |= F_WAIT;
1324 VOP_UNLOCK(vp, 0);
1325 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1326 if (error) {
1327 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1328 FILE_UNUSE(fp, l);
1329 ffree(fp);
1330 fdremove(fdp, indx);
1331 return (error);
1332 }
1333 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1334 fp->f_flag |= FHASLOCK;
1335 }
1336 VOP_UNLOCK(vp, 0);
1337 *retval = indx;
1338 FILE_SET_MATURE(fp);
1339 FILE_UNUSE(fp, l);
1340 return (0);
1341 }
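/*
 * Editor's note (illustrative, not part of the original source): the
 * O_EXLOCK/O_SHLOCK handling above lets userland open a file and acquire a
 * flock(2)-style lock in a single call, e.g.
 *
 *	int fd = open("/var/run/example.lock", O_RDWR | O_CREAT | O_EXLOCK, 0600);
 *
 * With FNONBLOCK clear, the open blocks until the lock is granted (F_WAIT).
 */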
1342
1343 static void
1344 vfs__fhfree(fhandle_t *fhp)
1345 {
1346 size_t fhsize;
1347
1348 if (fhp == NULL) {
1349 return;
1350 }
1351 fhsize = FHANDLE_SIZE(fhp);
1352 kmem_free(fhp, fhsize);
1353 }
1354
1355 /*
1356 * vfs_composefh: compose a filehandle.
1357 */
1358
1359 int
1360 vfs_composefh(struct vnode *vp, fhandle_t *fhp, size_t *fh_size)
1361 {
1362 struct mount *mp;
1363 struct fid *fidp;
1364 int error;
1365 size_t needfhsize;
1366 size_t fidsize;
1367
1368 mp = vp->v_mount;
1369 fidp = NULL;
1370 if (*fh_size < FHANDLE_SIZE_MIN) {
1371 fidsize = 0;
1372 } else {
1373 fidsize = *fh_size - offsetof(fhandle_t, fh_fid);
1374 if (fhp != NULL) {
1375 memset(fhp, 0, *fh_size);
1376 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1377 fidp = &fhp->fh_fid;
1378 }
1379 }
1380 error = VFS_VPTOFH(vp, fidp, &fidsize);
1381 needfhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1382 if (error == 0 && *fh_size < needfhsize) {
1383 error = E2BIG;
1384 }
1385 *fh_size = needfhsize;
1386 return error;
1387 }
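/*
 * Editor's note: vfs_composefh() implements a sizing handshake.  If the
 * caller's *fh_size is too small, the routine fails with E2BIG and writes
 * the required size back through fh_size so the caller can retry with a
 * larger buffer; vfs_composefh_alloc() below uses the same VFS_VPTOFH()
 * probe (with a NULL fid and zero size) to discover the size up front.
 */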
1388
1389 int
1390 vfs_composefh_alloc(struct vnode *vp, fhandle_t **fhpp)
1391 {
1392 struct mount *mp;
1393 fhandle_t *fhp;
1394 size_t fhsize;
1395 size_t fidsize;
1396 int error;
1397
1398 *fhpp = NULL;
1399 mp = vp->v_mount;
1400 fidsize = 0;
1401 error = VFS_VPTOFH(vp, NULL, &fidsize);
1402 KASSERT(error != 0);
1403 if (error != E2BIG) {
1404 goto out;
1405 }
1406 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1407 fhp = kmem_zalloc(fhsize, KM_SLEEP);
1408 if (fhp == NULL) {
1409 error = ENOMEM;
1410 goto out;
1411 }
1412 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1413 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize);
1414 if (error == 0) {
1415 KASSERT((FHANDLE_SIZE(fhp) == fhsize &&
1416 FHANDLE_FILEID(fhp)->fid_len == fidsize));
1417 *fhpp = fhp;
1418 } else {
1419 kmem_free(fhp, fhsize);
1420 }
1421 out:
1422 return error;
1423 }
1424
1425 void
1426 vfs_composefh_free(fhandle_t *fhp)
1427 {
1428
1429 vfs__fhfree(fhp);
1430 }
1431
1432 /*
1433 * vfs_fhtovp: lookup a vnode by a filehandle.
1434 */
1435
1436 int
1437 vfs_fhtovp(fhandle_t *fhp, struct vnode **vpp)
1438 {
1439 struct mount *mp;
1440 int error;
1441
1442 *vpp = NULL;
1443 mp = vfs_getvfs(FHANDLE_FSID(fhp));
1444 if (mp == NULL) {
1445 error = ESTALE;
1446 goto out;
1447 }
1448 if (mp->mnt_op->vfs_fhtovp == NULL) {
1449 error = EOPNOTSUPP;
1450 goto out;
1451 }
1452 error = VFS_FHTOVP(mp, FHANDLE_FILEID(fhp), vpp);
1453 out:
1454 return error;
1455 }
1456
1457 /*
1458 * vfs_copyinfh_alloc: allocate and copyin a filehandle, given
1459 * the needed size.
1460 */
1461
1462 int
1463 vfs_copyinfh_alloc(const void *ufhp, size_t fhsize, fhandle_t **fhpp)
1464 {
1465 fhandle_t *fhp;
1466 int error;
1467
1468 *fhpp = NULL;
1469 if (fhsize > FHANDLE_SIZE_MAX) {
1470 return EINVAL;
1471 }
1472 if (fhsize < FHANDLE_SIZE_MIN) {
1473 return EINVAL;
1474 }
1475 again:
1476 fhp = kmem_alloc(fhsize, KM_SLEEP);
1477 if (fhp == NULL) {
1478 return ENOMEM;
1479 }
1480 error = copyin(ufhp, fhp, fhsize);
1481 if (error == 0) {
1482 /* XXX this check shouldn't be here */
1483 if (FHANDLE_SIZE(fhp) == fhsize) {
1484 *fhpp = fhp;
1485 return 0;
1486 } else if (fhsize == NFSX_V2FH && FHANDLE_SIZE(fhp) < fhsize) {
1487 /*
1488 * a kludge for nfsv2 padded handles.
1489 */
1490 size_t sz;
1491
1492 sz = FHANDLE_SIZE(fhp);
1493 kmem_free(fhp, fhsize);
1494 fhsize = sz;
1495 goto again;
1496 } else {
1497 /*
1498 			 * userland told us the wrong size.
1499 */
1500 error = EINVAL;
1501 }
1502 }
1503 kmem_free(fhp, fhsize);
1504 return error;
1505 }
1506
1507 void
1508 vfs_copyinfh_free(fhandle_t *fhp)
1509 {
1510
1511 vfs__fhfree(fhp);
1512 }
1513
1514 /*
1515 * Get file handle system call
1516 */
1517 int
1518 sys___getfh30(struct lwp *l, const struct sys___getfh30_args *uap, register_t *retval)
1519 {
1520 /* {
1521 syscallarg(char *) fname;
1522 syscallarg(fhandle_t *) fhp;
1523 syscallarg(size_t *) fh_size;
1524 } */
1525 struct vnode *vp;
1526 fhandle_t *fh;
1527 int error;
1528 struct nameidata nd;
1529 size_t sz;
1530 size_t usz;
1531
1532 /*
1533 * Must be super user
1534 */
1535 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1536 0, NULL, NULL, NULL);
1537 if (error)
1538 return (error);
1539 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1540 SCARG(uap, fname));
1541 error = namei(&nd);
1542 if (error)
1543 return (error);
1544 vp = nd.ni_vp;
1545 error = vfs_composefh_alloc(vp, &fh);
1546 vput(vp);
1547 if (error != 0) {
1548 goto out;
1549 }
1550 error = copyin(SCARG(uap, fh_size), &usz, sizeof(size_t));
1551 if (error != 0) {
1552 goto out;
1553 }
1554 sz = FHANDLE_SIZE(fh);
1555 error = copyout(&sz, SCARG(uap, fh_size), sizeof(size_t));
1556 if (error != 0) {
1557 goto out;
1558 }
1559 if (usz >= sz) {
1560 error = copyout(fh, SCARG(uap, fhp), sz);
1561 } else {
1562 error = E2BIG;
1563 }
1564 out:
1565 vfs_composefh_free(fh);
1566 return (error);
1567 }
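/*
 * Editor's note (illustrative, not part of the original source): getfh(2)
 * follows the same size handshake as the code above -- the kernel copies
 * the required handle size out through fh_size before failing with E2BIG
 * when the caller's buffer is too small:
 *
 *	const char *path = "/some/file";	-- example path
 *	size_t fhsize = 0;
 *	void *fh = NULL;
 *
 *	if (getfh(path, fh, &fhsize) == -1 && errno == E2BIG) {
 *		fh = malloc(fhsize);
 *		if (getfh(path, fh, &fhsize) == -1)
 *			err(EXIT_FAILURE, "getfh");
 *	}
 */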
1568
1569 /*
1570 * Open a file given a file handle.
1571 *
1572 * Check permissions, allocate an open file structure,
1573 * and call the device open routine if any.
1574 */
1575
1576 int
1577 dofhopen(struct lwp *l, const void *ufhp, size_t fhsize, int oflags,
1578 register_t *retval)
1579 {
1580 struct filedesc *fdp = l->l_proc->p_fd;
1581 struct file *fp;
1582 struct vnode *vp = NULL;
1583 kauth_cred_t cred = l->l_cred;
1584 struct file *nfp;
1585 int type, indx, error=0;
1586 struct flock lf;
1587 struct vattr va;
1588 fhandle_t *fh;
1589 int flags;
1590
1591 /*
1592 * Must be super user
1593 */
1594 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1595 0, NULL, NULL, NULL)))
1596 return (error);
1597
1598 flags = FFLAGS(oflags);
1599 if ((flags & (FREAD | FWRITE)) == 0)
1600 return (EINVAL);
1601 if ((flags & O_CREAT))
1602 return (EINVAL);
1603 /* falloc() will use the file descriptor for us */
1604 if ((error = falloc(l, &nfp, &indx)) != 0)
1605 return (error);
1606 fp = nfp;
1607 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1608 if (error != 0) {
1609 goto bad;
1610 }
1611 error = vfs_fhtovp(fh, &vp);
1612 if (error != 0) {
1613 goto bad;
1614 }
1615
1616 /* Now do an effective vn_open */
1617
1618 if (vp->v_type == VSOCK) {
1619 error = EOPNOTSUPP;
1620 goto bad;
1621 }
1622 error = vn_openchk(vp, cred, flags);
1623 if (error != 0)
1624 goto bad;
1625 if (flags & O_TRUNC) {
1626 VOP_UNLOCK(vp, 0); /* XXX */
1627 VOP_LEASE(vp, cred, LEASE_WRITE);
1628 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
1629 VATTR_NULL(&va);
1630 va.va_size = 0;
1631 error = VOP_SETATTR(vp, &va, cred);
1632 if (error)
1633 goto bad;
1634 }
1635 if ((error = VOP_OPEN(vp, flags, cred)) != 0)
1636 goto bad;
1637 if (flags & FWRITE)
1638 vp->v_writecount++;
1639
1640 /* done with modified vn_open, now finish what sys_open does. */
1641
1642 fp->f_flag = flags & FMASK;
1643 fp->f_type = DTYPE_VNODE;
1644 fp->f_ops = &vnops;
1645 fp->f_data = vp;
1646 if (flags & (O_EXLOCK | O_SHLOCK)) {
1647 lf.l_whence = SEEK_SET;
1648 lf.l_start = 0;
1649 lf.l_len = 0;
1650 if (flags & O_EXLOCK)
1651 lf.l_type = F_WRLCK;
1652 else
1653 lf.l_type = F_RDLCK;
1654 type = F_FLOCK;
1655 if ((flags & FNONBLOCK) == 0)
1656 type |= F_WAIT;
1657 VOP_UNLOCK(vp, 0);
1658 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1659 if (error) {
1660 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1661 FILE_UNUSE(fp, l);
1662 ffree(fp);
1663 fdremove(fdp, indx);
1664 return (error);
1665 }
1666 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1667 fp->f_flag |= FHASLOCK;
1668 }
1669 VOP_UNLOCK(vp, 0);
1670 *retval = indx;
1671 FILE_SET_MATURE(fp);
1672 FILE_UNUSE(fp, l);
1673 vfs_copyinfh_free(fh);
1674 return (0);
1675
1676 bad:
1677 FILE_UNUSE(fp, l);
1678 ffree(fp);
1679 fdremove(fdp, indx);
1680 if (vp != NULL)
1681 vput(vp);
1682 vfs_copyinfh_free(fh);
1683 return (error);
1684 }
1685
1686 int
1687 sys___fhopen40(struct lwp *l, const struct sys___fhopen40_args *uap, register_t *retval)
1688 {
1689 /* {
1690 syscallarg(const void *) fhp;
1691 syscallarg(size_t) fh_size;
1692 syscallarg(int) flags;
1693 } */
1694
1695 return dofhopen(l, SCARG(uap, fhp), SCARG(uap, fh_size),
1696 SCARG(uap, flags), retval);
1697 }
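/*
 * Editor's note (illustrative, not part of the original source): a file
 * handle obtained with getfh(2) can later be opened directly, bypassing the
 * path lookup, which is mainly useful to NFS-style file servers:
 *
 *	int fd = fhopen(fh, fhsize, O_RDONLY);
 *
 * As dofhopen() above enforces, this requires the file-handle privilege and
 * O_CREAT is rejected.
 */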
1698
1699 int
1700 do_fhstat(struct lwp *l, const void *ufhp, size_t fhsize, struct stat *sb)
1701 {
1702 int error;
1703 fhandle_t *fh;
1704 struct vnode *vp;
1705
1706 /*
1707 * Must be super user
1708 */
1709 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1710 0, NULL, NULL, NULL)))
1711 return (error);
1712
1713 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1714 if (error != 0)
1715 return error;
1716
1717 error = vfs_fhtovp(fh, &vp);
1718 vfs_copyinfh_free(fh);
1719 if (error != 0)
1720 return error;
1721
1722 error = vn_stat(vp, sb, l);
1723 vput(vp);
1724 return error;
1725 }
1726
1727
1728 /* ARGSUSED */
1729 int
1730 sys___fhstat40(struct lwp *l, const struct sys___fhstat40_args *uap, register_t *retval)
1731 {
1732 /* {
1733 syscallarg(const void *) fhp;
1734 syscallarg(size_t) fh_size;
1735 syscallarg(struct stat *) sb;
1736 } */
1737 struct stat sb;
1738 int error;
1739
1740 error = do_fhstat(l, SCARG(uap, fhp), SCARG(uap, fh_size), &sb);
1741 if (error)
1742 return error;
1743 return copyout(&sb, SCARG(uap, sb), sizeof(sb));
1744 }
1745
1746 int
1747 do_fhstatvfs(struct lwp *l, const void *ufhp, size_t fhsize, struct statvfs *sb,
1748 int flags)
1749 {
1750 fhandle_t *fh;
1751 struct mount *mp;
1752 struct vnode *vp;
1753 int error;
1754
1755 /*
1756 * Must be super user
1757 */
1758 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1759 0, NULL, NULL, NULL)))
1760 return error;
1761
1762 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1763 if (error != 0)
1764 return error;
1765
1766 error = vfs_fhtovp(fh, &vp);
1767 vfs_copyinfh_free(fh);
1768 if (error != 0)
1769 return error;
1770
1771 mp = vp->v_mount;
1772 error = dostatvfs(mp, sb, l, flags, 1);
1773 vput(vp);
1774 return error;
1775 }
1776
1777 /* ARGSUSED */
1778 int
1779 sys___fhstatvfs140(struct lwp *l, const struct sys___fhstatvfs140_args *uap, register_t *retval)
1780 {
1781 /* {
1782 syscallarg(const void *) fhp;
1783 syscallarg(size_t) fh_size;
1784 syscallarg(struct statvfs *) buf;
1785 syscallarg(int) flags;
1786 } */
1787 struct statvfs *sb = STATVFSBUF_GET();
1788 int error;
1789
1790 error = do_fhstatvfs(l, SCARG(uap, fhp), SCARG(uap, fh_size), sb,
1791 SCARG(uap, flags));
1792 if (error == 0)
1793 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
1794 STATVFSBUF_PUT(sb);
1795 return error;
1796 }
1797
1798 /*
1799 * Create a special file.
1800 */
1801 /* ARGSUSED */
1802 int
1803 sys_mknod(struct lwp *l, const struct sys_mknod_args *uap, register_t *retval)
1804 {
1805 /* {
1806 syscallarg(const char *) path;
1807 syscallarg(int) mode;
1808 syscallarg(int) dev;
1809 } */
1810 struct proc *p = l->l_proc;
1811 struct vnode *vp;
1812 struct vattr vattr;
1813 int error, optype;
1814 struct nameidata nd;
1815 char *path;
1816 const char *cpath;
1817 enum uio_seg seg = UIO_USERSPACE;
1818
1819 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MKNOD,
1820 0, NULL, NULL, NULL)) != 0)
1821 return (error);
1822
1823 optype = VOP_MKNOD_DESCOFFSET;
1824
1825 VERIEXEC_PATH_GET(SCARG(uap, path), seg, cpath, path);
1826 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, seg, cpath);
1827
1828 if ((error = namei(&nd)) != 0)
1829 goto out;
1830 vp = nd.ni_vp;
1831 if (vp != NULL)
1832 error = EEXIST;
1833 else {
1834 VATTR_NULL(&vattr);
1835 /* We will read cwdi->cwdi_cmask unlocked. */
1836 vattr.va_mode =
1837 (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1838 vattr.va_rdev = SCARG(uap, dev);
1839
1840 switch (SCARG(uap, mode) & S_IFMT) {
1841 case S_IFMT: /* used by badsect to flag bad sectors */
1842 vattr.va_type = VBAD;
1843 break;
1844 case S_IFCHR:
1845 vattr.va_type = VCHR;
1846 break;
1847 case S_IFBLK:
1848 vattr.va_type = VBLK;
1849 break;
1850 case S_IFWHT:
1851 optype = VOP_WHITEOUT_DESCOFFSET;
1852 break;
1853 case S_IFREG:
1854 #if NVERIEXEC > 0
1855 error = veriexec_openchk(l, nd.ni_vp, nd.ni_dirp,
1856 O_CREAT);
1857 #endif /* NVERIEXEC > 0 */
1858 vattr.va_type = VREG;
1859 vattr.va_rdev = VNOVAL;
1860 optype = VOP_CREATE_DESCOFFSET;
1861 break;
1862 default:
1863 error = EINVAL;
1864 break;
1865 }
1866 }
1867 if (!error) {
1868 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1869 switch (optype) {
1870 case VOP_WHITEOUT_DESCOFFSET:
1871 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1872 if (error)
1873 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1874 vput(nd.ni_dvp);
1875 break;
1876
1877 case VOP_MKNOD_DESCOFFSET:
1878 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1879 &nd.ni_cnd, &vattr);
1880 if (error == 0)
1881 vput(nd.ni_vp);
1882 break;
1883
1884 case VOP_CREATE_DESCOFFSET:
1885 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
1886 &nd.ni_cnd, &vattr);
1887 if (error == 0)
1888 vput(nd.ni_vp);
1889 break;
1890 }
1891 } else {
1892 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1893 if (nd.ni_dvp == vp)
1894 vrele(nd.ni_dvp);
1895 else
1896 vput(nd.ni_dvp);
1897 if (vp)
1898 vrele(vp);
1899 }
1900 out:
1901 VERIEXEC_PATH_PUT(path);
1902 return (error);
1903 }
1904
1905 /*
1906 * Create a named pipe.
1907 */
1908 /* ARGSUSED */
1909 int
1910 sys_mkfifo(struct lwp *l, const struct sys_mkfifo_args *uap, register_t *retval)
1911 {
1912 /* {
1913 syscallarg(const char *) path;
1914 syscallarg(int) mode;
1915 } */
1916 struct proc *p = l->l_proc;
1917 struct vattr vattr;
1918 int error;
1919 struct nameidata nd;
1920
1921 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1922 SCARG(uap, path));
1923 if ((error = namei(&nd)) != 0)
1924 return (error);
1925 if (nd.ni_vp != NULL) {
1926 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1927 if (nd.ni_dvp == nd.ni_vp)
1928 vrele(nd.ni_dvp);
1929 else
1930 vput(nd.ni_dvp);
1931 vrele(nd.ni_vp);
1932 return (EEXIST);
1933 }
1934 VATTR_NULL(&vattr);
1935 vattr.va_type = VFIFO;
1936 /* We will read cwdi->cwdi_cmask unlocked. */
1937 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1938 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1939 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1940 if (error == 0)
1941 vput(nd.ni_vp);
1942 return (error);
1943 }
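/*
 * Editor's note (illustrative, not part of the original source): the
 * corresponding userland call is simply
 *
 *	if (mkfifo("/tmp/example.fifo", 0600) == -1)
 *		err(EXIT_FAILURE, "mkfifo");
 *
 * The mode is masked with the process umask (cwdi_cmask) above before
 * VOP_MKNOD() creates the VFIFO node.
 */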
1944
1945 /*
1946 * Make a hard file link.
1947 */
1948 /* ARGSUSED */
1949 int
1950 sys_link(struct lwp *l, const struct sys_link_args *uap, register_t *retval)
1951 {
1952 /* {
1953 syscallarg(const char *) path;
1954 syscallarg(const char *) link;
1955 } */
1956 struct vnode *vp;
1957 struct nameidata nd;
1958 int error;
1959
1960 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1961 SCARG(uap, path));
1962 if ((error = namei(&nd)) != 0)
1963 return (error);
1964 vp = nd.ni_vp;
1965 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1966 SCARG(uap, link));
1967 if ((error = namei(&nd)) != 0)
1968 goto out;
1969 if (nd.ni_vp) {
1970 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1971 if (nd.ni_dvp == nd.ni_vp)
1972 vrele(nd.ni_dvp);
1973 else
1974 vput(nd.ni_dvp);
1975 vrele(nd.ni_vp);
1976 error = EEXIST;
1977 goto out;
1978 }
1979 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1980 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
1981 error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1982 out:
1983 vrele(vp);
1984 return (error);
1985 }
1986
1987 /*
1988 * Make a symbolic link.
1989 */
1990 /* ARGSUSED */
1991 int
1992 sys_symlink(struct lwp *l, const struct sys_symlink_args *uap, register_t *retval)
1993 {
1994 /* {
1995 syscallarg(const char *) path;
1996 syscallarg(const char *) link;
1997 } */
1998 struct proc *p = l->l_proc;
1999 struct vattr vattr;
2000 char *path;
2001 int error;
2002 struct nameidata nd;
2003
2004 path = PNBUF_GET();
2005 error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL);
2006 if (error)
2007 goto out;
2008 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
2009 SCARG(uap, link));
2010 if ((error = namei(&nd)) != 0)
2011 goto out;
2012 if (nd.ni_vp) {
2013 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2014 if (nd.ni_dvp == nd.ni_vp)
2015 vrele(nd.ni_dvp);
2016 else
2017 vput(nd.ni_dvp);
2018 vrele(nd.ni_vp);
2019 error = EEXIST;
2020 goto out;
2021 }
2022 VATTR_NULL(&vattr);
2023 vattr.va_type = VLNK;
2024 /* We will read cwdi->cwdi_cmask unlocked. */
2025 vattr.va_mode = ACCESSPERMS &~ p->p_cwdi->cwdi_cmask;
2026 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2027 error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
2028 if (error == 0)
2029 vput(nd.ni_vp);
2030 out:
2031 PNBUF_PUT(path);
2032 return (error);
2033 }
2034
2035 /*
2036 * Delete a whiteout from the filesystem.
2037 */
2038 /* ARGSUSED */
2039 int
2040 sys_undelete(struct lwp *l, const struct sys_undelete_args *uap, register_t *retval)
2041 {
2042 /* {
2043 syscallarg(const char *) path;
2044 } */
2045 int error;
2046 struct nameidata nd;
2047
2048 NDINIT(&nd, DELETE, LOCKPARENT | DOWHITEOUT | TRYEMULROOT,
2049 UIO_USERSPACE, SCARG(uap, path));
2050 error = namei(&nd);
2051 if (error)
2052 return (error);
2053
2054 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
2055 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2056 if (nd.ni_dvp == nd.ni_vp)
2057 vrele(nd.ni_dvp);
2058 else
2059 vput(nd.ni_dvp);
2060 if (nd.ni_vp)
2061 vrele(nd.ni_vp);
2062 return (EEXIST);
2063 }
2064 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2065 if ((error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE)) != 0)
2066 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2067 vput(nd.ni_dvp);
2068 return (error);
2069 }
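/*
 * Editor's note (descriptive only, not part of the original source):
 * whiteout entries are primarily created by union mounts to hide names in
 * the lower layer; undelete(2) above removes such an entry, exposing the
 * underlying file again.
 */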
2070
2071 /*
2072 * Delete a name from the filesystem.
2073 */
2074 /* ARGSUSED */
2075 int
2076 sys_unlink(struct lwp *l, const struct sys_unlink_args *uap, register_t *retval)
2077 {
2078 /* {
2079 syscallarg(const char *) path;
2080 } */
2081 struct vnode *vp;
2082 int error;
2083 struct nameidata nd;
2084 char *path;
2085 const char *cpath;
2086 enum uio_seg seg = UIO_USERSPACE;
2087
2088 VERIEXEC_PATH_GET(SCARG(uap, path), seg, cpath, path);
2089 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, seg, cpath);
2090
2091 if ((error = namei(&nd)) != 0)
2092 goto out;
2093 vp = nd.ni_vp;
2094
2095 /*
2096 * The root of a mounted filesystem cannot be deleted.
2097 */
2098 if (vp->v_vflag & VV_ROOT) {
2099 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2100 if (nd.ni_dvp == vp)
2101 vrele(nd.ni_dvp);
2102 else
2103 vput(nd.ni_dvp);
2104 vput(vp);
2105 error = EBUSY;
2106 goto out;
2107 }
2108
2109 #if NVERIEXEC > 0
2110 /* Handle remove requests for veriexec entries. */
2111 if ((error = veriexec_removechk(l, nd.ni_vp, nd.ni_dirp)) != 0) {
2112 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2113 if (nd.ni_dvp == vp)
2114 vrele(nd.ni_dvp);
2115 else
2116 vput(nd.ni_dvp);
2117 vput(vp);
2118 goto out;
2119 }
2120 #endif /* NVERIEXEC > 0 */
2121
2122 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2123 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2124 #ifdef FILEASSOC
2125 (void)fileassoc_file_delete(vp);
2126 #endif /* FILEASSOC */
2127 error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2128 out:
2129 VERIEXEC_PATH_PUT(path);
2130 return (error);
2131 }
2132
2133 /*
2134 * Reposition read/write file offset.
2135 */
2136 int
2137 sys_lseek(struct lwp *l, const struct sys_lseek_args *uap, register_t *retval)
2138 {
2139 /* {
2140 syscallarg(int) fd;
2141 syscallarg(int) pad;
2142 syscallarg(off_t) offset;
2143 syscallarg(int) whence;
2144 } */
2145 struct proc *p = l->l_proc;
2146 kauth_cred_t cred = l->l_cred;
2147 struct filedesc *fdp = p->p_fd;
2148 struct file *fp;
2149 struct vnode *vp;
2150 struct vattr vattr;
2151 off_t newoff;
2152 int error;
2153
2154 if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
2155 return (EBADF);
2156
2157 vp = (struct vnode *)fp->f_data;
2158 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2159 error = ESPIPE;
2160 mutex_exit(&fp->f_lock);
2161 goto out;
2162 }
2163
2164 switch (SCARG(uap, whence)) {
2165 case SEEK_CUR:
2166 newoff = fp->f_offset + SCARG(uap, offset);
2167 FILE_USE(fp);
2168 break;
2169 case SEEK_END:
2170 FILE_USE(fp);
2171 error = VOP_GETATTR(vp, &vattr, cred);
2172 if (error) {
2173 FILE_UNUSE(fp, l);
2174 goto out;
2175 }
2176 newoff = SCARG(uap, offset) + vattr.va_size;
2177 break;
2178 case SEEK_SET:
2179 FILE_USE(fp);
2180 newoff = SCARG(uap, offset);
2181 break;
2182 default:
2183 mutex_exit(&fp->f_lock);
2184 error = EINVAL;
2185 goto out;
2186 }
2187 if ((error = VOP_SEEK(vp, fp->f_offset, newoff, cred)) == 0) {
2188 mutex_enter(&fp->f_lock);
2189 *(off_t *)retval = fp->f_offset = newoff;
2190 mutex_exit(&fp->f_lock);
2191 }
2192 FILE_UNUSE(fp, l);
2193 out:
2194 return (error);
2195 }
2196
2197 /*
2198 * Positional read system call.
2199 */
2200 int
2201 sys_pread(struct lwp *l, const struct sys_pread_args *uap, register_t *retval)
2202 {
2203 /* {
2204 syscallarg(int) fd;
2205 syscallarg(void *) buf;
2206 syscallarg(size_t) nbyte;
2207 syscallarg(off_t) offset;
2208 } */
2209 struct proc *p = l->l_proc;
2210 struct filedesc *fdp = p->p_fd;
2211 struct file *fp;
2212 struct vnode *vp;
2213 off_t offset;
2214 int error, fd = SCARG(uap, fd);
2215
2216 if ((fp = fd_getfile(fdp, fd)) == NULL)
2217 return (EBADF);
2218
2219 if ((fp->f_flag & FREAD) == 0) {
2220 mutex_exit(&fp->f_lock);
2221 return (EBADF);
2222 }
2223
2224 FILE_USE(fp);
2225
2226 vp = (struct vnode *)fp->f_data;
2227 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2228 error = ESPIPE;
2229 goto out;
2230 }
2231
2232 offset = SCARG(uap, offset);
2233
2234 /*
2235 * XXX This works because no file systems actually
2236 * XXX take any action on the seek operation.
2237 */
2238 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2239 goto out;
2240
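	/*
	 * The zero flags argument (no FOF_UPDATE_OFFSET) leaves fp->f_offset
	 * untouched; only the local copy of the offset is used, as pread(2)
	 * requires.
	 */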
2241 /* dofileread() will unuse the descriptor for us */
2242 return (dofileread(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2243 &offset, 0, retval));
2244
2245 out:
2246 FILE_UNUSE(fp, l);
2247 return (error);
2248 }
2249
2250 /*
2251 * Positional scatter read system call.
2252 */
2253 int
2254 sys_preadv(struct lwp *l, const struct sys_preadv_args *uap, register_t *retval)
2255 {
2256 /* {
2257 syscallarg(int) fd;
2258 syscallarg(const struct iovec *) iovp;
2259 syscallarg(int) iovcnt;
2260 syscallarg(off_t) offset;
2261 } */
2262 off_t offset = SCARG(uap, offset);
2263
2264 return do_filereadv(SCARG(uap, fd), SCARG(uap, iovp),
2265 SCARG(uap, iovcnt), &offset, 0, retval);
2266 }
2267
2268 /*
2269 * Positional write system call.
2270 */
2271 int
2272 sys_pwrite(struct lwp *l, const struct sys_pwrite_args *uap, register_t *retval)
2273 {
2274 /* {
2275 syscallarg(int) fd;
2276 syscallarg(const void *) buf;
2277 syscallarg(size_t) nbyte;
2278 syscallarg(off_t) offset;
2279 } */
2280 struct proc *p = l->l_proc;
2281 struct filedesc *fdp = p->p_fd;
2282 struct file *fp;
2283 struct vnode *vp;
2284 off_t offset;
2285 int error, fd = SCARG(uap, fd);
2286
2287 if ((fp = fd_getfile(fdp, fd)) == NULL)
2288 return (EBADF);
2289
2290 if ((fp->f_flag & FWRITE) == 0) {
2291 mutex_exit(&fp->f_lock);
2292 return (EBADF);
2293 }
2294
2295 FILE_USE(fp);
2296
2297 vp = (struct vnode *)fp->f_data;
2298 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2299 error = ESPIPE;
2300 goto out;
2301 }
2302
2303 offset = SCARG(uap, offset);
2304
2305 /*
2306 * XXX This works because no file systems actually
2307 * XXX take any action on the seek operation.
2308 */
2309 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2310 goto out;
2311
2312 /* dofilewrite() will unuse the descriptor for us */
2313 return (dofilewrite(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2314 &offset, 0, retval));
2315
2316 out:
2317 FILE_UNUSE(fp, l);
2318 return (error);
2319 }
2320
2321 /*
2322 * Positional gather write system call.
2323 */
2324 int
2325 sys_pwritev(struct lwp *l, const struct sys_pwritev_args *uap, register_t *retval)
2326 {
2327 /* {
2328 syscallarg(int) fd;
2329 syscallarg(const struct iovec *) iovp;
2330 syscallarg(int) iovcnt;
2331 syscallarg(off_t) offset;
2332 } */
2333 off_t offset = SCARG(uap, offset);
2334
2335 return do_filewritev(SCARG(uap, fd), SCARG(uap, iovp),
2336 SCARG(uap, iovcnt), &offset, 0, retval);
2337 }
2338
2339 /*
2340 * Check access permissions.
2341 */
2342 int
2343 sys_access(struct lwp *l, const struct sys_access_args *uap, register_t *retval)
2344 {
2345 /* {
2346 syscallarg(const char *) path;
2347 syscallarg(int) flags;
2348 } */
2349 kauth_cred_t cred;
2350 struct vnode *vp;
2351 int error, flags;
2352 struct nameidata nd;
2353
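	/*
	 * access(2) checks with the caller's real user and group ids, so
	 * build a temporary credential whose effective ids are set to the
	 * real ones and use it for both the lookup and VOP_ACCESS().
	 */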
2354 cred = kauth_cred_dup(l->l_cred);
2355 kauth_cred_seteuid(cred, kauth_cred_getuid(l->l_cred));
2356 kauth_cred_setegid(cred, kauth_cred_getgid(l->l_cred));
2357 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2358 SCARG(uap, path));
2359 /* Override default credentials */
2360 nd.ni_cnd.cn_cred = cred;
2361 if ((error = namei(&nd)) != 0)
2362 goto out;
2363 vp = nd.ni_vp;
2364
2365 /* Flags == 0 means only check for existence. */
2366 if (SCARG(uap, flags)) {
2367 flags = 0;
2368 if (SCARG(uap, flags) & R_OK)
2369 flags |= VREAD;
2370 if (SCARG(uap, flags) & W_OK)
2371 flags |= VWRITE;
2372 if (SCARG(uap, flags) & X_OK)
2373 flags |= VEXEC;
2374
2375 error = VOP_ACCESS(vp, flags, cred);
2376 if (!error && (flags & VWRITE))
2377 error = vn_writechk(vp);
2378 }
2379 vput(vp);
2380 out:
2381 kauth_cred_free(cred);
2382 return (error);
2383 }
2384
2385 /*
2386 * Common code for all sys_stat functions, including compat versions.
2387 */
2388 int
2389 do_sys_stat(struct lwp *l, const char *path, unsigned int nd_flags,
2390 struct stat *sb)
2391 {
2392 int error;
2393 struct nameidata nd;
2394
2395 NDINIT(&nd, LOOKUP, nd_flags | LOCKLEAF | TRYEMULROOT,
2396 UIO_USERSPACE, path);
2397 error = namei(&nd);
2398 if (error != 0)
2399 return error;
2400 error = vn_stat(nd.ni_vp, sb, l);
2401 vput(nd.ni_vp);
2402 return error;
2403 }
2404
2405 /*
2406 * Get file status; this version follows links.
2407 */
2408 /* ARGSUSED */
2409 int
2410 sys___stat30(struct lwp *l, const struct sys___stat30_args *uap, register_t *retval)
2411 {
2412 /* {
2413 syscallarg(const char *) path;
2414 syscallarg(struct stat *) ub;
2415 } */
2416 struct stat sb;
2417 int error;
2418
2419 error = do_sys_stat(l, SCARG(uap, path), FOLLOW, &sb);
2420 if (error)
2421 return error;
2422 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2423 }
2424
2425 /*
2426 * Get file status; this version does not follow links.
2427 */
2428 /* ARGSUSED */
2429 int
2430 sys___lstat30(struct lwp *l, const struct sys___lstat30_args *uap, register_t *retval)
2431 {
2432 /* {
2433 syscallarg(const char *) path;
2434 syscallarg(struct stat *) ub;
2435 } */
2436 struct stat sb;
2437 int error;
2438
2439 error = do_sys_stat(l, SCARG(uap, path), NOFOLLOW, &sb);
2440 if (error)
2441 return error;
2442 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2443 }
2444
2445 /*
2446 * Get configurable pathname variables.
2447 */
2448 /* ARGSUSED */
2449 int
2450 sys_pathconf(struct lwp *l, const struct sys_pathconf_args *uap, register_t *retval)
2451 {
2452 /* {
2453 syscallarg(const char *) path;
2454 syscallarg(int) name;
2455 } */
2456 int error;
2457 struct nameidata nd;
2458
2459 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2460 SCARG(uap, path));
2461 if ((error = namei(&nd)) != 0)
2462 return (error);
2463 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), retval);
2464 vput(nd.ni_vp);
2465 return (error);
2466 }
2467
2468 /*
2469 * Return target name of a symbolic link.
2470 */
2471 /* ARGSUSED */
2472 int
2473 sys_readlink(struct lwp *l, const struct sys_readlink_args *uap, register_t *retval)
2474 {
2475 /* {
2476 syscallarg(const char *) path;
2477 syscallarg(char *) buf;
2478 syscallarg(size_t) count;
2479 } */
2480 struct vnode *vp;
2481 struct iovec aiov;
2482 struct uio auio;
2483 int error;
2484 struct nameidata nd;
2485
2486 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2487 SCARG(uap, path));
2488 if ((error = namei(&nd)) != 0)
2489 return (error);
2490 vp = nd.ni_vp;
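	/*
	 * Symbolic link permissions are enforced only when the file system
	 * is mounted with MNT_SYMPERM; otherwise the link may be read
	 * regardless of its mode.
	 */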
2491 if (vp->v_type != VLNK)
2492 error = EINVAL;
2493 else if (!(vp->v_mount->mnt_flag & MNT_SYMPERM) ||
2494 (error = VOP_ACCESS(vp, VREAD, l->l_cred)) == 0) {
2495 aiov.iov_base = SCARG(uap, buf);
2496 aiov.iov_len = SCARG(uap, count);
2497 auio.uio_iov = &aiov;
2498 auio.uio_iovcnt = 1;
2499 auio.uio_offset = 0;
2500 auio.uio_rw = UIO_READ;
2501 KASSERT(l == curlwp);
2502 auio.uio_vmspace = l->l_proc->p_vmspace;
2503 auio.uio_resid = SCARG(uap, count);
2504 error = VOP_READLINK(vp, &auio, l->l_cred);
2505 }
2506 vput(vp);
2507 *retval = SCARG(uap, count) - auio.uio_resid;
2508 return (error);
2509 }
2510
2511 /*
2512 * Change flags of a file given a path name.
2513 */
2514 /* ARGSUSED */
2515 int
2516 sys_chflags(struct lwp *l, const struct sys_chflags_args *uap, register_t *retval)
2517 {
2518 /* {
2519 syscallarg(const char *) path;
2520 syscallarg(u_long) flags;
2521 } */
2522 struct vnode *vp;
2523 int error;
2524 struct nameidata nd;
2525
2526 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2527 SCARG(uap, path));
2528 if ((error = namei(&nd)) != 0)
2529 return (error);
2530 vp = nd.ni_vp;
2531 error = change_flags(vp, SCARG(uap, flags), l);
2532 vput(vp);
2533 return (error);
2534 }
2535
2536 /*
2537 * Change flags of a file given a file descriptor.
2538 */
2539 /* ARGSUSED */
2540 int
2541 sys_fchflags(struct lwp *l, const struct sys_fchflags_args *uap, register_t *retval)
2542 {
2543 /* {
2544 syscallarg(int) fd;
2545 syscallarg(u_long) flags;
2546 } */
2547 struct proc *p = l->l_proc;
2548 struct vnode *vp;
2549 struct file *fp;
2550 int error;
2551
2552 /* getvnode() will use the descriptor for us */
2553 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2554 return (error);
2555 vp = (struct vnode *)fp->f_data;
2556 error = change_flags(vp, SCARG(uap, flags), l);
2557 VOP_UNLOCK(vp, 0);
2558 FILE_UNUSE(fp, l);
2559 return (error);
2560 }
2561
2562 /*
2563 * Change flags of a file given a path name; this version does
2564 * not follow links.
2565 */
2566 int
2567 sys_lchflags(struct lwp *l, const struct sys_lchflags_args *uap, register_t *retval)
2568 {
2569 /* {
2570 syscallarg(const char *) path;
2571 syscallarg(u_long) flags;
2572 } */
2573 struct vnode *vp;
2574 int error;
2575 struct nameidata nd;
2576
2577 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2578 SCARG(uap, path));
2579 if ((error = namei(&nd)) != 0)
2580 return (error);
2581 vp = nd.ni_vp;
2582 error = change_flags(vp, SCARG(uap, flags), l);
2583 vput(vp);
2584 return (error);
2585 }
2586
2587 /*
2588 * Common routine to change flags of a file.
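 * The vnode is returned still locked; callers unlock or vput() it.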
2589 */
2590 int
2591 change_flags(struct vnode *vp, u_long flags, struct lwp *l)
2592 {
2593 struct vattr vattr;
2594 int error;
2595
2596 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2597 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2598 /*
2599 * Non-superusers cannot change the flags on devices, even if they
2600 * own them.
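 * (kauth_authorize_generic() returns 0 when the credential is the
 * superuser's, so the device check below is skipped for root.)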
2601 */
2602 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
2603 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2604 goto out;
2605 if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2606 error = EINVAL;
2607 goto out;
2608 }
2609 }
2610 VATTR_NULL(&vattr);
2611 vattr.va_flags = flags;
2612 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2613 out:
2614 return (error);
2615 }
2616
2617 /*
2618 * Change mode of a file given path name; this version follows links.
2619 */
2620 /* ARGSUSED */
2621 int
2622 sys_chmod(struct lwp *l, const struct sys_chmod_args *uap, register_t *retval)
2623 {
2624 /* {
2625 syscallarg(const char *) path;
2626 syscallarg(int) mode;
2627 } */
2628 int error;
2629 struct nameidata nd;
2630
2631 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2632 SCARG(uap, path));
2633 if ((error = namei(&nd)) != 0)
2634 return (error);
2635
2636 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2637
2638 vrele(nd.ni_vp);
2639 return (error);
2640 }
2641
2642 /*
2643 * Change mode of a file given a file descriptor.
2644 */
2645 /* ARGSUSED */
2646 int
2647 sys_fchmod(struct lwp *l, const struct sys_fchmod_args *uap, register_t *retval)
2648 {
2649 /* {
2650 syscallarg(int) fd;
2651 syscallarg(int) mode;
2652 } */
2653 struct proc *p = l->l_proc;
2654 struct file *fp;
2655 int error;
2656
2657 /* getvnode() will use the descriptor for us */
2658 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2659 return (error);
2660
2661 error = change_mode((struct vnode *)fp->f_data, SCARG(uap, mode), l);
2662 FILE_UNUSE(fp, l);
2663 return (error);
2664 }
2665
2666 /*
2667 * Change mode of a file given path name; this version does not follow links.
2668 */
2669 /* ARGSUSED */
2670 int
2671 sys_lchmod(struct lwp *l, const struct sys_lchmod_args *uap, register_t *retval)
2672 {
2673 /* {
2674 syscallarg(const char *) path;
2675 syscallarg(int) mode;
2676 } */
2677 int error;
2678 struct nameidata nd;
2679
2680 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2681 SCARG(uap, path));
2682 if ((error = namei(&nd)) != 0)
2683 return (error);
2684
2685 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2686
2687 vrele(nd.ni_vp);
2688 return (error);
2689 }
2690
2691 /*
2692 * Common routine to set mode given a vnode.
2693 */
2694 static int
2695 change_mode(struct vnode *vp, int mode, struct lwp *l)
2696 {
2697 struct vattr vattr;
2698 int error;
2699
2700 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2701 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2702 VATTR_NULL(&vattr);
2703 vattr.va_mode = mode & ALLPERMS;
2704 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2705 VOP_UNLOCK(vp, 0);
2706 return (error);
2707 }
2708
2709 /*
2710 * Set ownership given a path name; this version follows links.
2711 */
2712 /* ARGSUSED */
2713 int
2714 sys_chown(struct lwp *l, const struct sys_chown_args *uap, register_t *retval)
2715 {
2716 /* {
2717 syscallarg(const char *) path;
2718 syscallarg(uid_t) uid;
2719 syscallarg(gid_t) gid;
2720 } */
2721 int error;
2722 struct nameidata nd;
2723
2724 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2725 SCARG(uap, path));
2726 if ((error = namei(&nd)) != 0)
2727 return (error);
2728
2729 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2730
2731 vrele(nd.ni_vp);
2732 return (error);
2733 }
2734
2735 /*
2736 * Set ownership given a path name; this version follows links.
2737 * Provides POSIX semantics.
2738 */
2739 /* ARGSUSED */
2740 int
2741 sys___posix_chown(struct lwp *l, const struct sys___posix_chown_args *uap, register_t *retval)
2742 {
2743 /* {
2744 syscallarg(const char *) path;
2745 syscallarg(uid_t) uid;
2746 syscallarg(gid_t) gid;
2747 } */
2748 int error;
2749 struct nameidata nd;
2750
2751 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2752 SCARG(uap, path));
2753 if ((error = namei(&nd)) != 0)
2754 return (error);
2755
2756 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2757
2758 vrele(nd.ni_vp);
2759 return (error);
2760 }
2761
2762 /*
2763 * Set ownership given a file descriptor.
2764 */
2765 /* ARGSUSED */
2766 int
2767 sys_fchown(struct lwp *l, const struct sys_fchown_args *uap, register_t *retval)
2768 {
2769 /* {
2770 syscallarg(int) fd;
2771 syscallarg(uid_t) uid;
2772 syscallarg(gid_t) gid;
2773 } */
2774 struct proc *p = l->l_proc;
2775 int error;
2776 struct file *fp;
2777
2778 /* getvnode() will use the descriptor for us */
2779 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2780 return (error);
2781
2782 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2783 SCARG(uap, gid), l, 0);
2784 FILE_UNUSE(fp, l);
2785 return (error);
2786 }
2787
2788 /*
2789 * Set ownership given a file descriptor, providing POSIX/XPG semantics.
2790 */
2791 /* ARGSUSED */
2792 int
2793 sys___posix_fchown(struct lwp *l, const struct sys___posix_fchown_args *uap, register_t *retval)
2794 {
2795 /* {
2796 syscallarg(int) fd;
2797 syscallarg(uid_t) uid;
2798 syscallarg(gid_t) gid;
2799 } */
2800 struct proc *p = l->l_proc;
2801 int error;
2802 struct file *fp;
2803
2804 /* getvnode() will use the descriptor for us */
2805 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2806 return (error);
2807
2808 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2809 SCARG(uap, gid), l, 1);
2810 FILE_UNUSE(fp, l);
2811 return (error);
2812 }
2813
2814 /*
2815 * Set ownership given a path name; this version does not follow links.
2816 */
2817 /* ARGSUSED */
2818 int
2819 sys_lchown(struct lwp *l, const struct sys_lchown_args *uap, register_t *retval)
2820 {
2821 /* {
2822 syscallarg(const char *) path;
2823 syscallarg(uid_t) uid;
2824 syscallarg(gid_t) gid;
2825 } */
2826 int error;
2827 struct nameidata nd;
2828
2829 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2830 SCARG(uap, path));
2831 if ((error = namei(&nd)) != 0)
2832 return (error);
2833
2834 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2835
2836 vrele(nd.ni_vp);
2837 return (error);
2838 }
2839
2840 /*
2841 * Set ownership given a path name; this version does not follow links.
2842 * Provides POSIX/XPG semantics.
2843 */
2844 /* ARGSUSED */
2845 int
2846 sys___posix_lchown(struct lwp *l, const struct sys___posix_lchown_args *uap, register_t *retval)
2847 {
2848 /* {
2849 syscallarg(const char *) path;
2850 syscallarg(uid_t) uid;
2851 syscallarg(gid_t) gid;
2852 } */
2853 int error;
2854 struct nameidata nd;
2855
2856 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2857 SCARG(uap, path));
2858 if ((error = namei(&nd)) != 0)
2859 return (error);
2860
2861 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2862
2863 vrele(nd.ni_vp);
2864 return (error);
2865 }
2866
2867 /*
2868 * Common routine to set ownership given a vnode.
2869 */
2870 static int
2871 change_owner(struct vnode *vp, uid_t uid, gid_t gid, struct lwp *l,
2872 int posix_semantics)
2873 {
2874 struct vattr vattr;
2875 mode_t newmode;
2876 int error;
2877
2878 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2879 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2880 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2881 goto out;
2882
2883 #define CHANGED(x) ((int)(x) != -1)
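	/*
	 * uid_t and gid_t are unsigned, so the chown(2) family passes -1
	 * (compared here via an int cast) to mean "leave unchanged".
	 */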
2884 newmode = vattr.va_mode;
2885 if (posix_semantics) {
2886 /*
2887 * POSIX/XPG semantics: if the caller is not the super-user,
2888 * clear set-user-id and set-group-id bits. Both POSIX and
2889 * the XPG consider the behaviour for calls by the super-user
2890 * implementation-defined; we leave the set-user-id and set-
2891 * group-id settings intact in that case.
2892 */
2893 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
2894 NULL) != 0)
2895 newmode &= ~(S_ISUID | S_ISGID);
2896 } else {
2897 /*
2898 * NetBSD semantics: when changing owner and/or group,
2899 * clear the respective bit(s).
2900 */
2901 if (CHANGED(uid))
2902 newmode &= ~S_ISUID;
2903 if (CHANGED(gid))
2904 newmode &= ~S_ISGID;
2905 }
2906 /* Update va_mode iff altered. */
2907 if (vattr.va_mode == newmode)
2908 newmode = VNOVAL;
2909
2910 VATTR_NULL(&vattr);
2911 vattr.va_uid = CHANGED(uid) ? uid : (uid_t)VNOVAL;
2912 vattr.va_gid = CHANGED(gid) ? gid : (gid_t)VNOVAL;
2913 vattr.va_mode = newmode;
2914 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2915 #undef CHANGED
2916
2917 out:
2918 VOP_UNLOCK(vp, 0);
2919 return (error);
2920 }
2921
2922 /*
2923 * Set the access and modification times given a path name; this
2924 * version follows links.
2925 */
2926 /* ARGSUSED */
2927 int
2928 sys_utimes(struct lwp *l, const struct sys_utimes_args *uap, register_t *retval)
2929 {
2930 /* {
2931 syscallarg(const char *) path;
2932 syscallarg(const struct timeval *) tptr;
2933 } */
2934
2935 return do_sys_utimes(l, NULL, SCARG(uap, path), FOLLOW,
2936 SCARG(uap, tptr), UIO_USERSPACE);
2937 }
2938
2939 /*
2940 * Set the access and modification times given a file descriptor.
2941 */
2942 /* ARGSUSED */
2943 int
2944 sys_futimes(struct lwp *l, const struct sys_futimes_args *uap, register_t *retval)
2945 {
2946 /* {
2947 syscallarg(int) fd;
2948 syscallarg(const struct timeval *) tptr;
2949 } */
2950 int error;
2951 struct file *fp;
2952
2953 /* getvnode() will use the descriptor for us */
2954 if ((error = getvnode(l->l_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
2955 return (error);
2956
2957 error = do_sys_utimes(l, fp->f_data, NULL, 0,
2958 SCARG(uap, tptr), UIO_USERSPACE);
2959
2960 FILE_UNUSE(fp, l);
2961 return (error);
2962 }
2963
2964 /*
2965 * Set the access and modification times given a path name; this
2966 * version does not follow links.
2967 */
2968 int
2969 sys_lutimes(struct lwp *l, const struct sys_lutimes_args *uap, register_t *retval)
2970 {
2971 /* {
2972 syscallarg(const char *) path;
2973 syscallarg(const struct timeval *) tptr;
2974 } */
2975
2976 return do_sys_utimes(l, NULL, SCARG(uap, path), NOFOLLOW,
2977 SCARG(uap, tptr), UIO_USERSPACE);
2978 }
2979
2980 /*
2981 * Common routine to set access and modification times given a vnode.
2982 */
2983 int
2984 do_sys_utimes(struct lwp *l, struct vnode *vp, const char *path, int flag,
2985 const struct timeval *tptr, enum uio_seg seg)
2986 {
2987 struct vattr vattr;
2988 struct nameidata nd;
2989 int error;
2990
2991 VATTR_NULL(&vattr);
2992 if (tptr == NULL) {
2993 nanotime(&vattr.va_atime);
2994 vattr.va_mtime = vattr.va_atime;
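		/*
		 * VA_UTIMES_NULL records that no explicit times were given,
		 * so the file system may accept the request from a caller
		 * with write permission rather than requiring ownership,
		 * matching utimes(2) semantics for a null tptr.
		 */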
2995 vattr.va_vaflags |= VA_UTIMES_NULL;
2996 } else {
2997 struct timeval tv[2];
2998
2999 if (seg != UIO_SYSSPACE) {
3000 error = copyin(tptr, &tv, sizeof (tv));
3001 if (error != 0)
3002 return error;
3003 tptr = tv;
3004 }
3005 TIMEVAL_TO_TIMESPEC(tptr, &vattr.va_atime);
3006 TIMEVAL_TO_TIMESPEC(tptr + 1, &vattr.va_mtime);
3007 }
3008
3009 if (vp == NULL) {
3010 NDINIT(&nd, LOOKUP, flag | TRYEMULROOT, UIO_USERSPACE, path);
3011 if ((error = namei(&nd)) != 0)
3012 return (error);
3013 vp = nd.ni_vp;
3014 } else
3015 nd.ni_vp = NULL;
3016
3017 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3018 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3019 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3020 VOP_UNLOCK(vp, 0);
3021
3022 if (nd.ni_vp != NULL)
3023 vrele(nd.ni_vp);
3024
3025 return (error);
3026 }
3027
3028 /*
3029 * Truncate a file given its path name.
3030 */
3031 /* ARGSUSED */
3032 int
3033 sys_truncate(struct lwp *l, const struct sys_truncate_args *uap, register_t *retval)
3034 {
3035 /* {
3036 syscallarg(const char *) path;
3037 syscallarg(int) pad;
3038 syscallarg(off_t) length;
3039 } */
3040 struct vnode *vp;
3041 struct vattr vattr;
3042 int error;
3043 struct nameidata nd;
3044
3045 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3046 SCARG(uap, path));
3047 if ((error = namei(&nd)) != 0)
3048 return (error);
3049 vp = nd.ni_vp;
3050 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3051 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3052 if (vp->v_type == VDIR)
3053 error = EISDIR;
3054 else if ((error = vn_writechk(vp)) == 0 &&
3055 (error = VOP_ACCESS(vp, VWRITE, l->l_cred)) == 0) {
3056 VATTR_NULL(&vattr);
3057 vattr.va_size = SCARG(uap, length);
3058 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3059 }
3060 vput(vp);
3061 return (error);
3062 }
3063
3064 /*
3065 * Truncate a file given a file descriptor.
3066 */
3067 /* ARGSUSED */
3068 int
3069 sys_ftruncate(struct lwp *l, const struct sys_ftruncate_args *uap, register_t *retval)
3070 {
3071 /* {
3072 syscallarg(int) fd;
3073 syscallarg(int) pad;
3074 syscallarg(off_t) length;
3075 } */
3076 struct proc *p = l->l_proc;
3077 struct vattr vattr;
3078 struct vnode *vp;
3079 struct file *fp;
3080 int error;
3081
3082 /* getvnode() will use the descriptor for us */
3083 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3084 return (error);
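	/*
	 * Unlike truncate(2), no fresh access check is made here: access
	 * was established when the descriptor was opened, so it is enough
	 * that it is open for writing, and the size change below is
	 * performed with the opener's credentials (fp->f_cred).
	 */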
3085 if ((fp->f_flag & FWRITE) == 0) {
3086 error = EINVAL;
3087 goto out;
3088 }
3089 vp = (struct vnode *)fp->f_data;
3090 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3091 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3092 if (vp->v_type == VDIR)
3093 error = EISDIR;
3094 else if ((error = vn_writechk(vp)) == 0) {
3095 VATTR_NULL(&vattr);
3096 vattr.va_size = SCARG(uap, length);
3097 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
3098 }
3099 VOP_UNLOCK(vp, 0);
3100 out:
3101 FILE_UNUSE(fp, l);
3102 return (error);
3103 }
3104
3105 /*
3106 * Sync an open file.
3107 */
3108 /* ARGSUSED */
3109 int
3110 sys_fsync(struct lwp *l, const struct sys_fsync_args *uap, register_t *retval)
3111 {
3112 /* {
3113 syscallarg(int) fd;
3114 } */
3115 struct proc *p = l->l_proc;
3116 struct vnode *vp;
3117 struct file *fp;
3118 int error;
3119
3120 /* getvnode() will use the descriptor for us */
3121 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3122 return (error);
3123 vp = (struct vnode *)fp->f_data;
3124 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3125 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT, 0, 0);
3126 if (error == 0 && bioopsp != NULL &&
3127 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3128 (*bioopsp->io_fsync)(vp, 0);
3129 VOP_UNLOCK(vp, 0);
3130 FILE_UNUSE(fp, l);
3131 return (error);
3132 }
3133
3134 /*
3135 * Sync a range of file data. API modeled after that found in AIX.
3136 *
3137 * FDATASYNC indicates that we need only save enough metadata to be able
3138 * to re-read the written data. Note we duplicate AIX's requirement that
3139 * the file be open for writing.
3140 */
3141 /* ARGSUSED */
3142 int
3143 sys_fsync_range(struct lwp *l, const struct sys_fsync_range_args *uap, register_t *retval)
3144 {
3145 /* {
3146 syscallarg(int) fd;
3147 syscallarg(int) flags;
3148 syscallarg(off_t) start;
3149 syscallarg(off_t) length;
3150 } */
3151 struct proc *p = l->l_proc;
3152 struct vnode *vp;
3153 struct file *fp;
3154 int flags, nflags;
3155 off_t s, e, len;
3156 int error;
3157
3158 /* getvnode() will use the descriptor for us */
3159 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3160 return (error);
3161
3162 if ((fp->f_flag & FWRITE) == 0) {
3163 error = EBADF;
3164 goto out;
3165 }
3166
3167 flags = SCARG(uap, flags);
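	/* Exactly one of FDATASYNC and FFILESYNC must be specified. */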
3168 if (((flags & (FDATASYNC | FFILESYNC)) == 0) ||
3169 ((~flags & (FDATASYNC | FFILESYNC)) == 0)) {
3170 error = EINVAL;
3171 goto out;
3172 }
3173 	/* Now set up the flag value(s) to pass to VOP_FSYNC() */
3174 if (flags & FDATASYNC)
3175 nflags = FSYNC_DATAONLY | FSYNC_WAIT;
3176 else
3177 nflags = FSYNC_WAIT;
3178 if (flags & FDISKSYNC)
3179 nflags |= FSYNC_CACHE;
3180
3181 len = SCARG(uap, length);
3182 	/* If length == 0, we do the whole file, and s = e = 0 will do that */
3183 if (len) {
3184 s = SCARG(uap, start);
3185 e = s + len;
3186 if (e < s) {
3187 error = EINVAL;
3188 goto out;
3189 }
3190 } else {
3191 e = 0;
3192 s = 0;
3193 }
3194
3195 vp = (struct vnode *)fp->f_data;
3196 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3197 error = VOP_FSYNC(vp, fp->f_cred, nflags, s, e);
3198
3199 if (error == 0 && bioopsp != NULL &&
3200 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3201 (*bioopsp->io_fsync)(vp, nflags);
3202
3203 VOP_UNLOCK(vp, 0);
3204 out:
3205 FILE_UNUSE(fp, l);
3206 return (error);
3207 }
3208
3209 /*
3210 * Sync the data of an open file.
3211 */
3212 /* ARGSUSED */
3213 int
3214 sys_fdatasync(struct lwp *l, const struct sys_fdatasync_args *uap, register_t *retval)
3215 {
3216 /* {
3217 syscallarg(int) fd;
3218 } */
3219 struct proc *p = l->l_proc;
3220 struct vnode *vp;
3221 struct file *fp;
3222 int error;
3223
3224 /* getvnode() will use the descriptor for us */
3225 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3226 return (error);
3227 if ((fp->f_flag & FWRITE) == 0) {
3228 FILE_UNUSE(fp, l);
3229 return (EBADF);
3230 }
3231 vp = (struct vnode *)fp->f_data;
3232 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3233 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT|FSYNC_DATAONLY, 0, 0);
3234 VOP_UNLOCK(vp, 0);
3235 FILE_UNUSE(fp, l);
3236 return (error);
3237 }
3238
3239 /*
3240 * Rename files, (standard) BSD semantics frontend.
3241 */
3242 /* ARGSUSED */
3243 int
3244 sys_rename(struct lwp *l, const struct sys_rename_args *uap, register_t *retval)
3245 {
3246 /* {
3247 syscallarg(const char *) from;
3248 syscallarg(const char *) to;
3249 } */
3250
3251 return (rename_files(SCARG(uap, from), SCARG(uap, to), l, 0));
3252 }
3253
3254 /*
3255 * Rename files, POSIX semantics frontend.
3256 */
3257 /* ARGSUSED */
3258 int
3259 sys___posix_rename(struct lwp *l, const struct sys___posix_rename_args *uap, register_t *retval)
3260 {
3261 /* {
3262 syscallarg(const char *) from;
3263 syscallarg(const char *) to;
3264 } */
3265
3266 return (rename_files(SCARG(uap, from), SCARG(uap, to), l, 1));
3267 }
3268
3269 /*
3270 * Rename files. Source and destination must either both be directories,
3271 * or both not be directories. If target is a directory, it must be empty.
3272 * If `from' and `to' refer to the same object, the value of the `retain'
3273 * argument is used to determine whether `from' will be
3274 *
3275 * (retain == 0) deleted unless `from' and `to' refer to the same
3276 * object in the file system's name space (BSD).
3277 * (retain == 1) always retained (POSIX).
3278 */
3279 static int
3280 rename_files(const char *from, const char *to, struct lwp *l, int retain)
3281 {
3282 struct vnode *tvp, *fvp, *tdvp;
3283 struct nameidata fromnd, tond;
3284 struct proc *p;
3285 int error;
3286
3287 NDINIT(&fromnd, DELETE, LOCKPARENT | SAVESTART | TRYEMULROOT,
3288 UIO_USERSPACE, from);
3289 if ((error = namei(&fromnd)) != 0)
3290 return (error);
3291 if (fromnd.ni_dvp != fromnd.ni_vp)
3292 VOP_UNLOCK(fromnd.ni_dvp, 0);
3293 fvp = fromnd.ni_vp;
3294 NDINIT(&tond, RENAME,
3295 LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | TRYEMULROOT
3296 | (fvp->v_type == VDIR ? CREATEDIR : 0),
3297 UIO_USERSPACE, to);
3298 if ((error = namei(&tond)) != 0) {
3299 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3300 vrele(fromnd.ni_dvp);
3301 vrele(fvp);
3302 goto out1;
3303 }
3304 tdvp = tond.ni_dvp;
3305 tvp = tond.ni_vp;
3306
3307 if (tvp != NULL) {
3308 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
3309 error = ENOTDIR;
3310 goto out;
3311 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
3312 error = EISDIR;
3313 goto out;
3314 }
3315 }
3316
3317 if (fvp == tdvp)
3318 error = EINVAL;
3319
3320 /*
3321 * Source and destination refer to the same object.
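 * An error value of -1 is a private sentinel meaning "skip the
 * VOP_RENAME() but report success"; it is mapped back to 0 on return.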
3322 */
3323 if (fvp == tvp) {
3324 if (retain)
3325 error = -1;
3326 else if (fromnd.ni_dvp == tdvp &&
3327 fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
3328 !memcmp(fromnd.ni_cnd.cn_nameptr,
3329 tond.ni_cnd.cn_nameptr,
3330 fromnd.ni_cnd.cn_namelen))
3331 error = -1;
3332 }
3333
3334 #if NVERIEXEC > 0
3335 if (!error) {
3336 char *f1, *f2;
3337
3338 f1 = malloc(fromnd.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3339 		strlcpy(f1, fromnd.ni_cnd.cn_nameptr, fromnd.ni_cnd.cn_namelen + 1);
3340
3341 f2 = malloc(tond.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3342 		strlcpy(f2, tond.ni_cnd.cn_nameptr, tond.ni_cnd.cn_namelen + 1);
3343
3344 error = veriexec_renamechk(l, fvp, f1, tvp, f2);
3345
3346 free(f1, M_TEMP);
3347 free(f2, M_TEMP);
3348 }
3349 #endif /* NVERIEXEC > 0 */
3350
3351 out:
3352 p = l->l_proc;
3353 if (!error) {
3354 VOP_LEASE(tdvp, l->l_cred, LEASE_WRITE);
3355 if (fromnd.ni_dvp != tdvp)
3356 VOP_LEASE(fromnd.ni_dvp, l->l_cred, LEASE_WRITE);
3357 if (tvp) {
3358 VOP_LEASE(tvp, l->l_cred, LEASE_WRITE);
3359 }
3360 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
3361 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
3362 } else {
3363 VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd);
3364 if (tdvp == tvp)
3365 vrele(tdvp);
3366 else
3367 vput(tdvp);
3368 if (tvp)
3369 vput(tvp);
3370 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3371 vrele(fromnd.ni_dvp);
3372 vrele(fvp);
3373 }
3374 vrele(tond.ni_startdir);
3375 PNBUF_PUT(tond.ni_cnd.cn_pnbuf);
3376 out1:
3377 if (fromnd.ni_startdir)
3378 vrele(fromnd.ni_startdir);
3379 PNBUF_PUT(fromnd.ni_cnd.cn_pnbuf);
3380 return (error == -1 ? 0 : error);
3381 }
3382
3383 /*
3384 * Make a directory file.
3385 */
3386 /* ARGSUSED */
3387 int
3388 sys_mkdir(struct lwp *l, const struct sys_mkdir_args *uap, register_t *retval)
3389 {
3390 /* {
3391 syscallarg(const char *) path;
3392 syscallarg(int) mode;
3393 } */
3394 struct proc *p = l->l_proc;
3395 struct vnode *vp;
3396 struct vattr vattr;
3397 int error;
3398 struct nameidata nd;
3399
3400 NDINIT(&nd, CREATE, LOCKPARENT | CREATEDIR | TRYEMULROOT, UIO_USERSPACE,
3401 SCARG(uap, path));
3402 if ((error = namei(&nd)) != 0)
3403 return (error);
3404 vp = nd.ni_vp;
3405 if (vp != NULL) {
3406 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3407 if (nd.ni_dvp == vp)
3408 vrele(nd.ni_dvp);
3409 else
3410 vput(nd.ni_dvp);
3411 vrele(vp);
3412 return (EEXIST);
3413 }
3414 VATTR_NULL(&vattr);
3415 vattr.va_type = VDIR;
3416 /* We will read cwdi->cwdi_cmask unlocked. */
3417 vattr.va_mode =
3418 (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_cwdi->cwdi_cmask;
3419 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3420 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
3421 if (!error)
3422 vput(nd.ni_vp);
3423 return (error);
3424 }
3425
3426 /*
3427 * Remove a directory file.
3428 */
3429 /* ARGSUSED */
3430 int
3431 sys_rmdir(struct lwp *l, const struct sys_rmdir_args *uap, register_t *retval)
3432 {
3433 /* {
3434 syscallarg(const char *) path;
3435 } */
3436 struct vnode *vp;
3437 int error;
3438 struct nameidata nd;
3439
3440 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
3441 SCARG(uap, path));
3442 if ((error = namei(&nd)) != 0)
3443 return (error);
3444 vp = nd.ni_vp;
3445 if (vp->v_type != VDIR) {
3446 error = ENOTDIR;
3447 goto out;
3448 }
3449 /*
3450 * No rmdir "." please.
3451 */
3452 if (nd.ni_dvp == vp) {
3453 error = EINVAL;
3454 goto out;
3455 }
3456 /*
3457 * The root of a mounted filesystem cannot be deleted.
3458 */
3459 if (vp->v_vflag & VV_ROOT) {
3460 error = EBUSY;
3461 goto out;
3462 }
3463 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3464 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3465 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
3466 return (error);
3467
3468 out:
3469 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3470 if (nd.ni_dvp == vp)
3471 vrele(nd.ni_dvp);
3472 else
3473 vput(nd.ni_dvp);
3474 vput(vp);
3475 return (error);
3476 }
3477
3478 /*
3479 * Read a block of directory entries in a file system independent format.
3480 */
3481 int
3482 sys___getdents30(struct lwp *l, const struct sys___getdents30_args *uap, register_t *retval)
3483 {
3484 /* {
3485 syscallarg(int) fd;
3486 syscallarg(char *) buf;
3487 syscallarg(size_t) count;
3488 } */
3489 struct proc *p = l->l_proc;
3490 struct file *fp;
3491 int error, done;
3492
3493 /* getvnode() will use the descriptor for us */
3494 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3495 return (error);
3496 if ((fp->f_flag & FREAD) == 0) {
3497 error = EBADF;
3498 goto out;
3499 }
3500 error = vn_readdir(fp, SCARG(uap, buf), UIO_USERSPACE,
3501 SCARG(uap, count), &done, l, 0, 0);
3502 ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, buf), done, error);
3503 *retval = done;
3504 out:
3505 FILE_UNUSE(fp, l);
3506 return (error);
3507 }
3508
3509 /*
3510 * Set the mode mask for creation of filesystem nodes.
3511 */
3512 int
3513 sys_umask(struct lwp *l, const struct sys_umask_args *uap, register_t *retval)
3514 {
3515 /* {
3516 syscallarg(mode_t) newmask;
3517 } */
3518 struct proc *p = l->l_proc;
3519 struct cwdinfo *cwdi;
3520
3521 /*
3522 * cwdi->cwdi_cmask will be read unlocked elsewhere. What's
3523 * important is that we serialize changes to the mask. The
3524 * rw_exit() will issue a write memory barrier on our behalf,
3525 * and force the changes out to other CPUs (as it must use an
3526 * atomic operation, draining the local CPU's store buffers).
3527 */
3528 cwdi = p->p_cwdi;
3529 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
3530 *retval = cwdi->cwdi_cmask;
3531 cwdi->cwdi_cmask = SCARG(uap, newmask) & ALLPERMS;
3532 rw_exit(&cwdi->cwdi_lock);
3533
3534 return (0);
3535 }
3536
3537 /*
3538  * Void all references to the file by ripping the underlying
3539  * filesystem away from the vnode.
3540 */
3541 /* ARGSUSED */
3542 int
3543 sys_revoke(struct lwp *l, const struct sys_revoke_args *uap, register_t *retval)
3544 {
3545 /* {
3546 syscallarg(const char *) path;
3547 } */
3548 struct vnode *vp;
3549 struct vattr vattr;
3550 int error;
3551 bool revoke;
3552 struct nameidata nd;
3553
3554 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3555 SCARG(uap, path));
3556 if ((error = namei(&nd)) != 0)
3557 return (error);
3558 vp = nd.ni_vp;
3559 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
3560 goto out;
3561 if (kauth_cred_geteuid(l->l_cred) != vattr.va_uid &&
3562 (error = kauth_authorize_generic(l->l_cred,
3563 KAUTH_GENERIC_ISSUSER, NULL)) != 0)
3564 goto out;
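	/*
	 * Only issue the revoke if the vnode is referenced elsewhere or
	 * may be reachable through an alias or a layered file system;
	 * with a use count of 1 our own reference from namei() is the
	 * only one and there is nothing to revoke.
	 */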
3565 simple_lock(&vp->v_interlock);
3566 revoke = (vp->v_usecount > 1 || (vp->v_iflag & (VI_ALIASED|VI_LAYER)));
3567 simple_unlock(&vp->v_interlock);
3568 if (revoke)
3569 VOP_REVOKE(vp, REVOKEALL);
3570 out:
3571 vrele(vp);
3572 return (error);
3573 }
3574
3575 /*
3576 * Convert a user file descriptor to a kernel file entry.
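 * On success the file is marked in use (FILE_USE); the caller is
 * responsible for FILE_UNUSE() when done with it.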
3577 */
3578 int
3579 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
3580 {
3581 struct vnode *vp;
3582 struct file *fp;
3583
3584 if ((fp = fd_getfile(fdp, fd)) == NULL)
3585 return (EBADF);
3586
3587 FILE_USE(fp);
3588
3589 if (fp->f_type != DTYPE_VNODE) {
3590 FILE_UNUSE(fp, NULL);
3591 return (EINVAL);
3592 }
3593
3594 vp = (struct vnode *)fp->f_data;
3595 if (vp->v_type == VBAD) {
3596 FILE_UNUSE(fp, NULL);
3597 return (EBADF);
3598 }
3599
3600 *fpp = fp;
3601 return (0);
3602 }
3603