vfs_syscalls.c revision 1.342

1 /* $NetBSD: vfs_syscalls.c,v 1.342 2008/01/24 17:32:55 ad Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.342 2008/01/24 17:32:55 ad Exp $");
41
42 #include "opt_compat_netbsd.h"
43 #include "opt_compat_43.h"
44 #include "opt_fileassoc.h"
45 #include "fss.h"
46 #include "veriexec.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/filedesc.h>
52 #include <sys/kernel.h>
53 #include <sys/file.h>
54 #include <sys/stat.h>
55 #include <sys/vnode.h>
56 #include <sys/mount.h>
57 #include <sys/proc.h>
58 #include <sys/uio.h>
59 #include <sys/malloc.h>
60 #include <sys/kmem.h>
61 #include <sys/dirent.h>
62 #include <sys/sysctl.h>
63 #include <sys/syscallargs.h>
64 #include <sys/vfs_syscalls.h>
65 #include <sys/ktrace.h>
66 #ifdef FILEASSOC
67 #include <sys/fileassoc.h>
68 #endif /* FILEASSOC */
69 #include <sys/verified_exec.h>
70 #include <sys/kauth.h>
71
72 #include <miscfs/genfs/genfs.h>
73 #include <miscfs/syncfs/syncfs.h>
74 #include <miscfs/specfs/specdev.h>
75
76 #ifdef COMPAT_30
77 #include "opt_nfsserver.h"
78 #include <nfs/rpcv2.h>
79 #endif
80 #include <nfs/nfsproto.h>
81 #ifdef COMPAT_30
82 #include <nfs/nfs.h>
83 #include <nfs/nfs_var.h>
84 #endif
85
86 #if NFSS > 0
87 #include <dev/fssvar.h>
88 #endif
89
90 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount struct");
91
92 static int change_dir(struct nameidata *, struct lwp *);
93 static int change_flags(struct vnode *, u_long, struct lwp *);
94 static int change_mode(struct vnode *, int, struct lwp *l);
95 static int change_owner(struct vnode *, uid_t, gid_t, struct lwp *, int);
96
97 void checkdirs(struct vnode *);
98
99 int dovfsusermount = 0;
100
101 /*
102 * Virtual File System System Calls
103 */
104
105 /*
106 * Mount a file system.
107 */
108
109 #if defined(COMPAT_09) || defined(COMPAT_43)
110 /*
111 * This table is used to maintain compatibility with 4.3BSD
112 * and NetBSD 0.9 mount syscalls. Note, the order is important!
113 *
114 * Do not modify this table. It should only contain filesystems
115 * supported by NetBSD 0.9 and 4.3BSD.
116 */
117 const char * const mountcompatnames[] = {
118 NULL, /* 0 = MOUNT_NONE */
119 MOUNT_FFS, /* 1 = MOUNT_UFS */
120 MOUNT_NFS, /* 2 */
121 MOUNT_MFS, /* 3 */
122 MOUNT_MSDOS, /* 4 */
123 MOUNT_CD9660, /* 5 = MOUNT_ISOFS */
124 MOUNT_FDESC, /* 6 */
125 MOUNT_KERNFS, /* 7 */
126 NULL, /* 8 = MOUNT_DEVFS */
127 MOUNT_AFS, /* 9 */
128 };
129 const int nmountcompatnames = sizeof(mountcompatnames) /
130 sizeof(mountcompatnames[0]);
131 #endif /* COMPAT_09 || COMPAT_43 */
132
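/*
 * mount_update: handle MNT_UPDATE/MNT_RELOAD for an already-mounted
 * file system.  The covered vnode must be the root of the mount;
 * after a kauth check the mount flags are adjusted, VFS_MOUNT() is
 * called, and the saved flags are restored on failure.  The syncer
 * vnode is allocated or released to match the resulting flags.
 */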
133 static int
134 mount_update(struct lwp *l, struct vnode *vp, const char *path, int flags,
135 void *data, size_t *data_len)
136 {
137 struct mount *mp;
138 int error = 0, saved_flags;
139
140 mp = vp->v_mount;
141 saved_flags = mp->mnt_flag;
142
143 /* We can operate only on VV_ROOT nodes. */
144 if ((vp->v_vflag & VV_ROOT) == 0) {
145 error = EINVAL;
146 goto out;
147 }
148
149 /*
150 * We only allow the filesystem to be reloaded if it
151 * is currently mounted read-only.
152 */
153 if (flags & MNT_RELOAD && !(mp->mnt_flag & MNT_RDONLY)) {
154 error = EOPNOTSUPP; /* Needs translation */
155 goto out;
156 }
157
158 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
159 KAUTH_REQ_SYSTEM_MOUNT_UPDATE, mp, KAUTH_ARG(flags), data);
160 if (error)
161 goto out;
162
163 if (vfs_busy(mp, LK_NOWAIT, 0)) {
164 error = EPERM;
165 goto out;
166 }
167
168 mp->mnt_flag &= ~MNT_OP_FLAGS;
169 mp->mnt_flag |= flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
170
171 /*
172 * Set the mount level flags.
173 */
174 if (flags & MNT_RDONLY)
175 mp->mnt_flag |= MNT_RDONLY;
176 else if (mp->mnt_flag & MNT_RDONLY)
177 mp->mnt_iflag |= IMNT_WANTRDWR;
178 mp->mnt_flag &=
179 ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
180 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
181 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP);
182 mp->mnt_flag |= flags &
183 (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
184 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
185 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
186 MNT_IGNORE);
187
188 error = VFS_MOUNT(mp, path, data, data_len);
189
190 #if defined(COMPAT_30) && defined(NFSSERVER)
191 if (error && data != NULL) {
192 int error2;
193
194 /* Update failed; let's try and see if it was an
195 * export request. */
196 error2 = nfs_update_exports_30(mp, path, data, l);
197
198 /* Only update error code if the export request was
199 * understood but some problem occurred while
200 * processing it. */
201 if (error2 != EJUSTRETURN)
202 error = error2;
203 }
204 #endif
205 if (mp->mnt_iflag & IMNT_WANTRDWR)
206 mp->mnt_flag &= ~MNT_RDONLY;
207 if (error)
208 mp->mnt_flag = saved_flags;
209 mp->mnt_flag &= ~MNT_OP_FLAGS;
210 mp->mnt_iflag &= ~IMNT_WANTRDWR;
211 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0) {
212 if (mp->mnt_syncer == NULL)
213 error = vfs_allocate_syncvnode(mp);
214 } else {
215 if (mp->mnt_syncer != NULL)
216 vfs_deallocate_syncvnode(mp);
217 }
218 vfs_unbusy(mp);
219
220 out:
221 return (error);
222 }
223
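/*
 * mount_get_vfsops: resolve the user-supplied file system type string
 * (or, for COMPAT_09/COMPAT_43 binaries, a numeric type) to a vfsops
 * pointer via vfs_getopsbyname().
 */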
224 static int
225 mount_get_vfsops(const char *fstype, struct vfsops **vfsops)
226 {
227 char fstypename[sizeof(((struct statvfs *)NULL)->f_fstypename)];
228 int error;
229
230 /* Copy file-system type from userspace. */
231 error = copyinstr(fstype, fstypename, sizeof(fstypename), NULL);
232 if (error) {
233 #if defined(COMPAT_09) || defined(COMPAT_43)
234 /*
235 * Historically, filesystem types were identified by numbers.
236 * If we get an integer for the filesystem type instead of a
237 * string, we check to see if it matches one of the historic
238 * filesystem types.
239 */
240 u_long fsindex = (u_long)fstype;
241 if (fsindex >= nmountcompatnames ||
242 mountcompatnames[fsindex] == NULL)
243 return ENODEV;
244 strlcpy(fstypename, mountcompatnames[fsindex],
245 sizeof(fstypename));
246 #else
247 return error;
248 #endif
249 }
250
251 #ifdef COMPAT_10
252 /* Accept `ufs' as an alias for `ffs'. */
253 if (strcmp(fstypename, "ufs") == 0)
254 fstypename[0] = 'f';
255 #endif
256
257 if ((*vfsops = vfs_getopsbyname(fstypename)) == NULL)
258 return ENODEV;
259 return 0;
260 }
261
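/*
 * mount_domount: mount a new file system on the directory *vpp.
 * Performs the permission and sanity checks, allocates and initializes
 * the struct mount, calls VFS_MOUNT() and, on success, links the new
 * mount into the global mountlist and starts it with VFS_START().
 */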
262 static int
263 mount_domount(struct lwp *l, struct vnode **vpp, struct vfsops *vfsops,
264 const char *path, int flags, void *data, size_t *data_len, u_int recurse)
265 {
266 struct mount *mp = NULL;
267 struct vnode *vp = *vpp;
268 struct vattr va;
269 int error;
270
271 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
272 KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
273 if (error)
274 return error;
275
276 /* Can't make a non-dir a mount-point (from here anyway). */
277 if (vp->v_type != VDIR)
278 return ENOTDIR;
279
280 /*
281 * If the user is not root, ensure that they own the directory
282 * onto which we are attempting to mount.
283 */
284 if ((error = VOP_GETATTR(vp, &va, l->l_cred)) != 0 ||
285 (va.va_uid != kauth_cred_geteuid(l->l_cred) &&
286 (error = kauth_authorize_generic(l->l_cred,
287 KAUTH_GENERIC_ISSUSER, NULL)) != 0)) {
288 return error;
289 }
290
291 if (flags & MNT_EXPORTED)
292 return EINVAL;
293
294 if ((error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
295 return error;
296
297 /*
298 	 * Check that a file system is not already mounted on this vnode.
299 */
300 if (vp->v_mountedhere != NULL)
301 return EBUSY;
302
303 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
304
305 mp->mnt_op = vfsops;
306
307 TAILQ_INIT(&mp->mnt_vnodelist);
308 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
309 mutex_init(&mp->mnt_mutex, MUTEX_DEFAULT, IPL_NONE);
310 (void)vfs_busy(mp, LK_NOWAIT, 0);
311
312 mp->mnt_vnodecovered = vp;
313 mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);
314 mp->mnt_unmounter = NULL;
315 mount_initspecific(mp);
316
317 /*
318 * The underlying file system may refuse the mount for
319 * various reasons. Allow the user to force it to happen.
320 *
321 * Set the mount level flags.
322 */
323 mp->mnt_flag = flags &
324 (MNT_FORCE | MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
325 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOCOREDUMP |
326 MNT_NOATIME | MNT_NODEVMTIME | MNT_SYMPERM | MNT_SOFTDEP |
327 MNT_IGNORE | MNT_RDONLY);
328
329 error = VFS_MOUNT(mp, path, data, data_len);
330 mp->mnt_flag &= ~MNT_OP_FLAGS;
331
332 /*
333 * Put the new filesystem on the mount list after root.
334 */
335 cache_purge(vp);
336 if (error != 0) {
337 vp->v_mountedhere = NULL;
338 mp->mnt_op->vfs_refcount--;
339 vfs_unbusy(mp);
340 vfs_destroy(mp);
341 return error;
342 }
343
344 mp->mnt_iflag &= ~IMNT_WANTRDWR;
345 vp->v_mountedhere = mp;
346 mutex_enter(&mountlist_lock);
347 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
348 mutex_exit(&mountlist_lock);
349 vn_restorerecurse(vp, recurse);
350 VOP_UNLOCK(vp, 0);
351 checkdirs(vp);
352 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
353 error = vfs_allocate_syncvnode(mp);
354 vfs_unbusy(mp);
355 (void) VFS_STATVFS(mp, &mp->mnt_stat);
356 error = VFS_START(mp, 0);
357 if (error)
358 vrele(vp);
359 *vpp = NULL;
360 return error;
361 }
362
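/*
 * mount_getargs: handle MNT_GETARGS, i.e. fetch the current mount
 * arguments of an existing mount via VFS_MOUNT(); apart from
 * temporarily setting MNT_GETARGS it does not modify the mount.
 */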
363 static int
364 mount_getargs(struct lwp *l, struct vnode *vp, const char *path, int flags,
365 void *data, size_t *data_len)
366 {
367 struct mount *mp;
368 int error;
369
370 /* If MNT_GETARGS is specified, it should be the only flag. */
371 if (flags & ~MNT_GETARGS)
372 return EINVAL;
373
374 mp = vp->v_mount;
375
376 /* XXX: probably some notion of "can see" here if we want isolation. */
377 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
378 KAUTH_REQ_SYSTEM_MOUNT_GET, mp, data, NULL);
379 if (error)
380 return error;
381
382 if ((vp->v_vflag & VV_ROOT) == 0)
383 return EINVAL;
384
385 if (vfs_busy(mp, LK_NOWAIT, 0))
386 return EPERM;
387
388 mp->mnt_flag &= ~MNT_OP_FLAGS;
389 mp->mnt_flag |= MNT_GETARGS;
390 error = VFS_MOUNT(mp, path, data, data_len);
391 mp->mnt_flag &= ~MNT_OP_FLAGS;
392
393 vfs_unbusy(mp);
394 return (error);
395 }
396
397 #ifdef COMPAT_40
398 /* ARGSUSED */
399 int
400 compat_40_sys_mount(struct lwp *l, const struct compat_40_sys_mount_args *uap, register_t *retval)
401 {
402 /* {
403 syscallarg(const char *) type;
404 syscallarg(const char *) path;
405 syscallarg(int) flags;
406 syscallarg(void *) data;
407 } */
408 register_t dummy;
409
410 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
411 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE, 0, &dummy);
412 }
413 #endif
414
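/*
 * Current mount(2) entry point: takes an explicit data_len which is
 * passed through to do_sys_mount().
 */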
415 int
416 sys___mount50(struct lwp *l, const struct sys___mount50_args *uap, register_t *retval)
417 {
418 /* {
419 syscallarg(const char *) type;
420 syscallarg(const char *) path;
421 syscallarg(int) flags;
422 syscallarg(void *) data;
423 syscallarg(size_t) data_len;
424 } */
425
426 return do_sys_mount(l, NULL, SCARG(uap, type), SCARG(uap, path),
427 SCARG(uap, flags), SCARG(uap, data), UIO_USERSPACE,
428 SCARG(uap, data_len), retval);
429 }
430
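/*
 * do_sys_mount: common back end for the mount(2) variants.  Looks up
 * the vnode to be covered, copies in the file system specific data if
 * it lives in userspace, and dispatches to mount_getargs(),
 * mount_update() or mount_domount() as requested by the flags.
 */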
431 int
432 do_sys_mount(struct lwp *l, struct vfsops *vfsops, const char *type,
433 const char *path, int flags, void *data, enum uio_seg data_seg,
434 size_t data_len, register_t *retval)
435 {
436 struct vnode *vp;
437 struct nameidata nd;
438 void *data_buf = data;
439 u_int recurse;
440 int error;
441
442 /*
443 * Get vnode to be covered
444 */
445 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
446 if ((error = namei(&nd)) != 0)
447 return (error);
448 vp = nd.ni_vp;
449
450 /*
451 * A lookup in VFS_MOUNT might result in an attempt to
452 * lock this vnode again, so make the lock recursive.
453 */
454 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
455 recurse = vn_setrecurse(vp);
456
457 if (vfsops == NULL) {
458 if (flags & (MNT_GETARGS | MNT_UPDATE))
459 vfsops = vp->v_mount->mnt_op;
460 else {
461 /* 'type' is userspace */
462 error = mount_get_vfsops(type, &vfsops);
463 if (error != 0)
464 goto done;
465 }
466 }
467
468 if (data != NULL && data_seg == UIO_USERSPACE) {
469 if (data_len == 0) {
470 /* No length supplied, use default for filesystem */
471 data_len = vfsops->vfs_min_mount_data;
472 if (data_len > VFS_MAX_MOUNT_DATA) {
473 /* maybe a force loaded old LKM */
474 error = EINVAL;
475 goto done;
476 }
477 #ifdef COMPAT_30
478 /* Hopefully a longer buffer won't make copyin() fail */
479 if (flags & MNT_UPDATE
480 && data_len < sizeof (struct mnt_export_args30))
481 data_len = sizeof (struct mnt_export_args30);
482 #endif
483 }
484 data_buf = malloc(data_len, M_TEMP, M_WAITOK);
485
486 /* NFS needs the buffer even for mnt_getargs .... */
487 error = copyin(data, data_buf, data_len);
488 if (error != 0)
489 goto done;
490 }
491
492 if (flags & MNT_GETARGS) {
493 if (data_len == 0) {
494 error = EINVAL;
495 goto done;
496 }
497 error = mount_getargs(l, vp, path, flags, data_buf, &data_len);
498 if (error != 0)
499 goto done;
500 if (data_seg == UIO_USERSPACE)
501 error = copyout(data_buf, data, data_len);
502 *retval = data_len;
503 } else if (flags & MNT_UPDATE) {
504 error = mount_update(l, vp, path, flags, data_buf, &data_len);
505 } else {
506 /* Locking is handled internally in mount_domount(). */
507 error = mount_domount(l, &vp, vfsops, path, flags, data_buf,
508 &data_len, recurse);
509 }
510
511 done:
512 if (vp != NULL) {
513 vn_restorerecurse(vp, recurse);
514 vput(vp);
515 }
516 if (data_buf != data)
517 free(data_buf, M_TEMP);
518 return (error);
519 }
520
521 /*
522 * Scan all active processes to see if any of them have a current
523 * or root directory onto which the new filesystem has just been
524 * mounted. If so, replace them with the new mount point.
525 */
526 void
527 checkdirs(struct vnode *olddp)
528 {
529 struct cwdinfo *cwdi;
530 struct vnode *newdp;
531 struct proc *p;
532
533 if (olddp->v_usecount == 1)
534 return;
535 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
536 panic("mount: lost mount");
537 mutex_enter(&proclist_lock);
538 PROCLIST_FOREACH(p, &allproc) {
539 cwdi = p->p_cwdi;
540 if (!cwdi)
541 continue;
542 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
543 if (cwdi->cwdi_cdir == olddp) {
544 vrele(cwdi->cwdi_cdir);
545 VREF(newdp);
546 cwdi->cwdi_cdir = newdp;
547 }
548 if (cwdi->cwdi_rdir == olddp) {
549 vrele(cwdi->cwdi_rdir);
550 VREF(newdp);
551 cwdi->cwdi_rdir = newdp;
552 }
553 rw_exit(&cwdi->cwdi_lock);
554 }
555 mutex_exit(&proclist_lock);
556 if (rootvnode == olddp) {
557 vrele(rootvnode);
558 VREF(newdp);
559 rootvnode = newdp;
560 }
561 vput(newdp);
562 }
563
564 /*
565 * Unmount a file system.
566 *
567 * Note: unmount takes a path to the vnode mounted on as argument,
568  * not the special file (as before).
569 */
570 /* ARGSUSED */
571 int
572 sys_unmount(struct lwp *l, const struct sys_unmount_args *uap, register_t *retval)
573 {
574 /* {
575 syscallarg(const char *) path;
576 syscallarg(int) flags;
577 } */
578 struct vnode *vp;
579 struct mount *mp;
580 int error;
581 struct nameidata nd;
582
583 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
584 SCARG(uap, path));
585 if ((error = namei(&nd)) != 0)
586 return (error);
587 vp = nd.ni_vp;
588 mp = vp->v_mount;
589
590 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
591 KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT, mp, NULL, NULL);
592 if (error) {
593 vput(vp);
594 return (error);
595 }
596
597 /*
598 * Don't allow unmounting the root file system.
599 */
600 if (mp->mnt_flag & MNT_ROOTFS) {
601 vput(vp);
602 return (EINVAL);
603 }
604
605 /*
606 * Must be the root of the filesystem
607 */
608 if ((vp->v_vflag & VV_ROOT) == 0) {
609 vput(vp);
610 return (EINVAL);
611 }
612 vput(vp);
613
614 /*
615 * XXX Freeze syncer. Must do this before locking the
616 * mount point. See dounmount() for details.
617 */
618 mutex_enter(&syncer_mutex);
619
620 if (vfs_busy(mp, 0, 0)) {
621 mutex_exit(&syncer_mutex);
622 return (EBUSY);
623 }
624
625 return (dounmount(mp, SCARG(uap, flags), l));
626 }
627
628 /*
629 * Do the actual file system unmount. File system is assumed to have been
630 * marked busy by the caller.
631 */
632 int
633 dounmount(struct mount *mp, int flags, struct lwp *l)
634 {
635 struct vnode *coveredvp;
636 int error;
637 int async;
638 int used_syncer;
639
640 #if NVERIEXEC > 0
641 error = veriexec_unmountchk(mp);
642 if (error)
643 return (error);
644 #endif /* NVERIEXEC > 0 */
645
646 mutex_enter(&mountlist_lock);
647 vfs_unbusy(mp);
648 used_syncer = (mp->mnt_syncer != NULL);
649
650 /*
651 * XXX Syncer must be frozen when we get here. This should really
652 * be done on a per-mountpoint basis, but especially the softdep
653 * code possibly called from the syncer doesn't exactly work on a
654 * per-mountpoint basis, so the softdep code would become a maze
655 * of vfs_busy() calls.
656 *
657 * The caller of dounmount() must acquire syncer_mutex because
658 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
659 * order, and we must preserve that order to avoid deadlock.
660 *
661 * So, if the file system did not use the syncer, now is
662 * the time to release the syncer_mutex.
663 */
664 if (used_syncer == 0)
665 mutex_exit(&syncer_mutex);
666
667 mp->mnt_iflag |= IMNT_UNMOUNT;
668 mp->mnt_unmounter = l;
669 lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_lock);
670
671 async = mp->mnt_flag & MNT_ASYNC;
672 mp->mnt_flag &= ~MNT_ASYNC;
673 cache_purgevfs(mp); /* remove cache entries for this file sys */
674 if (mp->mnt_syncer != NULL)
675 vfs_deallocate_syncvnode(mp);
676 error = 0;
677 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
678 #if NFSS > 0
679 error = fss_umount_hook(mp, (flags & MNT_FORCE));
680 #endif
681 if (error == 0)
682 error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
683 }
684 vfs_scrubvnlist(mp);
685 if (error == 0 || (flags & MNT_FORCE))
686 error = VFS_UNMOUNT(mp, flags);
687 if (error) {
688 if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
689 (void) vfs_allocate_syncvnode(mp);
690 mutex_enter(&mountlist_lock);
691 mp->mnt_iflag &= ~IMNT_UNMOUNT;
692 mp->mnt_unmounter = NULL;
693 mp->mnt_flag |= async;
694 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
695 &mountlist_lock);
696 if (used_syncer)
697 mutex_exit(&syncer_mutex);
698 mutex_enter(&mp->mnt_mutex);
699 while (mp->mnt_wcnt > 0) {
700 wakeup(mp);
701 mtsleep(&mp->mnt_wcnt, PVFS, "mntwcnt1",
702 0, &mp->mnt_mutex);
703 }
704 mutex_exit(&mp->mnt_mutex);
705 return (error);
706 }
707 vfs_scrubvnlist(mp);
708 mutex_enter(&mountlist_lock);
709 CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
710 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP)
711 coveredvp->v_mountedhere = NULL;
712 if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
713 panic("unmount: dangling vnode");
714 mp->mnt_iflag |= IMNT_GONE;
715 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_lock);
716 if (coveredvp != NULLVP)
717 vrele(coveredvp);
718 if (used_syncer)
719 mutex_exit(&syncer_mutex);
720 mutex_enter(&mp->mnt_mutex);
721 while (mp->mnt_wcnt > 0) {
722 wakeup(mp);
723 mtsleep(&mp->mnt_wcnt, PVFS, "mntwcnt2", 0, &mp->mnt_mutex);
724 }
725 mutex_exit(&mp->mnt_mutex);
726 vfs_hooks_unmount(mp);
727 vfs_delref(mp->mnt_op);
728 vfs_destroy(mp);
729 return (0);
730 }
731
732 /*
733 * Sync each mounted filesystem.
734 */
735 #ifdef DEBUG
736 int syncprt = 0;
737 struct ctldebug debug0 = { "syncprt", &syncprt };
738 #endif
739
740 /* ARGSUSED */
741 int
742 sys_sync(struct lwp *l, const void *v, register_t *retval)
743 {
744 struct mount *mp, *nmp;
745 int asyncflag;
746
747 if (l == NULL)
748 l = &lwp0;
749
750 mutex_enter(&mountlist_lock);
751 for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
752 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
753 nmp = mp->mnt_list.cqe_prev;
754 continue;
755 }
756 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
757 asyncflag = mp->mnt_flag & MNT_ASYNC;
758 mp->mnt_flag &= ~MNT_ASYNC;
759 VFS_SYNC(mp, MNT_NOWAIT, l->l_cred);
760 if (asyncflag)
761 mp->mnt_flag |= MNT_ASYNC;
762 }
763 mutex_enter(&mountlist_lock);
764 nmp = mp->mnt_list.cqe_prev;
765 vfs_unbusy(mp);
766
767 }
768 mutex_exit(&mountlist_lock);
769 #ifdef DEBUG
770 if (syncprt)
771 vfs_bufstats();
772 #endif /* DEBUG */
773 return (0);
774 }
775
776 /*
777 * Change filesystem quotas.
778 */
779 /* ARGSUSED */
780 int
781 sys_quotactl(struct lwp *l, const struct sys_quotactl_args *uap, register_t *retval)
782 {
783 /* {
784 syscallarg(const char *) path;
785 syscallarg(int) cmd;
786 syscallarg(int) uid;
787 syscallarg(void *) arg;
788 } */
789 struct mount *mp;
790 int error;
791 struct nameidata nd;
792
793 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
794 SCARG(uap, path));
795 if ((error = namei(&nd)) != 0)
796 return (error);
797 mp = nd.ni_vp->v_mount;
798 error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
799 SCARG(uap, arg));
800 vrele(nd.ni_vp);
801 return (error);
802 }
803
804 int
805 dostatvfs(struct mount *mp, struct statvfs *sp, struct lwp *l, int flags,
806 int root)
807 {
808 struct cwdinfo *cwdi = l->l_proc->p_cwdi;
809 int error = 0;
810
811 /*
812 	 * If MNT_NOWAIT or MNT_LAZY is specified, do not
813 	 * refresh the fsstat cache; only MNT_WAIT (or a zero
814 	 * flags argument) forces a refresh.
815 */
816 if (flags == MNT_NOWAIT || flags == MNT_LAZY ||
817 (flags != MNT_WAIT && flags != 0)) {
818 memcpy(sp, &mp->mnt_stat, sizeof(*sp));
819 goto done;
820 }
821
822 /* Get the filesystem stats now */
823 memset(sp, 0, sizeof(*sp));
824 if ((error = VFS_STATVFS(mp, sp)) != 0) {
825 return error;
826 }
827
828 if (cwdi->cwdi_rdir == NULL)
829 (void)memcpy(&mp->mnt_stat, sp, sizeof(mp->mnt_stat));
830 done:
831 if (cwdi->cwdi_rdir != NULL) {
832 size_t len;
833 char *bp;
834 char *path = PNBUF_GET();
835
836 bp = path + MAXPATHLEN;
837 *--bp = '\0';
838 rw_enter(&cwdi->cwdi_lock, RW_READER);
839 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp, path,
840 MAXPATHLEN / 2, 0, l);
841 rw_exit(&cwdi->cwdi_lock);
842 if (error) {
843 PNBUF_PUT(path);
844 return error;
845 }
846 len = strlen(bp);
847 /*
848 * for mount points that are below our root, we can see
849 * them, so we fix up the pathname and return them. The
850 * rest we cannot see, so we don't allow viewing the
851 * data.
852 */
853 if (strncmp(bp, sp->f_mntonname, len) == 0) {
854 strlcpy(sp->f_mntonname, &sp->f_mntonname[len],
855 sizeof(sp->f_mntonname));
856 if (sp->f_mntonname[0] == '\0')
857 (void)strlcpy(sp->f_mntonname, "/",
858 sizeof(sp->f_mntonname));
859 } else {
860 if (root)
861 (void)strlcpy(sp->f_mntonname, "/",
862 sizeof(sp->f_mntonname));
863 else
864 error = EPERM;
865 }
866 PNBUF_PUT(path);
867 }
868 sp->f_flag = mp->mnt_flag & MNT_VISFLAGMASK;
869 return error;
870 }
871
872 /*
873 * Get filesystem statistics by path.
874 */
875 int
876 do_sys_pstatvfs(struct lwp *l, const char *path, int flags, struct statvfs *sb)
877 {
878 struct mount *mp;
879 int error;
880 struct nameidata nd;
881
882 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE, path);
883 if ((error = namei(&nd)) != 0)
884 return error;
885 mp = nd.ni_vp->v_mount;
886 error = dostatvfs(mp, sb, l, flags, 1);
887 vrele(nd.ni_vp);
888 return error;
889 }
890
891 /* ARGSUSED */
892 int
893 sys_statvfs1(struct lwp *l, const struct sys_statvfs1_args *uap, register_t *retval)
894 {
895 /* {
896 syscallarg(const char *) path;
897 syscallarg(struct statvfs *) buf;
898 syscallarg(int) flags;
899 } */
900 struct statvfs *sb;
901 int error;
902
903 sb = STATVFSBUF_GET();
904 error = do_sys_pstatvfs(l, SCARG(uap, path), SCARG(uap, flags), sb);
905 if (error == 0)
906 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
907 STATVFSBUF_PUT(sb);
908 return error;
909 }
910
911 /*
912 * Get filesystem statistics by fd.
913 */
914 int
915 do_sys_fstatvfs(struct lwp *l, int fd, int flags, struct statvfs *sb)
916 {
917 struct proc *p = l->l_proc;
918 struct file *fp;
919 struct mount *mp;
920 int error;
921
922 /* getvnode() will use the descriptor for us */
923 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
924 return (error);
925 mp = ((struct vnode *)fp->f_data)->v_mount;
926 error = dostatvfs(mp, sb, l, flags, 1);
927 FILE_UNUSE(fp, l);
928 return error;
929 }
930
931 /* ARGSUSED */
932 int
933 sys_fstatvfs1(struct lwp *l, const struct sys_fstatvfs1_args *uap, register_t *retval)
934 {
935 /* {
936 syscallarg(int) fd;
937 syscallarg(struct statvfs *) buf;
938 syscallarg(int) flags;
939 } */
940 struct statvfs *sb;
941 int error;
942
943 sb = STATVFSBUF_GET();
944 error = do_sys_fstatvfs(l, SCARG(uap, fd), SCARG(uap, flags), sb);
945 if (error == 0)
946 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
947 STATVFSBUF_PUT(sb);
948 return error;
949 }
950
951
952 /*
953 * Get statistics on all filesystems.
954 */
955 int
956 do_sys_getvfsstat(struct lwp *l, void *sfsp, size_t bufsize, int flags,
957 int (*copyfn)(const void *, void *, size_t), size_t entry_sz,
958 register_t *retval)
959 {
960 int root = 0;
961 struct proc *p = l->l_proc;
962 struct mount *mp, *nmp;
963 struct statvfs *sb;
964 size_t count, maxcount;
965 int error = 0;
966
967 sb = STATVFSBUF_GET();
968 maxcount = bufsize / entry_sz;
969 mutex_enter(&mountlist_lock);
970 count = 0;
971 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
972 mp = nmp) {
973 if (vfs_busy(mp, LK_NOWAIT, &mountlist_lock)) {
974 nmp = CIRCLEQ_NEXT(mp, mnt_list);
975 continue;
976 }
977 if (sfsp && count < maxcount) {
978 error = dostatvfs(mp, sb, l, flags, 0);
979 if (error) {
980 mutex_enter(&mountlist_lock);
981 nmp = CIRCLEQ_NEXT(mp, mnt_list);
982 vfs_unbusy(mp);
983 continue;
984 }
985 error = copyfn(sb, sfsp, entry_sz);
986 if (error) {
987 vfs_unbusy(mp);
988 goto out;
989 }
990 sfsp = (char *)sfsp + entry_sz;
991 root |= strcmp(sb->f_mntonname, "/") == 0;
992 }
993 count++;
994 mutex_enter(&mountlist_lock);
995 nmp = CIRCLEQ_NEXT(mp, mnt_list);
996 vfs_unbusy(mp);
997 }
998
999 mutex_exit(&mountlist_lock);
1000 if (root == 0 && p->p_cwdi->cwdi_rdir) {
1001 /*
1002 * fake a root entry
1003 */
1004 error = dostatvfs(p->p_cwdi->cwdi_rdir->v_mount,
1005 sb, l, flags, 1);
1006 if (error != 0)
1007 goto out;
1008 if (sfsp)
1009 error = copyfn(sb, sfsp, entry_sz);
1010 count++;
1011 }
1012 if (sfsp && count > maxcount)
1013 *retval = maxcount;
1014 else
1015 *retval = count;
1016 out:
1017 STATVFSBUF_PUT(sb);
1018 return error;
1019 }
1020
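/*
 * getvfsstat(2): copy out a statvfs entry for every mount visible to
 * the caller; a wrapper around do_sys_getvfsstat() using copyout()
 * and the native struct statvfs size.
 */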
1021 int
1022 sys_getvfsstat(struct lwp *l, const struct sys_getvfsstat_args *uap, register_t *retval)
1023 {
1024 /* {
1025 syscallarg(struct statvfs *) buf;
1026 syscallarg(size_t) bufsize;
1027 syscallarg(int) flags;
1028 } */
1029
1030 return do_sys_getvfsstat(l, SCARG(uap, buf), SCARG(uap, bufsize),
1031 SCARG(uap, flags), copyout, sizeof (struct statvfs), retval);
1032 }
1033
1034 /*
1035 * Change current working directory to a given file descriptor.
1036 */
1037 /* ARGSUSED */
1038 int
1039 sys_fchdir(struct lwp *l, const struct sys_fchdir_args *uap, register_t *retval)
1040 {
1041 /* {
1042 syscallarg(int) fd;
1043 } */
1044 struct proc *p = l->l_proc;
1045 struct filedesc *fdp = p->p_fd;
1046 struct cwdinfo *cwdi;
1047 struct vnode *vp, *tdp;
1048 struct mount *mp;
1049 struct file *fp;
1050 int error;
1051
1052 /* getvnode() will use the descriptor for us */
1053 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1054 return (error);
1055 vp = (struct vnode *)fp->f_data;
1056
1057 VREF(vp);
1058 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1059 if (vp->v_type != VDIR)
1060 error = ENOTDIR;
1061 else
1062 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1063 if (error) {
1064 vput(vp);
1065 goto out;
1066 }
1067 while ((mp = vp->v_mountedhere) != NULL) {
1068 if (vfs_busy(mp, 0, 0))
1069 continue;
1070
1071 vput(vp);
1072 error = VFS_ROOT(mp, &tdp);
1073 vfs_unbusy(mp);
1074 if (error)
1075 goto out;
1076 vp = tdp;
1077 }
1078 VOP_UNLOCK(vp, 0);
1079
1080 /*
1081 * Disallow changing to a directory not under the process's
1082 * current root directory (if there is one).
1083 */
1084 cwdi = p->p_cwdi;
1085 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1086 if (cwdi->cwdi_rdir && !vn_isunder(vp, NULL, l)) {
1087 vrele(vp);
1088 error = EPERM; /* operation not permitted */
1089 } else {
1090 vrele(cwdi->cwdi_cdir);
1091 cwdi->cwdi_cdir = vp;
1092 }
1093 rw_exit(&cwdi->cwdi_lock);
1094
1095 out:
1096 FILE_UNUSE(fp, l);
1097 return (error);
1098 }
1099
1100 /*
1101 * Change this process's notion of the root directory to a given file
1102 * descriptor.
1103 */
1104 int
1105 sys_fchroot(struct lwp *l, const struct sys_fchroot_args *uap, register_t *retval)
1106 {
1107 struct proc *p = l->l_proc;
1108 struct filedesc *fdp = p->p_fd;
1109 struct cwdinfo *cwdi;
1110 struct vnode *vp;
1111 struct file *fp;
1112 int error;
1113
1114 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1115 KAUTH_REQ_SYSTEM_CHROOT_FCHROOT, NULL, NULL, NULL)) != 0)
1116 return error;
1117 /* getvnode() will use the descriptor for us */
1118 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1119 return error;
1120 vp = (struct vnode *) fp->f_data;
1121 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1122 if (vp->v_type != VDIR)
1123 error = ENOTDIR;
1124 else
1125 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1126 VOP_UNLOCK(vp, 0);
1127 if (error)
1128 goto out;
1129 VREF(vp);
1130
1131 /*
1132 	 * Prevent escaping from the chroot by keeping the working
1133 	 * directory under the new root.  Silently chdir to / if we
1134 	 * aren't already there.
1135 */
1136 cwdi = p->p_cwdi;
1137 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1138 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1139 /*
1140 * XXX would be more failsafe to change directory to a
1141 * deadfs node here instead
1142 */
1143 vrele(cwdi->cwdi_cdir);
1144 VREF(vp);
1145 cwdi->cwdi_cdir = vp;
1146 }
1147
1148 if (cwdi->cwdi_rdir != NULL)
1149 vrele(cwdi->cwdi_rdir);
1150 cwdi->cwdi_rdir = vp;
1151 rw_exit(&cwdi->cwdi_lock);
1152
1153 out:
1154 FILE_UNUSE(fp, l);
1155 return (error);
1156 }
1157
1158 /*
1159 * Change current working directory (``.'').
1160 */
1161 /* ARGSUSED */
1162 int
1163 sys_chdir(struct lwp *l, const struct sys_chdir_args *uap, register_t *retval)
1164 {
1165 /* {
1166 syscallarg(const char *) path;
1167 } */
1168 struct proc *p = l->l_proc;
1169 struct cwdinfo *cwdi;
1170 int error;
1171 struct nameidata nd;
1172
1173 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1174 SCARG(uap, path));
1175 if ((error = change_dir(&nd, l)) != 0)
1176 return (error);
1177 cwdi = p->p_cwdi;
1178 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1179 vrele(cwdi->cwdi_cdir);
1180 cwdi->cwdi_cdir = nd.ni_vp;
1181 rw_exit(&cwdi->cwdi_lock);
1182 return (0);
1183 }
1184
1185 /*
1186 * Change notion of root (``/'') directory.
1187 */
1188 /* ARGSUSED */
1189 int
1190 sys_chroot(struct lwp *l, const struct sys_chroot_args *uap, register_t *retval)
1191 {
1192 /* {
1193 syscallarg(const char *) path;
1194 } */
1195 struct proc *p = l->l_proc;
1196 struct cwdinfo *cwdi;
1197 struct vnode *vp;
1198 int error;
1199 struct nameidata nd;
1200
1201 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHROOT,
1202 KAUTH_REQ_SYSTEM_CHROOT_CHROOT, NULL, NULL, NULL)) != 0)
1203 return (error);
1204 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1205 SCARG(uap, path));
1206 if ((error = change_dir(&nd, l)) != 0)
1207 return (error);
1208
1209 cwdi = p->p_cwdi;
1210 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
1211 if (cwdi->cwdi_rdir != NULL)
1212 vrele(cwdi->cwdi_rdir);
1213 vp = nd.ni_vp;
1214 cwdi->cwdi_rdir = vp;
1215
1216 /*
1217 	 * Prevent escaping from the chroot by keeping the working
1218 	 * directory under the new root.  Silently chdir to / if we
1219 	 * aren't already there.
1220 */
1221 if (!vn_isunder(cwdi->cwdi_cdir, vp, l)) {
1222 /*
1223 * XXX would be more failsafe to change directory to a
1224 * deadfs node here instead
1225 */
1226 vrele(cwdi->cwdi_cdir);
1227 VREF(vp);
1228 cwdi->cwdi_cdir = vp;
1229 }
1230 rw_exit(&cwdi->cwdi_lock);
1231
1232 return (0);
1233 }
1234
1235 /*
1236 * Common routine for chroot and chdir.
1237 */
1238 static int
1239 change_dir(struct nameidata *ndp, struct lwp *l)
1240 {
1241 struct vnode *vp;
1242 int error;
1243
1244 if ((error = namei(ndp)) != 0)
1245 return (error);
1246 vp = ndp->ni_vp;
1247 if (vp->v_type != VDIR)
1248 error = ENOTDIR;
1249 else
1250 error = VOP_ACCESS(vp, VEXEC, l->l_cred);
1251
1252 if (error)
1253 vput(vp);
1254 else
1255 VOP_UNLOCK(vp, 0);
1256 return (error);
1257 }
1258
1259 /*
1260 * Check permissions, allocate an open file structure,
1261 * and call the device open routine if any.
1262 */
1263 int
1264 sys_open(struct lwp *l, const struct sys_open_args *uap, register_t *retval)
1265 {
1266 /* {
1267 syscallarg(const char *) path;
1268 syscallarg(int) flags;
1269 syscallarg(int) mode;
1270 } */
1271 struct proc *p = l->l_proc;
1272 struct cwdinfo *cwdi = p->p_cwdi;
1273 struct filedesc *fdp = p->p_fd;
1274 struct file *fp;
1275 struct vnode *vp;
1276 int flags, cmode;
1277 int type, indx, error;
1278 struct flock lf;
1279 struct nameidata nd;
1280
1281 flags = FFLAGS(SCARG(uap, flags));
1282 if ((flags & (FREAD | FWRITE)) == 0)
1283 return (EINVAL);
1284 /* falloc() will use the file descriptor for us */
1285 if ((error = falloc(l, &fp, &indx)) != 0)
1286 return (error);
1287 /* We're going to read cwdi->cwdi_cmask unlocked here. */
1288 cmode = ((SCARG(uap, mode) &~ cwdi->cwdi_cmask) & ALLPERMS) &~ S_ISTXT;
1289 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1290 SCARG(uap, path));
1291 l->l_dupfd = -indx - 1; /* XXX check for fdopen */
1292 if ((error = vn_open(&nd, flags, cmode)) != 0) {
1293 rw_enter(&fdp->fd_lock, RW_WRITER);
1294 FILE_UNUSE(fp, l);
1295 fdp->fd_ofiles[indx] = NULL;
1296 rw_exit(&fdp->fd_lock);
1297 ffree(fp);
1298 if ((error == EDUPFD || error == EMOVEFD) &&
1299 l->l_dupfd >= 0 && /* XXX from fdopen */
1300 (error =
1301 dupfdopen(l, indx, l->l_dupfd, flags, error)) == 0) {
1302 *retval = indx;
1303 return (0);
1304 }
1305 if (error == ERESTART)
1306 error = EINTR;
1307 fdremove(fdp, indx);
1308 return (error);
1309 }
1310
1311 l->l_dupfd = 0;
1312 vp = nd.ni_vp;
1313 fp->f_flag = flags & FMASK;
1314 fp->f_type = DTYPE_VNODE;
1315 fp->f_ops = &vnops;
1316 fp->f_data = vp;
1317 if (flags & (O_EXLOCK | O_SHLOCK)) {
1318 lf.l_whence = SEEK_SET;
1319 lf.l_start = 0;
1320 lf.l_len = 0;
1321 if (flags & O_EXLOCK)
1322 lf.l_type = F_WRLCK;
1323 else
1324 lf.l_type = F_RDLCK;
1325 type = F_FLOCK;
1326 if ((flags & FNONBLOCK) == 0)
1327 type |= F_WAIT;
1328 VOP_UNLOCK(vp, 0);
1329 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1330 if (error) {
1331 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1332 FILE_UNUSE(fp, l);
1333 ffree(fp);
1334 fdremove(fdp, indx);
1335 return (error);
1336 }
1337 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1338 fp->f_flag |= FHASLOCK;
1339 }
1340 VOP_UNLOCK(vp, 0);
1341 *retval = indx;
1342 FILE_SET_MATURE(fp);
1343 FILE_UNUSE(fp, l);
1344 return (0);
1345 }
1346
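/*
 * vfs__fhfree: free a file handle allocated by vfs_composefh_alloc()
 * or vfs_copyinfh_alloc().  A NULL pointer is tolerated.
 */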
1347 static void
1348 vfs__fhfree(fhandle_t *fhp)
1349 {
1350 size_t fhsize;
1351
1352 if (fhp == NULL) {
1353 return;
1354 }
1355 fhsize = FHANDLE_SIZE(fhp);
1356 kmem_free(fhp, fhsize);
1357 }
1358
1359 /*
1360 * vfs_composefh: compose a filehandle.
1361 */
1362
1363 int
1364 vfs_composefh(struct vnode *vp, fhandle_t *fhp, size_t *fh_size)
1365 {
1366 struct mount *mp;
1367 struct fid *fidp;
1368 int error;
1369 size_t needfhsize;
1370 size_t fidsize;
1371
1372 mp = vp->v_mount;
1373 fidp = NULL;
1374 if (*fh_size < FHANDLE_SIZE_MIN) {
1375 fidsize = 0;
1376 } else {
1377 fidsize = *fh_size - offsetof(fhandle_t, fh_fid);
1378 if (fhp != NULL) {
1379 memset(fhp, 0, *fh_size);
1380 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1381 fidp = &fhp->fh_fid;
1382 }
1383 }
1384 error = VFS_VPTOFH(vp, fidp, &fidsize);
1385 needfhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1386 if (error == 0 && *fh_size < needfhsize) {
1387 error = E2BIG;
1388 }
1389 *fh_size = needfhsize;
1390 return error;
1391 }
1392
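/*
 * vfs_composefh_alloc: as vfs_composefh(), but determine the required
 * size with a zero-sized VFS_VPTOFH() probe (expected to fail with
 * E2BIG), allocate the handle and return it through *fhpp.
 */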
1393 int
1394 vfs_composefh_alloc(struct vnode *vp, fhandle_t **fhpp)
1395 {
1396 struct mount *mp;
1397 fhandle_t *fhp;
1398 size_t fhsize;
1399 size_t fidsize;
1400 int error;
1401
1402 *fhpp = NULL;
1403 mp = vp->v_mount;
1404 fidsize = 0;
1405 error = VFS_VPTOFH(vp, NULL, &fidsize);
1406 KASSERT(error != 0);
1407 if (error != E2BIG) {
1408 goto out;
1409 }
1410 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1411 fhp = kmem_zalloc(fhsize, KM_SLEEP);
1412 if (fhp == NULL) {
1413 error = ENOMEM;
1414 goto out;
1415 }
1416 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1417 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize);
1418 if (error == 0) {
1419 KASSERT((FHANDLE_SIZE(fhp) == fhsize &&
1420 FHANDLE_FILEID(fhp)->fid_len == fidsize));
1421 *fhpp = fhp;
1422 } else {
1423 kmem_free(fhp, fhsize);
1424 }
1425 out:
1426 return error;
1427 }
1428
1429 void
1430 vfs_composefh_free(fhandle_t *fhp)
1431 {
1432
1433 vfs__fhfree(fhp);
1434 }
1435
1436 /*
1437 * vfs_fhtovp: lookup a vnode by a filehandle.
1438 */
1439
1440 int
1441 vfs_fhtovp(fhandle_t *fhp, struct vnode **vpp)
1442 {
1443 struct mount *mp;
1444 int error;
1445
1446 *vpp = NULL;
1447 mp = vfs_getvfs(FHANDLE_FSID(fhp));
1448 if (mp == NULL) {
1449 error = ESTALE;
1450 goto out;
1451 }
1452 if (mp->mnt_op->vfs_fhtovp == NULL) {
1453 error = EOPNOTSUPP;
1454 goto out;
1455 }
1456 error = VFS_FHTOVP(mp, FHANDLE_FILEID(fhp), vpp);
1457 out:
1458 return error;
1459 }
1460
1461 /*
1462 * vfs_copyinfh_alloc: allocate and copyin a filehandle, given
1463 * the needed size.
1464 */
1465
1466 int
1467 vfs_copyinfh_alloc(const void *ufhp, size_t fhsize, fhandle_t **fhpp)
1468 {
1469 fhandle_t *fhp;
1470 int error;
1471
1472 *fhpp = NULL;
1473 if (fhsize > FHANDLE_SIZE_MAX) {
1474 return EINVAL;
1475 }
1476 if (fhsize < FHANDLE_SIZE_MIN) {
1477 return EINVAL;
1478 }
1479 again:
1480 fhp = kmem_alloc(fhsize, KM_SLEEP);
1481 if (fhp == NULL) {
1482 return ENOMEM;
1483 }
1484 error = copyin(ufhp, fhp, fhsize);
1485 if (error == 0) {
1486 /* XXX this check shouldn't be here */
1487 if (FHANDLE_SIZE(fhp) == fhsize) {
1488 *fhpp = fhp;
1489 return 0;
1490 } else if (fhsize == NFSX_V2FH && FHANDLE_SIZE(fhp) < fhsize) {
1491 /*
1492 * a kludge for nfsv2 padded handles.
1493 */
1494 size_t sz;
1495
1496 sz = FHANDLE_SIZE(fhp);
1497 kmem_free(fhp, fhsize);
1498 fhsize = sz;
1499 goto again;
1500 } else {
1501 /*
1502 			 * userland told us the wrong size.
1503 */
1504 error = EINVAL;
1505 }
1506 }
1507 kmem_free(fhp, fhsize);
1508 return error;
1509 }
1510
1511 void
1512 vfs_copyinfh_free(fhandle_t *fhp)
1513 {
1514
1515 vfs__fhfree(fhp);
1516 }
1517
1518 /*
1519 * Get file handle system call
1520 */
1521 int
1522 sys___getfh30(struct lwp *l, const struct sys___getfh30_args *uap, register_t *retval)
1523 {
1524 /* {
1525 syscallarg(char *) fname;
1526 syscallarg(fhandle_t *) fhp;
1527 syscallarg(size_t *) fh_size;
1528 } */
1529 struct vnode *vp;
1530 fhandle_t *fh;
1531 int error;
1532 struct nameidata nd;
1533 size_t sz;
1534 size_t usz;
1535
1536 /*
1537 * Must be super user
1538 */
1539 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1540 0, NULL, NULL, NULL);
1541 if (error)
1542 return (error);
1543 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
1544 SCARG(uap, fname));
1545 error = namei(&nd);
1546 if (error)
1547 return (error);
1548 vp = nd.ni_vp;
1549 error = vfs_composefh_alloc(vp, &fh);
1550 vput(vp);
1551 if (error != 0) {
1552 goto out;
1553 }
1554 error = copyin(SCARG(uap, fh_size), &usz, sizeof(size_t));
1555 if (error != 0) {
1556 goto out;
1557 }
1558 sz = FHANDLE_SIZE(fh);
1559 error = copyout(&sz, SCARG(uap, fh_size), sizeof(size_t));
1560 if (error != 0) {
1561 goto out;
1562 }
1563 if (usz >= sz) {
1564 error = copyout(fh, SCARG(uap, fhp), sz);
1565 } else {
1566 error = E2BIG;
1567 }
1568 out:
1569 vfs_composefh_free(fh);
1570 return (error);
1571 }
1572
1573 /*
1574 * Open a file given a file handle.
1575 *
1576 * Check permissions, allocate an open file structure,
1577 * and call the device open routine if any.
1578 */
1579
1580 int
1581 dofhopen(struct lwp *l, const void *ufhp, size_t fhsize, int oflags,
1582 register_t *retval)
1583 {
1584 struct filedesc *fdp = l->l_proc->p_fd;
1585 struct file *fp;
1586 struct vnode *vp = NULL;
1587 kauth_cred_t cred = l->l_cred;
1588 struct file *nfp;
1589 int type, indx, error=0;
1590 struct flock lf;
1591 struct vattr va;
1592 fhandle_t *fh;
1593 int flags;
1594
1595 /*
1596 * Must be super user
1597 */
1598 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1599 0, NULL, NULL, NULL)))
1600 return (error);
1601
1602 flags = FFLAGS(oflags);
1603 if ((flags & (FREAD | FWRITE)) == 0)
1604 return (EINVAL);
1605 if ((flags & O_CREAT))
1606 return (EINVAL);
1607 /* falloc() will use the file descriptor for us */
1608 if ((error = falloc(l, &nfp, &indx)) != 0)
1609 return (error);
1610 fp = nfp;
1611 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1612 if (error != 0) {
1613 goto bad;
1614 }
1615 error = vfs_fhtovp(fh, &vp);
1616 if (error != 0) {
1617 goto bad;
1618 }
1619
1620 /* Now do an effective vn_open */
1621
1622 if (vp->v_type == VSOCK) {
1623 error = EOPNOTSUPP;
1624 goto bad;
1625 }
1626 error = vn_openchk(vp, cred, flags);
1627 if (error != 0)
1628 goto bad;
1629 if (flags & O_TRUNC) {
1630 VOP_UNLOCK(vp, 0); /* XXX */
1631 VOP_LEASE(vp, cred, LEASE_WRITE);
1632 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
1633 VATTR_NULL(&va);
1634 va.va_size = 0;
1635 error = VOP_SETATTR(vp, &va, cred);
1636 if (error)
1637 goto bad;
1638 }
1639 if ((error = VOP_OPEN(vp, flags, cred)) != 0)
1640 goto bad;
1641 if (flags & FWRITE)
1642 vp->v_writecount++;
1643
1644 /* done with modified vn_open, now finish what sys_open does. */
1645
1646 fp->f_flag = flags & FMASK;
1647 fp->f_type = DTYPE_VNODE;
1648 fp->f_ops = &vnops;
1649 fp->f_data = vp;
1650 if (flags & (O_EXLOCK | O_SHLOCK)) {
1651 lf.l_whence = SEEK_SET;
1652 lf.l_start = 0;
1653 lf.l_len = 0;
1654 if (flags & O_EXLOCK)
1655 lf.l_type = F_WRLCK;
1656 else
1657 lf.l_type = F_RDLCK;
1658 type = F_FLOCK;
1659 if ((flags & FNONBLOCK) == 0)
1660 type |= F_WAIT;
1661 VOP_UNLOCK(vp, 0);
1662 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, type);
1663 if (error) {
1664 (void) vn_close(vp, fp->f_flag, fp->f_cred, l);
1665 FILE_UNUSE(fp, l);
1666 ffree(fp);
1667 fdremove(fdp, indx);
1668 return (error);
1669 }
1670 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1671 fp->f_flag |= FHASLOCK;
1672 }
1673 VOP_UNLOCK(vp, 0);
1674 *retval = indx;
1675 FILE_SET_MATURE(fp);
1676 FILE_UNUSE(fp, l);
1677 vfs_copyinfh_free(fh);
1678 return (0);
1679
1680 bad:
1681 FILE_UNUSE(fp, l);
1682 ffree(fp);
1683 fdremove(fdp, indx);
1684 if (vp != NULL)
1685 vput(vp);
1686 vfs_copyinfh_free(fh);
1687 return (error);
1688 }
1689
1690 int
1691 sys___fhopen40(struct lwp *l, const struct sys___fhopen40_args *uap, register_t *retval)
1692 {
1693 /* {
1694 syscallarg(const void *) fhp;
1695 syscallarg(size_t) fh_size;
1696 syscallarg(int) flags;
1697 } */
1698
1699 return dofhopen(l, SCARG(uap, fhp), SCARG(uap, fh_size),
1700 SCARG(uap, flags), retval);
1701 }
1702
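/*
 * do_fhstat: common code for the fhstat(2) family.  Convert a user
 * supplied file handle into a vnode and stat it; restricted to the
 * super-user via KAUTH_SYSTEM_FILEHANDLE.
 */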
1703 int
1704 do_fhstat(struct lwp *l, const void *ufhp, size_t fhsize, struct stat *sb)
1705 {
1706 int error;
1707 fhandle_t *fh;
1708 struct vnode *vp;
1709
1710 /*
1711 * Must be super user
1712 */
1713 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1714 0, NULL, NULL, NULL)))
1715 return (error);
1716
1717 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1718 if (error != 0)
1719 return error;
1720
1721 error = vfs_fhtovp(fh, &vp);
1722 vfs_copyinfh_free(fh);
1723 if (error != 0)
1724 return error;
1725
1726 error = vn_stat(vp, sb, l);
1727 vput(vp);
1728 return error;
1729 }
1730
1731
1732 /* ARGSUSED */
1733 int
1734 sys___fhstat40(struct lwp *l, const struct sys___fhstat40_args *uap, register_t *retval)
1735 {
1736 /* {
1737 syscallarg(const void *) fhp;
1738 syscallarg(size_t) fh_size;
1739 syscallarg(struct stat *) sb;
1740 } */
1741 struct stat sb;
1742 int error;
1743
1744 error = do_fhstat(l, SCARG(uap, fhp), SCARG(uap, fh_size), &sb);
1745 if (error)
1746 return error;
1747 return copyout(&sb, SCARG(uap, sb), sizeof(sb));
1748 }
1749
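/*
 * do_fhstatvfs: as do_fhstat(), but return file system statistics for
 * the mount containing the file handle's vnode.
 */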
1750 int
1751 do_fhstatvfs(struct lwp *l, const void *ufhp, size_t fhsize, struct statvfs *sb,
1752 int flags)
1753 {
1754 fhandle_t *fh;
1755 struct mount *mp;
1756 struct vnode *vp;
1757 int error;
1758
1759 /*
1760 * Must be super user
1761 */
1762 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FILEHANDLE,
1763 0, NULL, NULL, NULL)))
1764 return error;
1765
1766 error = vfs_copyinfh_alloc(ufhp, fhsize, &fh);
1767 if (error != 0)
1768 return error;
1769
1770 error = vfs_fhtovp(fh, &vp);
1771 vfs_copyinfh_free(fh);
1772 if (error != 0)
1773 return error;
1774
1775 mp = vp->v_mount;
1776 error = dostatvfs(mp, sb, l, flags, 1);
1777 vput(vp);
1778 return error;
1779 }
1780
1781 /* ARGSUSED */
1782 int
1783 sys___fhstatvfs140(struct lwp *l, const struct sys___fhstatvfs140_args *uap, register_t *retval)
1784 {
1785 /* {
1786 syscallarg(const void *) fhp;
1787 syscallarg(size_t) fh_size;
1788 syscallarg(struct statvfs *) buf;
1789 syscallarg(int) flags;
1790 } */
1791 struct statvfs *sb = STATVFSBUF_GET();
1792 int error;
1793
1794 error = do_fhstatvfs(l, SCARG(uap, fhp), SCARG(uap, fh_size), sb,
1795 SCARG(uap, flags));
1796 if (error == 0)
1797 error = copyout(sb, SCARG(uap, buf), sizeof(*sb));
1798 STATVFSBUF_PUT(sb);
1799 return error;
1800 }
1801
1802 /*
1803 * Create a special file.
1804 */
1805 /* ARGSUSED */
1806 int
1807 sys_mknod(struct lwp *l, const struct sys_mknod_args *uap, register_t *retval)
1808 {
1809 /* {
1810 syscallarg(const char *) path;
1811 syscallarg(int) mode;
1812 syscallarg(int) dev;
1813 } */
1814 struct proc *p = l->l_proc;
1815 struct vnode *vp;
1816 struct vattr vattr;
1817 int error, optype;
1818 struct nameidata nd;
1819 char *path;
1820 const char *cpath;
1821 enum uio_seg seg = UIO_USERSPACE;
1822
1823 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MKNOD,
1824 0, NULL, NULL, NULL)) != 0)
1825 return (error);
1826
1827 optype = VOP_MKNOD_DESCOFFSET;
1828
1829 VERIEXEC_PATH_GET(SCARG(uap, path), seg, cpath, path);
1830 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, seg, cpath);
1831
1832 if ((error = namei(&nd)) != 0)
1833 goto out;
1834 vp = nd.ni_vp;
1835 if (vp != NULL)
1836 error = EEXIST;
1837 else {
1838 VATTR_NULL(&vattr);
1839 /* We will read cwdi->cwdi_cmask unlocked. */
1840 vattr.va_mode =
1841 (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1842 vattr.va_rdev = SCARG(uap, dev);
1843
1844 switch (SCARG(uap, mode) & S_IFMT) {
1845 case S_IFMT: /* used by badsect to flag bad sectors */
1846 vattr.va_type = VBAD;
1847 break;
1848 case S_IFCHR:
1849 vattr.va_type = VCHR;
1850 break;
1851 case S_IFBLK:
1852 vattr.va_type = VBLK;
1853 break;
1854 case S_IFWHT:
1855 optype = VOP_WHITEOUT_DESCOFFSET;
1856 break;
1857 case S_IFREG:
1858 #if NVERIEXEC > 0
1859 error = veriexec_openchk(l, nd.ni_vp, nd.ni_dirp,
1860 O_CREAT);
1861 #endif /* NVERIEXEC > 0 */
1862 vattr.va_type = VREG;
1863 vattr.va_rdev = VNOVAL;
1864 optype = VOP_CREATE_DESCOFFSET;
1865 break;
1866 default:
1867 error = EINVAL;
1868 break;
1869 }
1870 }
1871 if (!error) {
1872 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1873 switch (optype) {
1874 case VOP_WHITEOUT_DESCOFFSET:
1875 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1876 if (error)
1877 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1878 vput(nd.ni_dvp);
1879 break;
1880
1881 case VOP_MKNOD_DESCOFFSET:
1882 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1883 &nd.ni_cnd, &vattr);
1884 if (error == 0)
1885 vput(nd.ni_vp);
1886 break;
1887
1888 case VOP_CREATE_DESCOFFSET:
1889 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
1890 &nd.ni_cnd, &vattr);
1891 if (error == 0)
1892 vput(nd.ni_vp);
1893 break;
1894 }
1895 } else {
1896 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1897 if (nd.ni_dvp == vp)
1898 vrele(nd.ni_dvp);
1899 else
1900 vput(nd.ni_dvp);
1901 if (vp)
1902 vrele(vp);
1903 }
1904 out:
1905 VERIEXEC_PATH_PUT(path);
1906 return (error);
1907 }
1908
1909 /*
1910 * Create a named pipe.
1911 */
1912 /* ARGSUSED */
1913 int
1914 sys_mkfifo(struct lwp *l, const struct sys_mkfifo_args *uap, register_t *retval)
1915 {
1916 /* {
1917 syscallarg(const char *) path;
1918 syscallarg(int) mode;
1919 } */
1920 struct proc *p = l->l_proc;
1921 struct vattr vattr;
1922 int error;
1923 struct nameidata nd;
1924
1925 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1926 SCARG(uap, path));
1927 if ((error = namei(&nd)) != 0)
1928 return (error);
1929 if (nd.ni_vp != NULL) {
1930 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1931 if (nd.ni_dvp == nd.ni_vp)
1932 vrele(nd.ni_dvp);
1933 else
1934 vput(nd.ni_dvp);
1935 vrele(nd.ni_vp);
1936 return (EEXIST);
1937 }
1938 VATTR_NULL(&vattr);
1939 vattr.va_type = VFIFO;
1940 /* We will read cwdi->cwdi_cmask unlocked. */
1941 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_cwdi->cwdi_cmask;
1942 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1943 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1944 if (error == 0)
1945 vput(nd.ni_vp);
1946 return (error);
1947 }
1948
1949 /*
1950 * Make a hard file link.
1951 */
1952 /* ARGSUSED */
1953 int
1954 sys_link(struct lwp *l, const struct sys_link_args *uap, register_t *retval)
1955 {
1956 /* {
1957 syscallarg(const char *) path;
1958 syscallarg(const char *) link;
1959 } */
1960 struct vnode *vp;
1961 struct nameidata nd;
1962 int error;
1963
1964 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
1965 SCARG(uap, path));
1966 if ((error = namei(&nd)) != 0)
1967 return (error);
1968 vp = nd.ni_vp;
1969 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
1970 SCARG(uap, link));
1971 if ((error = namei(&nd)) != 0)
1972 goto out;
1973 if (nd.ni_vp) {
1974 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
1975 if (nd.ni_dvp == nd.ni_vp)
1976 vrele(nd.ni_dvp);
1977 else
1978 vput(nd.ni_dvp);
1979 vrele(nd.ni_vp);
1980 error = EEXIST;
1981 goto out;
1982 }
1983 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
1984 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
1985 error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1986 out:
1987 vrele(vp);
1988 return (error);
1989 }
1990
1991 /*
1992 * Make a symbolic link.
1993 */
1994 /* ARGSUSED */
1995 int
1996 sys_symlink(struct lwp *l, const struct sys_symlink_args *uap, register_t *retval)
1997 {
1998 /* {
1999 syscallarg(const char *) path;
2000 syscallarg(const char *) link;
2001 } */
2002 struct proc *p = l->l_proc;
2003 struct vattr vattr;
2004 char *path;
2005 int error;
2006 struct nameidata nd;
2007
2008 path = PNBUF_GET();
2009 error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL);
2010 if (error)
2011 goto out;
2012 NDINIT(&nd, CREATE, LOCKPARENT | TRYEMULROOT, UIO_USERSPACE,
2013 SCARG(uap, link));
2014 if ((error = namei(&nd)) != 0)
2015 goto out;
2016 if (nd.ni_vp) {
2017 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2018 if (nd.ni_dvp == nd.ni_vp)
2019 vrele(nd.ni_dvp);
2020 else
2021 vput(nd.ni_dvp);
2022 vrele(nd.ni_vp);
2023 error = EEXIST;
2024 goto out;
2025 }
2026 VATTR_NULL(&vattr);
2027 vattr.va_type = VLNK;
2028 /* We will read cwdi->cwdi_cmask unlocked. */
2029 vattr.va_mode = ACCESSPERMS &~ p->p_cwdi->cwdi_cmask;
2030 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2031 error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
2032 if (error == 0)
2033 vput(nd.ni_vp);
2034 out:
2035 PNBUF_PUT(path);
2036 return (error);
2037 }
2038
2039 /*
2040 * Delete a whiteout from the filesystem.
2041 */
2042 /* ARGSUSED */
2043 int
2044 sys_undelete(struct lwp *l, const struct sys_undelete_args *uap, register_t *retval)
2045 {
2046 /* {
2047 syscallarg(const char *) path;
2048 } */
2049 int error;
2050 struct nameidata nd;
2051
2052 NDINIT(&nd, DELETE, LOCKPARENT | DOWHITEOUT | TRYEMULROOT,
2053 UIO_USERSPACE, SCARG(uap, path));
2054 error = namei(&nd);
2055 if (error)
2056 return (error);
2057
2058 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
2059 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2060 if (nd.ni_dvp == nd.ni_vp)
2061 vrele(nd.ni_dvp);
2062 else
2063 vput(nd.ni_dvp);
2064 if (nd.ni_vp)
2065 vrele(nd.ni_vp);
2066 return (EEXIST);
2067 }
2068 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
2069 if ((error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE)) != 0)
2070 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2071 vput(nd.ni_dvp);
2072 return (error);
2073 }
2074
2075 /*
2076 * Delete a name from the filesystem.
2077 */
2078 /* ARGSUSED */
2079 int
2080 sys_unlink(struct lwp *l, const struct sys_unlink_args *uap, register_t *retval)
2081 {
2082 /* {
2083 syscallarg(const char *) path;
2084 } */
2085
2086 return do_sys_unlink(SCARG(uap, path), UIO_USERSPACE);
2087 }
2088
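/*
 * do_sys_unlink: common code for unlink(2).  Looks up the path in the
 * given address space, refuses to remove the root of a mounted file
 * system, runs the Veriexec and fileassoc hooks where configured, and
 * finally calls VOP_REMOVE().
 */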
2089 int
2090 do_sys_unlink(const char *arg, enum uio_seg seg)
2091 {
2092 struct vnode *vp;
2093 int error;
2094 struct nameidata nd;
2095 kauth_cred_t cred;
2096 char *path;
2097 const char *cpath;
2098
2099 VERIEXEC_PATH_GET(arg, seg, cpath, path);
2100 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, seg, cpath);
2101
2102 if ((error = namei(&nd)) != 0)
2103 goto out;
2104 vp = nd.ni_vp;
2105
2106 /*
2107 * The root of a mounted filesystem cannot be deleted.
2108 */
2109 if (vp->v_vflag & VV_ROOT) {
2110 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2111 if (nd.ni_dvp == vp)
2112 vrele(nd.ni_dvp);
2113 else
2114 vput(nd.ni_dvp);
2115 vput(vp);
2116 error = EBUSY;
2117 goto out;
2118 }
2119
2120 #if NVERIEXEC > 0
2121 /* Handle remove requests for veriexec entries. */
2122 if ((error = veriexec_removechk(curlwp, nd.ni_vp, nd.ni_dirp)) != 0) {
2123 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
2124 if (nd.ni_dvp == vp)
2125 vrele(nd.ni_dvp);
2126 else
2127 vput(nd.ni_dvp);
2128 vput(vp);
2129 goto out;
2130 }
2131 #endif /* NVERIEXEC > 0 */
2132
2133 cred = kauth_cred_get();
2134 VOP_LEASE(nd.ni_dvp, cred, LEASE_WRITE);
2135 VOP_LEASE(vp, cred, LEASE_WRITE);
2136 #ifdef FILEASSOC
2137 (void)fileassoc_file_delete(vp);
2138 #endif /* FILEASSOC */
2139 error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2140 out:
2141 VERIEXEC_PATH_PUT(path);
2142 return (error);
2143 }
2144
2145 /*
2146 * Reposition read/write file offset.
2147 */
2148 int
2149 sys_lseek(struct lwp *l, const struct sys_lseek_args *uap, register_t *retval)
2150 {
2151 /* {
2152 syscallarg(int) fd;
2153 syscallarg(int) pad;
2154 syscallarg(off_t) offset;
2155 syscallarg(int) whence;
2156 } */
2157 struct proc *p = l->l_proc;
2158 kauth_cred_t cred = l->l_cred;
2159 struct filedesc *fdp = p->p_fd;
2160 struct file *fp;
2161 struct vnode *vp;
2162 struct vattr vattr;
2163 off_t newoff;
2164 int error;
2165
2166 if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
2167 return (EBADF);
2168
2169 vp = (struct vnode *)fp->f_data;
2170 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2171 error = ESPIPE;
2172 FILE_UNLOCK(fp);
2173 goto out;
2174 }
2175
2176 switch (SCARG(uap, whence)) {
2177 case SEEK_CUR:
2178 newoff = fp->f_offset + SCARG(uap, offset);
2179 FILE_USE(fp);
2180 break;
2181 case SEEK_END:
2182 FILE_USE(fp);
2183 error = VOP_GETATTR(vp, &vattr, cred);
2184 if (error) {
2185 FILE_UNUSE(fp, l);
2186 goto out;
2187 }
2188 newoff = SCARG(uap, offset) + vattr.va_size;
2189 break;
2190 case SEEK_SET:
2191 FILE_USE(fp);
2192 newoff = SCARG(uap, offset);
2193 break;
2194 default:
2195 FILE_UNLOCK(fp);
2196 error = EINVAL;
2197 goto out;
2198 }
2199 if ((error = VOP_SEEK(vp, fp->f_offset, newoff, cred)) == 0) {
2200 FILE_LOCK(fp);
2201 *(off_t *)retval = fp->f_offset = newoff;
2202 FILE_UNLOCK(fp);
2203 }
2204 FILE_UNUSE(fp, l);
2205 out:
2206 return (error);
2207 }
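/*
 * Illustrative sketch: because SEEK_END is resolved against va_size via
 * VOP_GETATTR() above, a caller can learn a file's current size by
 * seeking to the end with a zero offset (fd is assumed to be an already
 * open descriptor; <unistd.h> and <err.h> supply the declarations):
 *
 *	off_t size = lseek(fd, 0, SEEK_END);
 *	if (size == (off_t)-1)
 *		err(1, "lseek");
 */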
2208
2209 /*
2210 * Positional read system call.
2211 */
2212 int
2213 sys_pread(struct lwp *l, const struct sys_pread_args *uap, register_t *retval)
2214 {
2215 /* {
2216 syscallarg(int) fd;
2217 syscallarg(void *) buf;
2218 syscallarg(size_t) nbyte;
2219 syscallarg(off_t) offset;
2220 } */
2221 struct proc *p = l->l_proc;
2222 struct filedesc *fdp = p->p_fd;
2223 struct file *fp;
2224 struct vnode *vp;
2225 off_t offset;
2226 int error, fd = SCARG(uap, fd);
2227
2228 if ((fp = fd_getfile(fdp, fd)) == NULL)
2229 return (EBADF);
2230
2231 if ((fp->f_flag & FREAD) == 0) {
2232 FILE_UNLOCK(fp);
2233 return (EBADF);
2234 }
2235
2236 FILE_USE(fp);
2237
2238 vp = (struct vnode *)fp->f_data;
2239 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2240 error = ESPIPE;
2241 goto out;
2242 }
2243
2244 offset = SCARG(uap, offset);
2245
2246 /*
2247 * XXX This works because no file systems actually
2248 * XXX take any action on the seek operation.
2249 */
2250 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2251 goto out;
2252
2253 /* dofileread() will unuse the descriptor for us */
2254 return (dofileread(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2255 &offset, 0, retval));
2256
2257 out:
2258 FILE_UNUSE(fp, l);
2259 return (error);
2260 }
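/*
 * Illustrative sketch: dofileread() is called with a flags argument of 0
 * rather than FOF_UPDATE_OFFSET, so a positional read does not move the
 * descriptor's file offset (fd and the 4096 byte offset are assumed):
 *
 *	char buf[512];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 4096);
 *
 * A later read(fd, ...) still starts at the offset it would have used
 * before the pread() call.
 */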
2261
2262 /*
2263 * Positional scatter read system call.
2264 */
2265 int
2266 sys_preadv(struct lwp *l, const struct sys_preadv_args *uap, register_t *retval)
2267 {
2268 /* {
2269 syscallarg(int) fd;
2270 syscallarg(const struct iovec *) iovp;
2271 syscallarg(int) iovcnt;
2272 syscallarg(off_t) offset;
2273 } */
2274 off_t offset = SCARG(uap, offset);
2275
2276 return do_filereadv(SCARG(uap, fd), SCARG(uap, iovp),
2277 SCARG(uap, iovcnt), &offset, 0, retval);
2278 }
2279
2280 /*
2281 * Positional write system call.
2282 */
2283 int
2284 sys_pwrite(struct lwp *l, const struct sys_pwrite_args *uap, register_t *retval)
2285 {
2286 /* {
2287 syscallarg(int) fd;
2288 syscallarg(const void *) buf;
2289 syscallarg(size_t) nbyte;
2290 syscallarg(off_t) offset;
2291 } */
2292 struct proc *p = l->l_proc;
2293 struct filedesc *fdp = p->p_fd;
2294 struct file *fp;
2295 struct vnode *vp;
2296 off_t offset;
2297 int error, fd = SCARG(uap, fd);
2298
2299 if ((fp = fd_getfile(fdp, fd)) == NULL)
2300 return (EBADF);
2301
2302 if ((fp->f_flag & FWRITE) == 0) {
2303 FILE_UNLOCK(fp);
2304 return (EBADF);
2305 }
2306
2307 FILE_USE(fp);
2308
2309 vp = (struct vnode *)fp->f_data;
2310 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
2311 error = ESPIPE;
2312 goto out;
2313 }
2314
2315 offset = SCARG(uap, offset);
2316
2317 /*
2318 * XXX This works because no file systems actually
2319 * XXX take any action on the seek operation.
2320 */
2321 if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
2322 goto out;
2323
2324 /* dofilewrite() will unuse the descriptor for us */
2325 return (dofilewrite(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
2326 &offset, 0, retval));
2327
2328 out:
2329 FILE_UNUSE(fp, l);
2330 return (error);
2331 }
2332
2333 /*
2334 * Positional gather write system call.
2335 */
2336 int
2337 sys_pwritev(struct lwp *l, const struct sys_pwritev_args *uap, register_t *retval)
2338 {
2339 /* {
2340 syscallarg(int) fd;
2341 syscallarg(const struct iovec *) iovp;
2342 syscallarg(int) iovcnt;
2343 syscallarg(off_t) offset;
2344 } */
2345 off_t offset = SCARG(uap, offset);
2346
2347 return do_filewritev(SCARG(uap, fd), SCARG(uap, iovp),
2348 SCARG(uap, iovcnt), &offset, 0, retval);
2349 }
2350
2351 /*
2352 * Check access permissions.
2353 */
2354 int
2355 sys_access(struct lwp *l, const struct sys_access_args *uap, register_t *retval)
2356 {
2357 /* {
2358 syscallarg(const char *) path;
2359 syscallarg(int) flags;
2360 } */
2361 kauth_cred_t cred;
2362 struct vnode *vp;
2363 int error, flags;
2364 struct nameidata nd;
2365
2366 cred = kauth_cred_dup(l->l_cred);
2367 kauth_cred_seteuid(cred, kauth_cred_getuid(l->l_cred));
2368 kauth_cred_setegid(cred, kauth_cred_getgid(l->l_cred));
2369 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2370 SCARG(uap, path));
2371 /* Override default credentials */
2372 nd.ni_cnd.cn_cred = cred;
2373 if ((error = namei(&nd)) != 0)
2374 goto out;
2375 vp = nd.ni_vp;
2376
2377 /* Flags == 0 means only check for existence. */
2378 if (SCARG(uap, flags)) {
2379 flags = 0;
2380 if (SCARG(uap, flags) & R_OK)
2381 flags |= VREAD;
2382 if (SCARG(uap, flags) & W_OK)
2383 flags |= VWRITE;
2384 if (SCARG(uap, flags) & X_OK)
2385 flags |= VEXEC;
2386
2387 error = VOP_ACCESS(vp, flags, cred);
2388 if (!error && (flags & VWRITE))
2389 error = vn_writechk(vp);
2390 }
2391 vput(vp);
2392 out:
2393 kauth_cred_free(cred);
2394 return (error);
2395 }
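/*
 * Illustrative sketch: the duplicated credential above has its effective
 * ids reset to the real ids, so access(2) answers for the invoking user
 * even from a set-user-id program (the path is hypothetical):
 *
 *	if (access("/var/db/example.db", W_OK) == 0)
 *		fd = open("/var/db/example.db", O_RDWR);
 *
 * The usual check-then-use caveat applies: permissions can change
 * between the two calls.
 */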
2396
2397 /*
2398 * Common code for all sys_stat functions, including compat versions.
2399 */
2400 int
2401 do_sys_stat(struct lwp *l, const char *path, unsigned int nd_flags,
2402 struct stat *sb)
2403 {
2404 int error;
2405 struct nameidata nd;
2406
2407 NDINIT(&nd, LOOKUP, nd_flags | LOCKLEAF | TRYEMULROOT,
2408 UIO_USERSPACE, path);
2409 error = namei(&nd);
2410 if (error != 0)
2411 return error;
2412 error = vn_stat(nd.ni_vp, sb, l);
2413 vput(nd.ni_vp);
2414 return error;
2415 }
2416
2417 /*
2418 * Get file status; this version follows links.
2419 */
2420 /* ARGSUSED */
2421 int
2422 sys___stat30(struct lwp *l, const struct sys___stat30_args *uap, register_t *retval)
2423 {
2424 /* {
2425 syscallarg(const char *) path;
2426 syscallarg(struct stat *) ub;
2427 } */
2428 struct stat sb;
2429 int error;
2430
2431 error = do_sys_stat(l, SCARG(uap, path), FOLLOW, &sb);
2432 if (error)
2433 return error;
2434 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2435 }
2436
2437 /*
2438 * Get file status; this version does not follow links.
2439 */
2440 /* ARGSUSED */
2441 int
2442 sys___lstat30(struct lwp *l, const struct sys___lstat30_args *uap, register_t *retval)
2443 {
2444 /* {
2445 syscallarg(const char *) path;
2446 syscallarg(struct stat *) ub;
2447 } */
2448 struct stat sb;
2449 int error;
2450
2451 error = do_sys_stat(l, SCARG(uap, path), NOFOLLOW, &sb);
2452 if (error)
2453 return error;
2454 return copyout(&sb, SCARG(uap, ub), sizeof(sb));
2455 }
2456
2457 /*
2458 * Get configurable pathname variables.
2459 */
2460 /* ARGSUSED */
2461 int
2462 sys_pathconf(struct lwp *l, const struct sys_pathconf_args *uap, register_t *retval)
2463 {
2464 /* {
2465 syscallarg(const char *) path;
2466 syscallarg(int) name;
2467 } */
2468 int error;
2469 struct nameidata nd;
2470
2471 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2472 SCARG(uap, path));
2473 if ((error = namei(&nd)) != 0)
2474 return (error);
2475 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), retval);
2476 vput(nd.ni_vp);
2477 return (error);
2478 }
2479
2480 /*
2481 * Return target name of a symbolic link.
2482 */
2483 /* ARGSUSED */
2484 int
2485 sys_readlink(struct lwp *l, const struct sys_readlink_args *uap, register_t *retval)
2486 {
2487 /* {
2488 syscallarg(const char *) path;
2489 syscallarg(char *) buf;
2490 syscallarg(size_t) count;
2491 } */
2492 struct vnode *vp;
2493 struct iovec aiov;
2494 struct uio auio;
2495 int error;
2496 struct nameidata nd;
2497
2498 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
2499 SCARG(uap, path));
2500 if ((error = namei(&nd)) != 0)
2501 return (error);
2502 vp = nd.ni_vp;
2503 if (vp->v_type != VLNK)
2504 error = EINVAL;
2505 else if (!(vp->v_mount->mnt_flag & MNT_SYMPERM) ||
2506 (error = VOP_ACCESS(vp, VREAD, l->l_cred)) == 0) {
2507 aiov.iov_base = SCARG(uap, buf);
2508 aiov.iov_len = SCARG(uap, count);
2509 auio.uio_iov = &aiov;
2510 auio.uio_iovcnt = 1;
2511 auio.uio_offset = 0;
2512 auio.uio_rw = UIO_READ;
2513 KASSERT(l == curlwp);
2514 auio.uio_vmspace = l->l_proc->p_vmspace;
2515 auio.uio_resid = SCARG(uap, count);
2516 error = VOP_READLINK(vp, &auio, l->l_cred);
2517 }
2518 vput(vp);
2519 *retval = SCARG(uap, count) - auio.uio_resid;
2520 return (error);
2521 }
2522
2523 /*
2524 * Change flags of a file given a path name.
2525 */
2526 /* ARGSUSED */
2527 int
2528 sys_chflags(struct lwp *l, const struct sys_chflags_args *uap, register_t *retval)
2529 {
2530 /* {
2531 syscallarg(const char *) path;
2532 syscallarg(u_long) flags;
2533 } */
2534 struct vnode *vp;
2535 int error;
2536 struct nameidata nd;
2537
2538 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2539 SCARG(uap, path));
2540 if ((error = namei(&nd)) != 0)
2541 return (error);
2542 vp = nd.ni_vp;
2543 error = change_flags(vp, SCARG(uap, flags), l);
2544 vput(vp);
2545 return (error);
2546 }
2547
2548 /*
2549 * Change flags of a file given a file descriptor.
2550 */
2551 /* ARGSUSED */
2552 int
2553 sys_fchflags(struct lwp *l, const struct sys_fchflags_args *uap, register_t *retval)
2554 {
2555 /* {
2556 syscallarg(int) fd;
2557 syscallarg(u_long) flags;
2558 } */
2559 struct proc *p = l->l_proc;
2560 struct vnode *vp;
2561 struct file *fp;
2562 int error;
2563
2564 /* getvnode() will use the descriptor for us */
2565 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2566 return (error);
2567 vp = (struct vnode *)fp->f_data;
2568 error = change_flags(vp, SCARG(uap, flags), l);
2569 VOP_UNLOCK(vp, 0);
2570 FILE_UNUSE(fp, l);
2571 return (error);
2572 }
2573
2574 /*
2575 * Change flags of a file given a path name; this version does
2576 * not follow links.
2577 */
2578 int
2579 sys_lchflags(struct lwp *l, const struct sys_lchflags_args *uap, register_t *retval)
2580 {
2581 /* {
2582 syscallarg(const char *) path;
2583 syscallarg(u_long) flags;
2584 } */
2585 struct vnode *vp;
2586 int error;
2587 struct nameidata nd;
2588
2589 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2590 SCARG(uap, path));
2591 if ((error = namei(&nd)) != 0)
2592 return (error);
2593 vp = nd.ni_vp;
2594 error = change_flags(vp, SCARG(uap, flags), l);
2595 vput(vp);
2596 return (error);
2597 }
2598
2599 /*
2600 * Common routine to change flags of a file.
2601 */
2602 static int
2603 change_flags(struct vnode *vp, u_long flags, struct lwp *l)
2604 {
2605 struct vattr vattr;
2606 int error;
2607
2608 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2609 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2610 /*
2611 * Non-superusers cannot change the flags on devices, even if they
2612 * own them.
2613 */
2614 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
2615 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2616 goto out;
2617 if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2618 error = EINVAL;
2619 goto out;
2620 }
2621 }
2622 VATTR_NULL(&vattr);
2623 vattr.va_flags = flags;
2624 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2625 out:
2626 return (error);
2627 }
2628
2629 /*
2630 * Change mode of a file given path name; this version follows links.
2631 */
2632 /* ARGSUSED */
2633 int
2634 sys_chmod(struct lwp *l, const struct sys_chmod_args *uap, register_t *retval)
2635 {
2636 /* {
2637 syscallarg(const char *) path;
2638 syscallarg(int) mode;
2639 } */
2640 int error;
2641 struct nameidata nd;
2642
2643 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2644 SCARG(uap, path));
2645 if ((error = namei(&nd)) != 0)
2646 return (error);
2647
2648 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2649
2650 vrele(nd.ni_vp);
2651 return (error);
2652 }
2653
2654 /*
2655 * Change mode of a file given a file descriptor.
2656 */
2657 /* ARGSUSED */
2658 int
2659 sys_fchmod(struct lwp *l, const struct sys_fchmod_args *uap, register_t *retval)
2660 {
2661 /* {
2662 syscallarg(int) fd;
2663 syscallarg(int) mode;
2664 } */
2665 struct proc *p = l->l_proc;
2666 struct file *fp;
2667 int error;
2668
2669 /* getvnode() will use the descriptor for us */
2670 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2671 return (error);
2672
2673 error = change_mode((struct vnode *)fp->f_data, SCARG(uap, mode), l);
2674 FILE_UNUSE(fp, l);
2675 return (error);
2676 }
2677
2678 /*
2679 * Change mode of a file given path name; this version does not follow links.
2680 */
2681 /* ARGSUSED */
2682 int
2683 sys_lchmod(struct lwp *l, const struct sys_lchmod_args *uap, register_t *retval)
2684 {
2685 /* {
2686 syscallarg(const char *) path;
2687 syscallarg(int) mode;
2688 } */
2689 int error;
2690 struct nameidata nd;
2691
2692 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2693 SCARG(uap, path));
2694 if ((error = namei(&nd)) != 0)
2695 return (error);
2696
2697 error = change_mode(nd.ni_vp, SCARG(uap, mode), l);
2698
2699 vrele(nd.ni_vp);
2700 return (error);
2701 }
2702
2703 /*
2704 * Common routine to set mode given a vnode.
2705 */
2706 static int
2707 change_mode(struct vnode *vp, int mode, struct lwp *l)
2708 {
2709 struct vattr vattr;
2710 int error;
2711
2712 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2713 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2714 VATTR_NULL(&vattr);
2715 vattr.va_mode = mode & ALLPERMS;
2716 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2717 VOP_UNLOCK(vp, 0);
2718 return (error);
2719 }
2720
2721 /*
2722 * Set ownership given a path name; this version follows links.
2723 */
2724 /* ARGSUSED */
2725 int
2726 sys_chown(struct lwp *l, const struct sys_chown_args *uap, register_t *retval)
2727 {
2728 /* {
2729 syscallarg(const char *) path;
2730 syscallarg(uid_t) uid;
2731 syscallarg(gid_t) gid;
2732 } */
2733 int error;
2734 struct nameidata nd;
2735
2736 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2737 SCARG(uap, path));
2738 if ((error = namei(&nd)) != 0)
2739 return (error);
2740
2741 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2742
2743 vrele(nd.ni_vp);
2744 return (error);
2745 }
2746
2747 /*
2748 * Set ownership given a path name; this version follows links.
2749 * Provides POSIX semantics.
2750 */
2751 /* ARGSUSED */
2752 int
2753 sys___posix_chown(struct lwp *l, const struct sys___posix_chown_args *uap, register_t *retval)
2754 {
2755 /* {
2756 syscallarg(const char *) path;
2757 syscallarg(uid_t) uid;
2758 syscallarg(gid_t) gid;
2759 } */
2760 int error;
2761 struct nameidata nd;
2762
2763 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
2764 SCARG(uap, path));
2765 if ((error = namei(&nd)) != 0)
2766 return (error);
2767
2768 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2769
2770 vrele(nd.ni_vp);
2771 return (error);
2772 }
2773
2774 /*
2775 * Set ownership given a file descriptor.
2776 */
2777 /* ARGSUSED */
2778 int
2779 sys_fchown(struct lwp *l, const struct sys_fchown_args *uap, register_t *retval)
2780 {
2781 /* {
2782 syscallarg(int) fd;
2783 syscallarg(uid_t) uid;
2784 syscallarg(gid_t) gid;
2785 } */
2786 struct proc *p = l->l_proc;
2787 int error;
2788 struct file *fp;
2789
2790 /* getvnode() will use the descriptor for us */
2791 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2792 return (error);
2793
2794 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2795 SCARG(uap, gid), l, 0);
2796 FILE_UNUSE(fp, l);
2797 return (error);
2798 }
2799
2800 /*
2801 * Set ownership given a file descriptor, providing POSIX/XPG semantics.
2802 */
2803 /* ARGSUSED */
2804 int
2805 sys___posix_fchown(struct lwp *l, const struct sys___posix_fchown_args *uap, register_t *retval)
2806 {
2807 /* {
2808 syscallarg(int) fd;
2809 syscallarg(uid_t) uid;
2810 syscallarg(gid_t) gid;
2811 } */
2812 struct proc *p = l->l_proc;
2813 int error;
2814 struct file *fp;
2815
2816 /* getvnode() will use the descriptor for us */
2817 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2818 return (error);
2819
2820 error = change_owner((struct vnode *)fp->f_data, SCARG(uap, uid),
2821 SCARG(uap, gid), l, 1);
2822 FILE_UNUSE(fp, l);
2823 return (error);
2824 }
2825
2826 /*
2827 * Set ownership given a path name; this version does not follow links.
2828 */
2829 /* ARGSUSED */
2830 int
2831 sys_lchown(struct lwp *l, const struct sys_lchown_args *uap, register_t *retval)
2832 {
2833 /* {
2834 syscallarg(const char *) path;
2835 syscallarg(uid_t) uid;
2836 syscallarg(gid_t) gid;
2837 } */
2838 int error;
2839 struct nameidata nd;
2840
2841 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2842 SCARG(uap, path));
2843 if ((error = namei(&nd)) != 0)
2844 return (error);
2845
2846 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 0);
2847
2848 vrele(nd.ni_vp);
2849 return (error);
2850 }
2851
2852 /*
2853 * Set ownership given a path name; this version does not follow links.
2854 * Provides POSIX/XPG semantics.
2855 */
2856 /* ARGSUSED */
2857 int
2858 sys___posix_lchown(struct lwp *l, const struct sys___posix_lchown_args *uap, register_t *retval)
2859 {
2860 /* {
2861 syscallarg(const char *) path;
2862 syscallarg(uid_t) uid;
2863 syscallarg(gid_t) gid;
2864 } */
2865 int error;
2866 struct nameidata nd;
2867
2868 NDINIT(&nd, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_USERSPACE,
2869 SCARG(uap, path));
2870 if ((error = namei(&nd)) != 0)
2871 return (error);
2872
2873 error = change_owner(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid), l, 1);
2874
2875 vrele(nd.ni_vp);
2876 return (error);
2877 }
2878
2879 /*
2880 * Common routine to set ownership given a vnode.
2881 */
2882 static int
2883 change_owner(struct vnode *vp, uid_t uid, gid_t gid, struct lwp *l,
2884 int posix_semantics)
2885 {
2886 struct vattr vattr;
2887 mode_t newmode;
2888 int error;
2889
2890 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
2891 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2892 if ((error = VOP_GETATTR(vp, &vattr, l->l_cred)) != 0)
2893 goto out;
2894
2895 #define CHANGED(x) ((int)(x) != -1)
2896 newmode = vattr.va_mode;
2897 if (posix_semantics) {
2898 /*
2899 * POSIX/XPG semantics: if the caller is not the super-user,
2900 * clear set-user-id and set-group-id bits. Both POSIX and
2901 * the XPG consider the behaviour for calls by the super-user
2902 * implementation-defined; we leave the set-user-id and set-
2903 * group-id settings intact in that case.
2904 */
2905 if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
2906 NULL) != 0)
2907 newmode &= ~(S_ISUID | S_ISGID);
2908 } else {
2909 /*
2910 * NetBSD semantics: when changing owner and/or group,
2911 * clear the respective bit(s).
2912 */
2913 if (CHANGED(uid))
2914 newmode &= ~S_ISUID;
2915 if (CHANGED(gid))
2916 newmode &= ~S_ISGID;
2917 }
2918 /* Update va_mode iff altered. */
2919 if (vattr.va_mode == newmode)
2920 newmode = VNOVAL;
2921
2922 VATTR_NULL(&vattr);
2923 vattr.va_uid = CHANGED(uid) ? uid : (uid_t)VNOVAL;
2924 vattr.va_gid = CHANGED(gid) ? gid : (gid_t)VNOVAL;
2925 vattr.va_mode = newmode;
2926 error = VOP_SETATTR(vp, &vattr, l->l_cred);
2927 #undef CHANGED
2928
2929 out:
2930 VOP_UNLOCK(vp, 0);
2931 return (error);
2932 }
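/*
 * Illustrative sketch: with posix_semantics == 0 (sys_chown() and
 * friends), changing the owner of a set-user-id file clears S_ISUID for
 * every caller, the super-user included; with posix_semantics == 1 (the
 * sys___posix_* entry points), super-user callers keep the set-id bits.
 * Assuming a file "prog" installed mode 04755, after
 *
 *	chown("prog", newuid, (gid_t)-1);
 *
 * through the non-POSIX path, stat(2) reports mode 0755 rather than
 * 04755.
 */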
2933
2934 /*
2935 * Set the access and modification times given a path name; this
2936 * version follows links.
2937 */
2938 /* ARGSUSED */
2939 int
2940 sys_utimes(struct lwp *l, const struct sys_utimes_args *uap, register_t *retval)
2941 {
2942 /* {
2943 syscallarg(const char *) path;
2944 syscallarg(const struct timeval *) tptr;
2945 } */
2946
2947 return do_sys_utimes(l, NULL, SCARG(uap, path), FOLLOW,
2948 SCARG(uap, tptr), UIO_USERSPACE);
2949 }
2950
2951 /*
2952 * Set the access and modification times given a file descriptor.
2953 */
2954 /* ARGSUSED */
2955 int
2956 sys_futimes(struct lwp *l, const struct sys_futimes_args *uap, register_t *retval)
2957 {
2958 /* {
2959 syscallarg(int) fd;
2960 syscallarg(const struct timeval *) tptr;
2961 } */
2962 int error;
2963 struct file *fp;
2964
2965 /* getvnode() will use the descriptor for us */
2966 if ((error = getvnode(l->l_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
2967 return (error);
2968
2969 error = do_sys_utimes(l, fp->f_data, NULL, 0,
2970 SCARG(uap, tptr), UIO_USERSPACE);
2971
2972 FILE_UNUSE(fp, l);
2973 return (error);
2974 }
2975
2976 /*
2977 * Set the access and modification times given a path name; this
2978 * version does not follow links.
2979 */
2980 int
2981 sys_lutimes(struct lwp *l, const struct sys_lutimes_args *uap, register_t *retval)
2982 {
2983 /* {
2984 syscallarg(const char *) path;
2985 syscallarg(const struct timeval *) tptr;
2986 } */
2987
2988 return do_sys_utimes(l, NULL, SCARG(uap, path), NOFOLLOW,
2989 SCARG(uap, tptr), UIO_USERSPACE);
2990 }
2991
2992 /*
2993 * Common routine to set access and modification times given a vnode.
2994 */
2995 int
2996 do_sys_utimes(struct lwp *l, struct vnode *vp, const char *path, int flag,
2997 const struct timeval *tptr, enum uio_seg seg)
2998 {
2999 struct vattr vattr;
3000 struct nameidata nd;
3001 int error;
3002
3003 VATTR_NULL(&vattr);
3004 if (tptr == NULL) {
3005 nanotime(&vattr.va_atime);
3006 vattr.va_mtime = vattr.va_atime;
3007 vattr.va_vaflags |= VA_UTIMES_NULL;
3008 } else {
3009 struct timeval tv[2];
3010
3011 if (seg != UIO_SYSSPACE) {
3012 error = copyin(tptr, &tv, sizeof (tv));
3013 if (error != 0)
3014 return error;
3015 tptr = tv;
3016 }
3017 TIMEVAL_TO_TIMESPEC(tptr, &vattr.va_atime);
3018 TIMEVAL_TO_TIMESPEC(tptr + 1, &vattr.va_mtime);
3019 }
3020
3021 if (vp == NULL) {
3022 NDINIT(&nd, LOOKUP, flag | TRYEMULROOT, UIO_USERSPACE, path);
3023 if ((error = namei(&nd)) != 0)
3024 return (error);
3025 vp = nd.ni_vp;
3026 } else
3027 nd.ni_vp = NULL;
3028
3029 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3030 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3031 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3032 VOP_UNLOCK(vp, 0);
3033
3034 if (nd.ni_vp != NULL)
3035 vrele(nd.ni_vp);
3036
3037 return (error);
3038 }
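/*
 * Illustrative sketch: a NULL tptr takes the nanotime() branch above, so
 * both timestamps become "now" and VA_UTIMES_NULL lets the file system
 * accept the request from a caller with write permission instead of
 * requiring ownership ("file" is a hypothetical path):
 *
 *	struct timeval tv[2] = { { 0, 0 }, { 0, 0 } };
 *	utimes("file", tv);
 *	utimes("file", NULL);
 *
 * The first call sets both times to the Epoch; the second resets them to
 * the current time.
 */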
3039
3040 /*
3041 * Truncate a file given its path name.
3042 */
3043 /* ARGSUSED */
3044 int
3045 sys_truncate(struct lwp *l, const struct sys_truncate_args *uap, register_t *retval)
3046 {
3047 /* {
3048 syscallarg(const char *) path;
3049 syscallarg(int) pad;
3050 syscallarg(off_t) length;
3051 } */
3052 struct vnode *vp;
3053 struct vattr vattr;
3054 int error;
3055 struct nameidata nd;
3056
3057 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3058 SCARG(uap, path));
3059 if ((error = namei(&nd)) != 0)
3060 return (error);
3061 vp = nd.ni_vp;
3062 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3063 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3064 if (vp->v_type == VDIR)
3065 error = EISDIR;
3066 else if ((error = vn_writechk(vp)) == 0 &&
3067 (error = VOP_ACCESS(vp, VWRITE, l->l_cred)) == 0) {
3068 VATTR_NULL(&vattr);
3069 vattr.va_size = SCARG(uap, length);
3070 error = VOP_SETATTR(vp, &vattr, l->l_cred);
3071 }
3072 vput(vp);
3073 return (error);
3074 }
3075
3076 /*
3077 * Truncate a file given a file descriptor.
3078 */
3079 /* ARGSUSED */
3080 int
3081 sys_ftruncate(struct lwp *l, const struct sys_ftruncate_args *uap, register_t *retval)
3082 {
3083 /* {
3084 syscallarg(int) fd;
3085 syscallarg(int) pad;
3086 syscallarg(off_t) length;
3087 } */
3088 struct proc *p = l->l_proc;
3089 struct vattr vattr;
3090 struct vnode *vp;
3091 struct file *fp;
3092 int error;
3093
3094 /* getvnode() will use the descriptor for us */
3095 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3096 return (error);
3097 if ((fp->f_flag & FWRITE) == 0) {
3098 error = EINVAL;
3099 goto out;
3100 }
3101 vp = (struct vnode *)fp->f_data;
3102 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3103 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3104 if (vp->v_type == VDIR)
3105 error = EISDIR;
3106 else if ((error = vn_writechk(vp)) == 0) {
3107 VATTR_NULL(&vattr);
3108 vattr.va_size = SCARG(uap, length);
3109 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
3110 }
3111 VOP_UNLOCK(vp, 0);
3112 out:
3113 FILE_UNUSE(fp, l);
3114 return (error);
3115 }
3116
3117 /*
3118 * Sync an open file.
3119 */
3120 /* ARGSUSED */
3121 int
3122 sys_fsync(struct lwp *l, const struct sys_fsync_args *uap, register_t *retval)
3123 {
3124 /* {
3125 syscallarg(int) fd;
3126 } */
3127 struct proc *p = l->l_proc;
3128 struct vnode *vp;
3129 struct file *fp;
3130 int error;
3131
3132 /* getvnode() will use the descriptor for us */
3133 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3134 return (error);
3135 vp = (struct vnode *)fp->f_data;
3136 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3137 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT, 0, 0);
3138 if (error == 0 && bioopsp != NULL &&
3139 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3140 (*bioopsp->io_fsync)(vp, 0);
3141 VOP_UNLOCK(vp, 0);
3142 FILE_UNUSE(fp, l);
3143 return (error);
3144 }
3145
3146 /*
3147 * Sync a range of file data. API modeled after that found in AIX.
3148 *
3149 * FDATASYNC indicates that we need only save enough metadata to be able
3150 * to re-read the written data. Note we duplicate AIX's requirement that
3151 * the file be open for writing.
3152 */
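/*
 * Illustrative sketch, assuming the fsync_range(2) wrapper declared in
 * <unistd.h> on NetBSD (fd, offset and length are assumed): flush only
 * the data blocks backing a written region and push them through the
 * drive's write cache:
 *
 *	if (fsync_range(fd, FDATASYNC | FDISKSYNC, offset, length) == -1)
 *		err(1, "fsync_range");
 *
 * Exactly one of FDATASYNC or FFILESYNC must be given; passing both or
 * neither fails with EINVAL, as checked below.
 */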
3153 /* ARGSUSED */
3154 int
3155 sys_fsync_range(struct lwp *l, const struct sys_fsync_range_args *uap, register_t *retval)
3156 {
3157 /* {
3158 syscallarg(int) fd;
3159 syscallarg(int) flags;
3160 syscallarg(off_t) start;
3161 syscallarg(off_t) length;
3162 } */
3163 struct proc *p = l->l_proc;
3164 struct vnode *vp;
3165 struct file *fp;
3166 int flags, nflags;
3167 off_t s, e, len;
3168 int error;
3169
3170 /* getvnode() will use the descriptor for us */
3171 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3172 return (error);
3173
3174 if ((fp->f_flag & FWRITE) == 0) {
3175 error = EBADF;
3176 goto out;
3177 }
3178
3179 flags = SCARG(uap, flags);
3180 if (((flags & (FDATASYNC | FFILESYNC)) == 0) ||
3181 ((~flags & (FDATASYNC | FFILESYNC)) == 0)) {
3182 error = EINVAL;
3183 goto out;
3184 }
3185 /* Now set up the flags for value(s) to pass to VOP_FSYNC() */
3186 if (flags & FDATASYNC)
3187 nflags = FSYNC_DATAONLY | FSYNC_WAIT;
3188 else
3189 nflags = FSYNC_WAIT;
3190 if (flags & FDISKSYNC)
3191 nflags |= FSYNC_CACHE;
3192
3193 len = SCARG(uap, length);
3194 /* If length == 0, we do the whole file, and s = e = 0 will do that */
3195 if (len) {
3196 s = SCARG(uap, start);
3197 e = s + len;
3198 if (e < s) {
3199 error = EINVAL;
3200 goto out;
3201 }
3202 } else {
3203 e = 0;
3204 s = 0;
3205 }
3206
3207 vp = (struct vnode *)fp->f_data;
3208 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3209 error = VOP_FSYNC(vp, fp->f_cred, nflags, s, e);
3210
3211 if (error == 0 && bioopsp != NULL &&
3212 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
3213 (*bioopsp->io_fsync)(vp, nflags);
3214
3215 VOP_UNLOCK(vp, 0);
3216 out:
3217 FILE_UNUSE(fp, l);
3218 return (error);
3219 }
3220
3221 /*
3222 * Sync the data of an open file.
3223 */
3224 /* ARGSUSED */
3225 int
3226 sys_fdatasync(struct lwp *l, const struct sys_fdatasync_args *uap, register_t *retval)
3227 {
3228 /* {
3229 syscallarg(int) fd;
3230 } */
3231 struct proc *p = l->l_proc;
3232 struct vnode *vp;
3233 struct file *fp;
3234 int error;
3235
3236 /* getvnode() will use the descriptor for us */
3237 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3238 return (error);
3239 if ((fp->f_flag & FWRITE) == 0) {
3240 FILE_UNUSE(fp, l);
3241 return (EBADF);
3242 }
3243 vp = (struct vnode *)fp->f_data;
3244 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3245 error = VOP_FSYNC(vp, fp->f_cred, FSYNC_WAIT|FSYNC_DATAONLY, 0, 0);
3246 VOP_UNLOCK(vp, 0);
3247 FILE_UNUSE(fp, l);
3248 return (error);
3249 }
3250
3251 /*
3252 * Rename files, (standard) BSD semantics frontend.
3253 */
3254 /* ARGSUSED */
3255 int
3256 sys_rename(struct lwp *l, const struct sys_rename_args *uap, register_t *retval)
3257 {
3258 /* {
3259 syscallarg(const char *) from;
3260 syscallarg(const char *) to;
3261 } */
3262
3263 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 0));
3264 }
3265
3266 /*
3267 * Rename files, POSIX semantics frontend.
3268 */
3269 /* ARGSUSED */
3270 int
3271 sys___posix_rename(struct lwp *l, const struct sys___posix_rename_args *uap, register_t *retval)
3272 {
3273 /* {
3274 syscallarg(const char *) from;
3275 syscallarg(const char *) to;
3276 } */
3277
3278 return (do_sys_rename(SCARG(uap, from), SCARG(uap, to), UIO_USERSPACE, 1));
3279 }
3280
3281 /*
3282 * Rename files. Source and destination must either both be directories,
3283 * or both not be directories. If target is a directory, it must be empty.
3284 * If `from' and `to' refer to the same object, the value of the `retain'
3285 * argument is used to determine whether `from' will be
3286 *
3287 * (retain == 0) deleted unless `from' and `to' refer to the same
3288 * object in the file system's name space (BSD).
3289 * (retain == 1) always retained (POSIX).
3290 */
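/*
 * Illustrative sketch: if "a" and "b" are hard links to the same file,
 *
 *	link("a", "b");
 *	rename("a", "b");
 *
 * removes "a" under the BSD semantics (retain == 0, sys_rename()), but
 * leaves both names in place and succeeds under the POSIX semantics
 * (retain == 1, sys___posix_rename()).
 */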
3291 int
3292 do_sys_rename(const char *from, const char *to, enum uio_seg seg, int retain)
3293 {
3294 struct vnode *tvp, *fvp, *tdvp;
3295 struct nameidata fromnd, tond;
3296 struct lwp *l = curlwp;
3297 struct proc *p;
3298 int error;
3299
3300 NDINIT(&fromnd, DELETE, LOCKPARENT | SAVESTART | TRYEMULROOT,
3301 seg, from);
3302 if ((error = namei(&fromnd)) != 0)
3303 return (error);
3304 if (fromnd.ni_dvp != fromnd.ni_vp)
3305 VOP_UNLOCK(fromnd.ni_dvp, 0);
3306 fvp = fromnd.ni_vp;
3307 NDINIT(&tond, RENAME,
3308 LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | TRYEMULROOT
3309 | (fvp->v_type == VDIR ? CREATEDIR : 0),
3310 seg, to);
3311 if ((error = namei(&tond)) != 0) {
3312 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3313 vrele(fromnd.ni_dvp);
3314 vrele(fvp);
3315 goto out1;
3316 }
3317 tdvp = tond.ni_dvp;
3318 tvp = tond.ni_vp;
3319
3320 if (tvp != NULL) {
3321 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
3322 error = ENOTDIR;
3323 goto out;
3324 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
3325 error = EISDIR;
3326 goto out;
3327 }
3328 }
3329
3330 if (fvp == tdvp)
3331 error = EINVAL;
3332
3333 /*
3334 * Source and destination refer to the same object.
3335 */
3336 if (fvp == tvp) {
3337 if (retain)
3338 error = -1;
3339 else if (fromnd.ni_dvp == tdvp &&
3340 fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
3341 !memcmp(fromnd.ni_cnd.cn_nameptr,
3342 tond.ni_cnd.cn_nameptr,
3343 fromnd.ni_cnd.cn_namelen))
3344 error = -1;
3345 }
3346
3347 #if NVERIEXEC > 0
3348 if (!error) {
3349 char *f1, *f2;
3350
3351 f1 = malloc(fromnd.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3352 strlcpy(f1, fromnd.ni_cnd.cn_nameptr, fromnd.ni_cnd.cn_namelen + 1);
3353
3354 f2 = malloc(tond.ni_cnd.cn_namelen + 1, M_TEMP, M_WAITOK);
3355 strlcpy(f2, tond.ni_cnd.cn_nameptr, tond.ni_cnd.cn_namelen + 1);
3356
3357 error = veriexec_renamechk(l, fvp, f1, tvp, f2);
3358
3359 free(f1, M_TEMP);
3360 free(f2, M_TEMP);
3361 }
3362 #endif /* NVERIEXEC > 0 */
3363
3364 out:
3365 p = l->l_proc;
3366 if (!error) {
3367 VOP_LEASE(tdvp, l->l_cred, LEASE_WRITE);
3368 if (fromnd.ni_dvp != tdvp)
3369 VOP_LEASE(fromnd.ni_dvp, l->l_cred, LEASE_WRITE);
3370 if (tvp) {
3371 VOP_LEASE(tvp, l->l_cred, LEASE_WRITE);
3372 }
3373 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
3374 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
3375 } else {
3376 VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd);
3377 if (tdvp == tvp)
3378 vrele(tdvp);
3379 else
3380 vput(tdvp);
3381 if (tvp)
3382 vput(tvp);
3383 VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
3384 vrele(fromnd.ni_dvp);
3385 vrele(fvp);
3386 }
3387 vrele(tond.ni_startdir);
3388 PNBUF_PUT(tond.ni_cnd.cn_pnbuf);
3389 out1:
3390 if (fromnd.ni_startdir)
3391 vrele(fromnd.ni_startdir);
3392 PNBUF_PUT(fromnd.ni_cnd.cn_pnbuf);
3393 return (error == -1 ? 0 : error);
3394 }
3395
3396 /*
3397 * Make a directory file.
3398 */
3399 /* ARGSUSED */
3400 int
3401 sys_mkdir(struct lwp *l, const struct sys_mkdir_args *uap, register_t *retval)
3402 {
3403 /* {
3404 syscallarg(const char *) path;
3405 syscallarg(int) mode;
3406 } */
3407 struct proc *p = l->l_proc;
3408 struct vnode *vp;
3409 struct vattr vattr;
3410 int error;
3411 struct nameidata nd;
3412
3413 NDINIT(&nd, CREATE, LOCKPARENT | CREATEDIR | TRYEMULROOT, UIO_USERSPACE,
3414 SCARG(uap, path));
3415 if ((error = namei(&nd)) != 0)
3416 return (error);
3417 vp = nd.ni_vp;
3418 if (vp != NULL) {
3419 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3420 if (nd.ni_dvp == vp)
3421 vrele(nd.ni_dvp);
3422 else
3423 vput(nd.ni_dvp);
3424 vrele(vp);
3425 return (EEXIST);
3426 }
3427 VATTR_NULL(&vattr);
3428 vattr.va_type = VDIR;
3429 /* We will read cwdi->cwdi_cmask unlocked. */
3430 vattr.va_mode =
3431 (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_cwdi->cwdi_cmask;
3432 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3433 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
3434 if (!error)
3435 vput(nd.ni_vp);
3436 return (error);
3437 }
3438
3439 /*
3440 * Remove a directory file.
3441 */
3442 /* ARGSUSED */
3443 int
3444 sys_rmdir(struct lwp *l, const struct sys_rmdir_args *uap, register_t *retval)
3445 {
3446 /* {
3447 syscallarg(const char *) path;
3448 } */
3449 struct vnode *vp;
3450 int error;
3451 struct nameidata nd;
3452
3453 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | TRYEMULROOT, UIO_USERSPACE,
3454 SCARG(uap, path));
3455 if ((error = namei(&nd)) != 0)
3456 return (error);
3457 vp = nd.ni_vp;
3458 if (vp->v_type != VDIR) {
3459 error = ENOTDIR;
3460 goto out;
3461 }
3462 /*
3463 * No rmdir "." please.
3464 */
3465 if (nd.ni_dvp == vp) {
3466 error = EINVAL;
3467 goto out;
3468 }
3469 /*
3470 * The root of a mounted filesystem cannot be deleted.
3471 */
3472 if (vp->v_vflag & VV_ROOT) {
3473 error = EBUSY;
3474 goto out;
3475 }
3476 VOP_LEASE(nd.ni_dvp, l->l_cred, LEASE_WRITE);
3477 VOP_LEASE(vp, l->l_cred, LEASE_WRITE);
3478 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
3479 return (error);
3480
3481 out:
3482 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
3483 if (nd.ni_dvp == vp)
3484 vrele(nd.ni_dvp);
3485 else
3486 vput(nd.ni_dvp);
3487 vput(vp);
3488 return (error);
3489 }
3490
3491 /*
3492 * Read a block of directory entries in a file system independent format.
3493 */
3494 int
3495 sys___getdents30(struct lwp *l, const struct sys___getdents30_args *uap, register_t *retval)
3496 {
3497 /* {
3498 syscallarg(int) fd;
3499 syscallarg(char *) buf;
3500 syscallarg(size_t) count;
3501 } */
3502 struct proc *p = l->l_proc;
3503 struct file *fp;
3504 int error, done;
3505
3506 /* getvnode() will use the descriptor for us */
3507 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
3508 return (error);
3509 if ((fp->f_flag & FREAD) == 0) {
3510 error = EBADF;
3511 goto out;
3512 }
3513 error = vn_readdir(fp, SCARG(uap, buf), UIO_USERSPACE,
3514 SCARG(uap, count), &done, l, 0, 0);
3515 ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, buf), done, error);
3516 *retval = done;
3517 out:
3518 FILE_UNUSE(fp, l);
3519 return (error);
3520 }
3521
3522 /*
3523 * Set the mode mask for creation of filesystem nodes.
3524 */
3525 int
3526 sys_umask(struct lwp *l, const struct sys_umask_args *uap, register_t *retval)
3527 {
3528 /* {
3529 syscallarg(mode_t) newmask;
3530 } */
3531 struct proc *p = l->l_proc;
3532 struct cwdinfo *cwdi;
3533
3534 /*
3535 * cwdi->cwdi_cmask will be read unlocked elsewhere. What's
3536 * important is that we serialize changes to the mask. The
3537 * rw_exit() will issue a write memory barrier on our behalf,
3538 * and force the changes out to other CPUs (as it must use an
3539 * atomic operation, draining the local CPU's store buffers).
3540 */
3541 cwdi = p->p_cwdi;
3542 rw_enter(&cwdi->cwdi_lock, RW_WRITER);
3543 *retval = cwdi->cwdi_cmask;
3544 cwdi->cwdi_cmask = SCARG(uap, newmask) & ALLPERMS;
3545 rw_exit(&cwdi->cwdi_lock);
3546
3547 return (0);
3548 }
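/*
 * Illustrative sketch: the previous mask is returned, and the new mask
 * is applied when nodes are created, as in sys_mkdir() above ("spool"
 * is a hypothetical path):
 *
 *	mode_t omask = umask(022);
 *	mkdir("spool", 0777);
 *	umask(omask);
 *
 * creates "spool" with mode 0755.
 */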
3549
3550 int
3551 dorevoke(struct vnode *vp, kauth_cred_t cred)
3552 {
3553 struct vattr vattr;
3554 int error;
3555
3556 if ((error = VOP_GETATTR(vp, &vattr, cred)) != 0)
3557 return error;
3558 if (kauth_cred_geteuid(cred) == vattr.va_uid ||
3559 (error = kauth_authorize_generic(cred,
3560 KAUTH_GENERIC_ISSUSER, NULL)) == 0)
3561 VOP_REVOKE(vp, REVOKEALL);
3562 return (error);
3563 }
3564
3565 /*
3566 * Void all references to file by ripping underlying filesystem
3567 * away from vnode.
3568 */
3569 /* ARGSUSED */
3570 int
3571 sys_revoke(struct lwp *l, const struct sys_revoke_args *uap, register_t *retval)
3572 {
3573 /* {
3574 syscallarg(const char *) path;
3575 } */
3576 struct vnode *vp;
3577 int error;
3578 struct nameidata nd;
3579
3580 NDINIT(&nd, LOOKUP, FOLLOW | TRYEMULROOT, UIO_USERSPACE,
3581 SCARG(uap, path));
3582 if ((error = namei(&nd)) != 0)
3583 return (error);
3584 vp = nd.ni_vp;
3585 error = dorevoke(vp, l->l_cred);
3586 vrele(vp);
3587 return (error);
3588 }
3589
3590 /*
3591 * Convert a user file descriptor to a kernel file entry.
3592 */
3593 int
3594 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
3595 {
3596 struct vnode *vp;
3597 struct file *fp;
3598
3599 if ((fp = fd_getfile(fdp, fd)) == NULL)
3600 return (EBADF);
3601
3602 FILE_USE(fp);
3603
3604 if (fp->f_type != DTYPE_VNODE) {
3605 FILE_UNUSE(fp, NULL);
3606 return (EINVAL);
3607 }
3608
3609 vp = (struct vnode *)fp->f_data;
3610 if (vp->v_type == VBAD) {
3611 FILE_UNUSE(fp, NULL);
3612 return (EBADF);
3613 }
3614
3615 *fpp = fp;
3616 return (0);
3617 }
3618