union_vnops.c revision 1.84 1 /* $NetBSD: union_vnops.c,v 1.84 2025/09/02 21:46:54 dholland Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993, 1994, 1995
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Jan-Simon Pendry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)union_vnops.c 8.33 (Berkeley) 7/31/95
35 */
36
37 /*
38 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
39 *
40 * This code is derived from software contributed to Berkeley by
41 * Jan-Simon Pendry.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * @(#)union_vnops.c 8.33 (Berkeley) 7/31/95
72 */
73
74 #include <sys/cdefs.h>
75 __KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.84 2025/09/02 21:46:54 dholland Exp $");
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/file.h>
81 #include <sys/time.h>
82 #include <sys/stat.h>
83 #include <sys/vnode.h>
84 #include <sys/mount.h>
85 #include <sys/namei.h>
86 #include <sys/malloc.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/lock.h>
90 #include <sys/kauth.h>
91
92 #include <fs/union/union.h>
93 #include <miscfs/genfs/genfs.h>
94 #include <miscfs/specfs/specdev.h>
95
96 int union_parsepath(void *);
97 int union_lookup(void *);
98 int union_create(void *);
99 int union_whiteout(void *);
100 int union_mknod(void *);
101 int union_open(void *);
102 int union_close(void *);
103 int union_access(void *);
104 int union_getattr(void *);
105 int union_setattr(void *);
106 int union_read(void *);
107 int union_write(void *);
108 int union_ioctl(void *);
109 int union_poll(void *);
110 int union_revoke(void *);
111 int union_mmap(void *);
112 int union_fsync(void *);
113 int union_seek(void *);
114 int union_remove(void *);
115 int union_link(void *);
116 int union_rename(void *);
117 int union_mkdir(void *);
118 int union_rmdir(void *);
119 int union_symlink(void *);
120 int union_readdir(void *);
121 int union_readlink(void *);
122 int union_abortop(void *);
123 int union_inactive(void *);
124 int union_reclaim(void *);
125 int union_lock(void *);
126 int union_unlock(void *);
127 int union_bmap(void *);
128 int union_print(void *);
129 int union_islocked(void *);
130 int union_pathconf(void *);
131 int union_advlock(void *);
132 int union_strategy(void *);
133 int union_bwrite(void *);
134 int union_getpages(void *);
135 int union_putpages(void *);
136 int union_kqfilter(void *);
137
138 static int union_lookup1(struct vnode *, struct vnode **,
139 struct vnode **, struct componentname *);
140
141
/*
 * Global vfs data structures
 */

/* Filled in when the opv descriptor below is registered with the VFS. */
int (**union_vnodeop_p)(void *);

/*
 * Vnode operations table for union file system nodes.  Operations with a
 * union_* implementation dispatch to the upper and/or lower layer vnode;
 * the rest fall back to genfs defaults.
 */
const struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_parsepath_desc, union_parsepath },	/* parsepath */
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_whiteout_desc, union_whiteout },		/* whiteout */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_accessx_desc, genfs_accessx },		/* accessx */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_poll_desc, union_poll },			/* poll */
	{ &vop_revoke_desc, union_revoke },		/* revoke */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
	{ &vop_getpages_desc, union_getpages },		/* getpages */
	{ &vop_putpages_desc, union_putpages },		/* putpages */
	{ &vop_kqfilter_desc, union_kqfilter },		/* kqfilter */
	{ NULL, NULL }
};
const struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };

/*
 * True for vnode types whose contents live outside the file system proper
 * (devices, sockets, fifos).  Such nodes are passed through rather than
 * copied up.
 */
#define NODE_IS_SPECIAL(vp)	\
	((vp)->v_type == VBLK || (vp)->v_type == VCHR || \
	(vp)->v_type == VSOCK || (vp)->v_type == VFIFO)
200
201 int
202 union_parsepath(void *v)
203 {
204 struct vop_parsepath_args /* {
205 struct vnode *a_dvp;
206 const char *a_name;
207 size_t *a_retval;
208 } */ *ap = v;
209 struct vnode *upperdvp, *lowerdvp;
210 size_t upper, lower;
211 int error;
212
213 upperdvp = UPPERVP(ap->a_dvp);
214 lowerdvp = LOWERVP(ap->a_dvp);
215
216 if (upperdvp != NULLVP) {
217 error = VOP_PARSEPATH(upperdvp, ap->a_name, &upper);
218 if (error) {
219 return error;
220 }
221 } else {
222 upper = 0;
223 }
224
225 if (lowerdvp != NULLVP) {
226 error = VOP_PARSEPATH(lowerdvp, ap->a_name, &lower);
227 if (error) {
228 return error;
229 }
230 } else {
231 lower = 0;
232 }
233
234 if (upper == 0 && lower == 0) {
235 panic("%s: missing both layers", __func__);
236 }
237
238 /*
239 * If they're different, use the larger one. This is not a
240 * comprehensive solution, but it's sufficient for the
241 * non-default cases of parsepath that currently exist.
242 */
243 *ap->a_retval = MAX(upper, lower);
244 return 0;
245 }
246
/*
 * union_lookup1: look up one pathname component in a single layer.
 *
 * udvp is the root vnode of that layer (upper or lower), used as a bound
 * so the walk never escapes the layer; *dvpp is the (locked) directory
 * to search and may be replaced when ".." crosses a mount point; on
 * success *vpp receives the locked result vnode.  Returns 0 or an errno
 * from VOP_LOOKUP / locking / VFS_ROOT.
 */
static int
union_lookup1(struct vnode *udvp, struct vnode **dvpp, struct vnode **vpp,
	struct componentname *cnp)
{
	int error;
	struct vnode *tdvp;
	struct vnode *dvp;
	struct mount *mp;

	dvp = *dvpp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			/* Step to the directory the mount covers. */
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			VOP_UNLOCK(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);
	if (dvp != tdvp) {
		/*
		 * Lock the result.  For "..", dvp must be dropped first to
		 * preserve lock ordering (child before parent would deadlock),
		 * then retaken once tdvp is held.
		 */
		if (cnp->cn_flags & ISDOTDOT)
			VOP_UNLOCK(dvp);
		error = vn_lock(tdvp, LK_EXCLUSIVE);
		if (cnp->cn_flags & ISDOTDOT)
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		if (error) {
			vrele(tdvp);
			return error;
		}
		dvp = tdvp;
	}

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)) {
		/* vfs_busy failure: mount is going away; re-test the loop. */
		if (vfs_busy(mp))
			continue;
		vput(dvp);
		error = VFS_ROOT(mp, LK_EXCLUSIVE, &tdvp);
		vfs_unbusy(mp);
		if (error) {
			return (error);
		}
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}
316
/*
 * union_lookup: look up a component in both layers of a union directory
 * and return a union node combining the results.
 *
 * The upper layer is searched with the caller's nameiop; the lower layer
 * only ever with LOOKUP (it is never modified).  A whiteout or OPAQUE
 * upper directory suppresses the lower lookup.  If only the lower layer
 * has a directory, a shadow directory is created in the upper layer
 * (possibly restarting the whole lookup to regenerate componentname
 * state).  The result vnode is returned unlocked (lookup_v2 protocol).
 */
int
union_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	kauth_cred_t saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

#ifdef notyet
	/* "..." would name the lower layer directly; not enabled. */
	if (cnp->cn_namelen == 3 &&
	    cnp->cn_nameptr[2] == '.' &&
	    cnp->cn_nameptr[1] == '.' &&
	    cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		if (dvp == NULLVP)
			return (ENOENT);
		vref(dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (0);
	}
#endif

	/* Refuse delete/rename of the last component on a read-only mount. */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

start:
	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/*
	 * do the lookup in the upper level.
	 * if that level comsumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		uerror = union_lookup1(um->um_uppervp, &upperdvp,
		    &uppervp, cnp);
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				/*
				 * lerror is only scratch here; it is
				 * unconditionally reassigned by the lower
				 * lookup (or its else branch) below.
				 */
				lerror = VOP_GETATTR(upperdvp, &va,
				    cnp->cn_cred);
				if (lerror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* Below-mounts use the mounter's credentials. */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * we shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 */
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
		    &lowervp, cnp);
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp);
	} else {
		lerror = ENOENT;
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			/* ".." with no lower dir: use the parent's lower. */
			lowervp = LOWERVP(dun->un_pvp);
			if (lowervp != NULLVP) {
				vref(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
				lerror = 0;
			}
		}
	}

	/*
	 * EJUSTRETURN is used by underlying filesystems to indicate that
	 * a directory modification op was started successfully.
	 * This will only happen in the upper layer, since
	 * the lower layer only does LOOKUPs.
	 * If this union is mounted read-only, bounce it now.
	 */

	if ((uerror == EJUSTRETURN) && (cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)))
		uerror = EROFS;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	*ap->a_vpp = NULLVP;

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			/*
			 * We may be racing another process to make the
			 * upper-level shadow directory.  Be careful with
			 * locks/etc!
			 * If we have to create a shadow directory and want
			 * to commit the node we have to restart the lookup
			 * to get the componentname right.
			 */
			if (upperdvp) {
				VOP_UNLOCK(upperdvp);
				uerror = union_mkshadow(um, upperdvp, cnp,
				    &uppervp);
				vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY);
				if (uerror == 0 && cnp->cn_nameiop != LOOKUP) {
					vrele(uppervp);
					if (lowervp != NULLVP)
						vput(lowervp);
					/* redo with fresh componentname */
					goto start;
				}
			}
			if (uerror) {
				if (lowervp != NULLVP) {
					vput(lowervp);
					lowervp = NULLVP;
				}
				return (uerror);
			}
		}
	} else { /* uerror == 0 */
		if (uppervp != upperdvp)
			VOP_UNLOCK(uppervp);
	}

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp);

	/* Combine the layer vnodes; consumes our references on success. */
	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
	    uppervp, lowervp, 1);

	if (error) {
		if (uppervp != NULLVP)
			vrele(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);
		return error;
	}

	return 0;
}
533
534 int
535 union_create(void *v)
536 {
537 struct vop_create_v3_args /* {
538 struct vnode *a_dvp;
539 struct vnode **a_vpp;
540 struct componentname *a_cnp;
541 struct vattr *a_vap;
542 } */ *ap = v;
543 struct union_node *un = VTOUNION(ap->a_dvp);
544 struct vnode *dvp = un->un_uppervp;
545 struct componentname *cnp = ap->a_cnp;
546
547 if (dvp != NULLVP) {
548 int error;
549 struct vnode *vp;
550 struct mount *mp;
551
552 mp = ap->a_dvp->v_mount;
553
554 vp = NULL;
555 error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
556 if (error)
557 return (error);
558
559 error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
560 NULLVP, 1);
561 if (error)
562 vrele(vp);
563 return (error);
564 }
565
566 return (EROFS);
567 }
568
569 int
570 union_whiteout(void *v)
571 {
572 struct vop_whiteout_args /* {
573 struct vnode *a_dvp;
574 struct componentname *a_cnp;
575 int a_flags;
576 } */ *ap = v;
577 struct union_node *un = VTOUNION(ap->a_dvp);
578 struct componentname *cnp = ap->a_cnp;
579
580 if (un->un_uppervp == NULLVP)
581 return (EOPNOTSUPP);
582
583 return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
584 }
585
586 int
587 union_mknod(void *v)
588 {
589 struct vop_mknod_v3_args /* {
590 struct vnode *a_dvp;
591 struct vnode **a_vpp;
592 struct componentname *a_cnp;
593 struct vattr *a_vap;
594 } */ *ap = v;
595 struct union_node *un = VTOUNION(ap->a_dvp);
596 struct vnode *dvp = un->un_uppervp;
597 struct componentname *cnp = ap->a_cnp;
598
599 if (dvp != NULLVP) {
600 int error;
601 struct vnode *vp;
602 struct mount *mp;
603
604 mp = ap->a_dvp->v_mount;
605 error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
606 if (error)
607 return (error);
608
609 error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
610 cnp, vp, NULLVP, 1);
611 if (error)
612 vrele(vp);
613 return (error);
614 }
615
616 return (EROFS);
617 }
618
/*
 * union_open: open a union node.
 *
 * If an upper vnode exists, open that.  Otherwise open the lower vnode,
 * first copying it up if it is a regular file being opened for writing
 * (so writes never touch the lower layer).  MNT_NODEV on the union
 * mount blocks opening block/character devices in either layer.  Counts
 * writers on the upper vnode and lower-layer opens (un_openl) so close
 * can balance them.
 */
int
union_open(void *v)
{
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	kauth_cred_t cred = ap->a_cred;
	struct lwp *l = curlwp;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* O_TRUNC: no point copying data we'll discard. */
			error = union_copyup(un, (mode&O_TRUNC) == 0, cred, l);
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred);
			if (error == 0) {
				/* Writer accounting on the new upper vp. */
				mutex_enter(un->un_uppervp->v_interlock);
				un->un_uppervp->v_writecount++;
				mutex_exit(un->un_uppervp->v_interlock);
			}
			return (error);
		}

		/*
		 * Just open the lower vnode, but check for nodev mount flag
		 */
		if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
		    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
			return ENXIO;
		un->un_openl++;
		/* The lower vnode is not locked by our caller; lock it. */
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(tvp, mode, cred);
		VOP_UNLOCK(tvp);

		return (error);
	}
	/*
	 * Just open the upper vnode, checking for nodev mount flag first
	 */
	if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	error = VOP_OPEN(tvp, mode, cred);
	if (error == 0 && (ap->a_mode & FWRITE)) {
		mutex_enter(tvp->v_interlock);
		tvp->v_writecount++;
		mutex_exit(tvp->v_interlock);
	}

	return (error);
}
687
/*
 * union_close: close a union node by forwarding to the layer vnode that
 * union_open actually opened.
 *
 * The upper vnode, when present, is already locked per the close
 * protocol; a lower vnode must be locked/unlocked around the call
 * (do_lock).  Balances the un_openl count and the upper writer count
 * taken in union_open.
 */
int
union_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	int error;
	bool do_lock;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		do_lock = false;
	} else {
		KASSERT(un->un_openl > 0);
		--un->un_openl;
		vp = un->un_lowervp;
		do_lock = true;
	}

	KASSERT(vp != NULLVP);
	/* Forward the close to the layer vnode via its own op vector. */
	ap->a_vp = vp;
	if ((ap->a_fflag & FWRITE)) {
		/* Writers are only ever counted on the upper vnode. */
		KASSERT(vp == un->un_uppervp);
		mutex_enter(vp->v_interlock);
		vp->v_writecount--;
		mutex_exit(vp->v_interlock);
	}
	if (do_lock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VCALL(vp, VOFFSET(vop_close), ap);
	if (do_lock)
		VOP_UNLOCK(vp);

	return error;
}
727
/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(void *v)
{
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		accmode_t a_accmode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	/* EACCES is the fallback if neither layer exists (shouldn't happen). */
	int error = EACCES;
	struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (ap->a_accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		case VBAD:
		case VBLK:
		case VCHR:
		case VSOCK:
		case VFIFO:
		case VNON:
		default:
			break;
		}
	}

	/*
	 * Copy up to prevent checking (and failing) against
	 * underlying file system mounted read only.
	 * Check for read access first to prevent implicit
	 * copy of inaccessible underlying vnode.
	 */
	if (un->un_uppervp == NULLVP &&
	    (un->un_lowervp->v_type == VREG) &&
	    (ap->a_accmode & VWRITE)) {
		vn_lock(un->un_lowervp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(un->un_lowervp, VREAD, ap->a_cred);
		VOP_UNLOCK(un->un_lowervp);
		if (error == 0)
			error = union_copyup(un, 1, ap->a_cred, curlwp);
		if (error)
			return error;
	}

	/* With an upper vnode (possibly just copied up), check only it. */
	if ((vp = un->un_uppervp) != NULLVP) {
		ap->a_vp = vp;
		return (VCALL(vp, VOFFSET(vop_access), ap));
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			/*
			 * For below-mounts also require access with the
			 * mounter's credentials.
			 */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	}

	return (error);
}
814
/*
 * We handle getattr only to change the fsid and
 * track object sizes
 */
int
union_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;
	struct vattr *vap;
	struct vattr va;


	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 *
	 * To make life more interesting, some filesystems don't keep
	 * track of link counts in the expected way, and return a
	 * link count of `1' for those directories; if either of the
	 * component directories returns a link count of `1', we return a 1.
	 */

	vap = ap->a_vap;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred);
		if (error)
			return (error);
		/*
		 * NOTE(review): un_lock is taken here with no visible
		 * release -- presumably union_newsize drops it; confirm
		 * against union_subr.c.
		 */
		mutex_enter(&un->un_lock);
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	/*
	 * Decide whether the lower layer must be consulted too: always
	 * when there is no upper vnode, and for directories (to fix up
	 * the link count, using scratch vattr va).
	 */
	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR) {
		vp = un->un_lowervp;
		if (vp != NULLVP)
			vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		/* The lower vnode is not locked by our caller. */
		if (vp == un->un_lowervp)
			vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, vap, ap->a_cred);
		if (vp == un->un_lowervp)
			VOP_UNLOCK(vp);
		if (error)
			return (error);
		mutex_enter(&un->un_lock);
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR)) {
		/*
		 * Link count manipulation:
		 *	- If both return "2", return 2 (no subdirs)
		 *	- If one or the other return "1", return "1" (ENOCLUE)
		 */
		if ((ap->a_vap->va_nlink == 2) &&
		    (vap->va_nlink == 2))
			;
		else if (ap->a_vap->va_nlink != 1) {
			if (vap->va_nlink == 1)
				ap->a_vap->va_nlink = 1;
			else
				ap->a_vap->va_nlink += vap->va_nlink;
		}
	}
	/* Present the union mount's fsid, not the underlying layer's. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return (0);
}
900
/*
 * union_setattr: set attributes on a union node.
 *
 * Attribute changes go to the upper layer, copying a lower regular file
 * up first (O_TRUNC|O_CREAT handling).  A size-only change on a special
 * node backed solely by the lower layer is tolerated (for O_TRUNC on
 * devices); any other lower-only change fails with EROFS.
 */
int
union_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	bool size_only;		/* All but va_size are VNOVAL. */
	int error;

	size_only = (vap->va_flags == VNOVAL && vap->va_uid == (uid_t)VNOVAL &&
	    vap->va_gid == (gid_t)VNOVAL && vap->va_atime.tv_sec == VNOVAL &&
	    vap->va_mtime.tv_sec == VNOVAL && vap->va_mode == (mode_t)VNOVAL);

	if (!size_only && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			/* Truncating specials is a no-op; allow it. */
			break;
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		/* Only copy the data if we aren't about to discard it. */
		error = union_copyup(un, (vap->va_size != 0),
		    ap->a_cred, curlwp);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer, ignore size change to zero
	 * for devices to handle O_TRUNC and return read-only filesystem error
	 * otherwise.
	 */
	if (un->un_uppervp != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, vap, ap->a_cred);
		if ((error == 0) && (vap->va_size != VNOVAL)) {
			/*
			 * NOTE(review): no visible mutex_exit here --
			 * presumably union_newsize releases un_lock.
			 */
			mutex_enter(&un->un_lock);
			union_newsize(ap->a_vp, vap->va_size, VNOVAL);
		}
	} else {
		KASSERT(un->un_lowervp != NULLVP);
		if (NODE_IS_SPECIAL(un->un_lowervp)) {
			if (size_only &&
			    (vap->va_size == 0 || vap->va_size == VNOVAL))
				error = 0;
			else
				error = EROFS;
		} else {
			error = EROFS;
		}
	}

	return (error);
}
982
/*
 * union_read: read from whichever layer backs this node (upper wins).
 *
 * A lower vnode must be locked around the read; the upper vnode is
 * already locked by our caller.  Afterwards, fold any growth revealed
 * by the uio offset into the union node's cached layer sizes.
 */
int
union_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;
		off_t usz = VNOVAL, lsz = VNOVAL;

		/*
		 * un_lock is either passed to union_newsize (which
		 * presumably releases it) or dropped here explicitly.
		 */
		mutex_enter(&un->un_lock);
		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				usz = cur;
		} else {
			if (cur > un->un_lowersz)
				lsz = cur;
		}

		if (usz != VNOVAL || lsz != VNOVAL)
			union_newsize(ap->a_vp, usz, lsz);
		else
			mutex_exit(&un->un_lock);
	}

	return (error);
}
1030
1031 int
1032 union_write(void *v)
1033 {
1034 struct vop_read_args /* {
1035 struct vnode *a_vp;
1036 struct uio *a_uio;
1037 int a_ioflag;
1038 kauth_cred_t a_cred;
1039 } */ *ap = v;
1040 int error;
1041 struct vnode *vp;
1042 struct union_node *un = VTOUNION(ap->a_vp);
1043
1044 vp = UPPERVP(ap->a_vp);
1045 if (vp == NULLVP) {
1046 vp = LOWERVP(ap->a_vp);
1047 if (NODE_IS_SPECIAL(vp)) {
1048 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1049 error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag,
1050 ap->a_cred);
1051 VOP_UNLOCK(vp);
1052 return error;
1053 }
1054 panic("union: missing upper layer in write");
1055 }
1056
1057 error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1058
1059 /*
1060 * the size of the underlying object may be changed by the
1061 * write.
1062 */
1063 if (error == 0) {
1064 off_t cur = ap->a_uio->uio_offset;
1065
1066 mutex_enter(&un->un_lock);
1067 if (cur > un->un_uppersz)
1068 union_newsize(ap->a_vp, cur, VNOVAL);
1069 else
1070 mutex_exit(&un->un_lock);
1071 }
1072
1073 return (error);
1074 }
1075
1076 int
1077 union_ioctl(void *v)
1078 {
1079 struct vop_ioctl_args /* {
1080 struct vnode *a_vp;
1081 int a_command;
1082 void *a_data;
1083 int a_fflag;
1084 kauth_cred_t a_cred;
1085 } */ *ap = v;
1086 struct vnode *ovp = OTHERVP(ap->a_vp);
1087
1088 ap->a_vp = ovp;
1089 return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1090 }
1091
1092 int
1093 union_poll(void *v)
1094 {
1095 struct vop_poll_args /* {
1096 struct vnode *a_vp;
1097 int a_events;
1098 } */ *ap = v;
1099 struct vnode *ovp = OTHERVP(ap->a_vp);
1100
1101 ap->a_vp = ovp;
1102 return (VCALL(ovp, VOFFSET(vop_poll), ap));
1103 }
1104
/*
 * union_revoke: revoke access to the node.  Propagate the revoke to
 * both underlying layers (whichever exist), then kill the union vnode
 * itself.
 */
int
union_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (UPPERVP(vp))
		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
	if (LOWERVP(vp))
		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
	vgone(vp);	/* XXXAD?? */
	return (0);
}
1122
1123 int
1124 union_mmap(void *v)
1125 {
1126 struct vop_mmap_args /* {
1127 struct vnode *a_vp;
1128 vm_prot_t a_prot;
1129 kauth_cred_t a_cred;
1130 } */ *ap = v;
1131 struct vnode *ovp = OTHERVP(ap->a_vp);
1132
1133 ap->a_vp = ovp;
1134 return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1135 }
1136
/*
 * union_fsync: sync a union vnode.  For device special files the
 * union vnode's own buffers are flushed first via spec_fsync();
 * otherwise the request is handed to the backing layer, unless this
 * is a "shallow" fsync from the reclaim path (see below).
 */
int
union_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t offhi;
		off_t offlo;
	} */ *ap = v;
	int error = 0;
	struct vnode *targetvp;

	/*
	 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't
	 * bother syncing the underlying vnodes, since (a) they'll be
	 * fsync'ed when reclaimed and (b) we could deadlock if
	 * they're locked; otherwise, pass it through to the
	 * underlying layer.
	 */
	if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) {
		/* Device node: flush this vnode's own buffers first. */
		error = spec_fsync(v);
		if (error)
			return error;
	}

	/* Shallow fsync (see comment above): skip the underlying vnode. */
	if (ap->a_flags & FSYNC_RECLAIM)
		return 0;

	targetvp = OTHERVP(ap->a_vp);
	if (targetvp != NULLVP) {
		/*
		 * The lower vnode is not covered by the union vnode's
		 * lock, so take and drop its lock explicitly around
		 * the call; the upper vnode needs no extra locking.
		 */
		int dolock = (targetvp == LOWERVP(ap->a_vp));

		if (dolock)
			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_flags,
		    ap->a_offlo, ap->a_offhi);
		if (dolock)
			VOP_UNLOCK(targetvp);
	}

	return (error);
}
1180
1181 int
1182 union_seek(void *v)
1183 {
1184 struct vop_seek_args /* {
1185 struct vnode *a_vp;
1186 off_t a_oldoff;
1187 off_t a_newoff;
1188 kauth_cred_t a_cred;
1189 } */ *ap = v;
1190 struct vnode *ovp = OTHERVP(ap->a_vp);
1191
1192 ap->a_vp = ovp;
1193 return (VCALL(ovp, VOFFSET(vop_seek), ap));
1194 }
1195
/*
 * union_remove: remove a file from the union.  If the file exists in
 * the upper layer, remove it there (adding a whiteout if the lower
 * layer would otherwise show through again); if it exists only in
 * the lower layer, just create a whiteout in the upper directory to
 * mask it.
 */
int
union_remove(void *v)
{
	struct vop_remove_v3_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
		nlink_t ctx_vp_new_nlink;
	} */ *ap = v;
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	/* The directory must have an upper layer to remove from. */
	if (dun->un_uppervp == NULLVP)
		panic("union remove: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/* Account for VOP_REMOVE to vrele vp. */
		vref(vp);
		if (union_dowhiteout(un, cnp->cn_cred))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(dvp, vp, cnp);
		if (!error)
			union_removed_upper(un);
		/* Drop the caller's reference on the union vnode. */
		vrele(ap->a_vp);
	} else {
		/* Lower-only file: mask it with a whiteout entry. */
		error = union_mkwhiteout(
		    MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
		    dun->un_uppervp, ap->a_cnp, un);
		vput(ap->a_vp);
	}

	return (error);
}
1234
/*
 * union_link: create a hard link.  The link must be made in the
 * upper layer; if the source vnode only exists in the lower layer it
 * is copied up first, which requires some delicate lock juggling and
 * a relookup of the target name (see comments in the body).
 */
int
union_link(void *v)
{
	struct vop_link_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error = 0;
	struct componentname *cnp = ap->a_cnp;
	struct union_node *dun;
	struct vnode *vp;
	struct vnode *dvp;

	dun = VTOUNION(ap->a_dvp);

	KASSERT((ap->a_cnp->cn_flags & LOCKPARENT) != 0);

	/*
	 * If the link source is not a union vnode (e.g. from a lower
	 * layer mounted elsewhere), link it as-is; otherwise link its
	 * upper-layer vnode, copying it up first if necessary.
	 */
	if (ap->a_dvp->v_op != ap->a_vp->v_op) {
		vp = ap->a_vp;
	} else {
		struct union_node *un = VTOUNION(ap->a_vp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * droplock: the upper directory is also the
			 * source's parent, so its lock must be dropped
			 * before copyup to avoid a lock-order reversal.
			 */
			const bool droplock = (dun->un_uppervp == un->un_dirvp);

			/*
			 * Needs to be copied before we can link it.
			 */
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
			if (droplock)
				VOP_UNLOCK(dun->un_uppervp);
			error = union_copyup(un, 1, cnp->cn_cred, curlwp);
			/*
			 * Unlock the target vnode again immediately
			 * to avoid deadlocking while relocking or in
			 * relookup. In particular it's common for a
			 * link operation to be entirely in one dir,
			 * where ap->a_dvp and/or dun->un_uppervp are
			 * the parent of ap->a_vp; locking the former
			 * while holding the latter is an order
			 * reversal. It's also possible for the
			 * relookup call to find and lock the same
			 * vnode if another process just did the same
			 * link operation.
			 */
			VOP_UNLOCK(ap->a_vp);
			if (droplock) {
				vn_lock(dun->un_uppervp,
				    LK_EXCLUSIVE | LK_RETRY);
				/*
				 * During copyup, we dropped the lock on the
				 * dir and invalidated any saved namei lookup
				 * state for the directory we'll be entering
				 * the link in. We need to re-run the lookup
				 * in that directory to reset any state needed
				 * for VOP_LINK.
				 * Call relookup on the union-layer to reset
				 * the state.
				 */
				vp = NULLVP;
				if (dun->un_uppervp == NULLVP)
					panic("union: null upperdvp?");
				/*
				 * NOTE(review): a relookup failure is
				 * reported as EROFS regardless of the
				 * actual error (see "?" below).
				 */
				error = relookup(ap->a_dvp, &vp, ap->a_cnp, 0);
				if (error) {
					return EROFS;	/* ? */
				}
				if (vp != NULLVP) {
					/*
					 * The name we wanted to
					 * create has been created by
					 * another process.
					 */
					vput(vp);
					return EEXIST;
				}
			}
		}
		vp = un->un_uppervp;
	}

	/* The link must land in a writable (upper) directory. */
	dvp = dun->un_uppervp;
	if (dvp == NULLVP)
		error = EROFS;

	/* Also catches a copyup failure from above. */
	if (error)
		return (error);

	return VOP_LINK(dvp, vp, cnp);
}
1324
/*
 * union_rename: rename within the union.  All four vnodes are mapped
 * to their upper-layer counterparts (taking an extra reference on
 * each mapped vnode so VOP_RENAME's releases balance), and the
 * rename is performed entirely in the upper layer.  A source that
 * exists only in the lower layer cannot be renamed (EXDEV); a source
 * that shadows a lower file gets a whiteout so the old name does not
 * reappear.
 */
int
union_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Account for VOP_RENAME to vrele all nodes.
	 * Note: VOP_RENAME will unlock tdvp.
	 */

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		fdvp = un->un_uppervp;
		vref(fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == NULLVP) {
			/* XXX: should do a copyup */
			error = EXDEV;
			goto bad;
		}

		/* Shadowed lower file: whiteout the old name. */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;

		fvp = un->un_uppervp;
		vref(fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		tdvp = un->un_uppervp;
		vref(tdvp);
	}

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		/*
		 * A lower-only target (tvp becomes NULLVP here) is
		 * treated as "no existing target" for the upper-layer
		 * rename.
		 */
		tvp = un->un_uppervp;
		if (tvp != NULLVP) {
			vref(tvp);
		}
	}

	error = VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp);
	goto out;

 bad:
	/* Release the caller's nodes as VOP_RENAME would have. */
	vput(tdvp);
	if (tvp != NULLVP)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);

 out:
	/* Drop the extra references taken on any mapped union vnodes. */
	if (fdvp != ap->a_fdvp) {
		vrele(ap->a_fdvp);
	}
	if (fvp != ap->a_fvp) {
		vrele(ap->a_fvp);
	}
	if (tdvp != ap->a_tdvp) {
		vrele(ap->a_tdvp);
	}
	if (tvp != ap->a_tvp) {
		vrele(ap->a_tvp);
	}
	return (error);
}
1431
/*
 * union_mkdir: create a directory in the upper layer and wrap the
 * result in a new union node.  Fails with EROFS if the parent has no
 * upper layer.
 */
int
union_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;

		vp = NULL;
		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			/*
			 * NOTE(review): the error path vrele's the
			 * union dvp while the success path does not --
			 * looks intentional for the vop_mkdir_v3
			 * contract, but confirm.
			 */
			vrele(ap->a_dvp);
			return (error);
		}

		/* Wrap the new upper-layer directory in a union node. */
		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
		    NULLVP, cnp, vp, NULLVP, 1);
		if (error)
			vrele(vp);
		return (error);
	}

	return (EROFS);
}
1465
/*
 * union_rmdir: remove a directory from the union.  Mirrors
 * union_remove: an upper-layer directory is removed there (with a
 * whiteout when a lower directory would show through), while a
 * lower-only directory is masked with a whiteout.  The union-level
 * emptiness check happens first via union_check_rmdir().
 */
int
union_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union rmdir: null upper vnode");

	/* Reject if the merged view of the directory is not empty. */
	error = union_check_rmdir(un, cnp->cn_cred);
	if (error) {
		vput(ap->a_vp);
		return error;
	}

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/* Account for VOP_RMDIR to vrele vp. */
		vref(vp);
		if (union_dowhiteout(un, cnp->cn_cred))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		/* Drop the caller's reference on the union vnode. */
		vrele(ap->a_vp);
	} else {
		/* Lower-only directory: mask it with a whiteout. */
		error = union_mkwhiteout(
		    MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
		    dun->un_uppervp, ap->a_cnp, un);
		vput(ap->a_vp);
	}

	return (error);
}
1509
1510 int
1511 union_symlink(void *v)
1512 {
1513 struct vop_symlink_v3_args /* {
1514 struct vnode *a_dvp;
1515 struct vnode **a_vpp;
1516 struct componentname *a_cnp;
1517 struct vattr *a_vap;
1518 char *a_target;
1519 } */ *ap = v;
1520 struct union_node *un = VTOUNION(ap->a_dvp);
1521 struct vnode *dvp = un->un_uppervp;
1522 struct componentname *cnp = ap->a_cnp;
1523
1524 if (dvp != NULLVP) {
1525 int error;
1526
1527 error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1528 ap->a_target);
1529 return (error);
1530 }
1531
1532 return (EROFS);
1533 }
1534
1535 /*
1536 * union_readdir works in concert with getdirentries and
1537 * readdir(3) to provide a list of entries in the unioned
1538 * directories. getdirentries is responsible for walking
1539 * down the union stack. readdir(3) is responsible for
1540 * eliminating duplicate names from the returned data stream.
1541 */
1542 int
1543 union_readdir(void *v)
1544 {
1545 struct vop_readdir_args /* {
1546 struct vnodeop_desc *a_desc;
1547 struct vnode *a_vp;
1548 struct uio *a_uio;
1549 kauth_cred_t a_cred;
1550 int *a_eofflag;
1551 u_long *a_cookies;
1552 int a_ncookies;
1553 } */ *ap = v;
1554 struct union_node *un = VTOUNION(ap->a_vp);
1555 struct vnode *vp;
1556 int dolock, error;
1557
1558 if (un->un_hooknode) {
1559 KASSERT(un->un_uppervp == NULLVP);
1560 KASSERT(un->un_lowervp != NULLVP);
1561 vp = un->un_lowervp;
1562 dolock = 1;
1563 } else {
1564 vp = un->un_uppervp;
1565 dolock = 0;
1566 }
1567 if (vp == NULLVP)
1568 return 0;
1569
1570 if (dolock)
1571 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1572 ap->a_vp = vp;
1573 error = VCALL(vp, VOFFSET(vop_readdir), ap);
1574 if (dolock)
1575 VOP_UNLOCK(vp);
1576
1577 return error;
1578 }
1579
1580 int
1581 union_readlink(void *v)
1582 {
1583 struct vop_readlink_args /* {
1584 struct vnode *a_vp;
1585 struct uio *a_uio;
1586 kauth_cred_t a_cred;
1587 } */ *ap = v;
1588 int error;
1589 struct vnode *vp = OTHERVP(ap->a_vp);
1590 int dolock = (vp == LOWERVP(ap->a_vp));
1591
1592 if (dolock)
1593 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1594 ap->a_vp = vp;
1595 error = VCALL(vp, VOFFSET(vop_readlink), ap);
1596 if (dolock)
1597 VOP_UNLOCK(vp);
1598
1599 return (error);
1600 }
1601
1602 int
1603 union_abortop(void *v)
1604 {
1605 struct vop_abortop_args /* {
1606 struct vnode *a_dvp;
1607 struct componentname *a_cnp;
1608 } */ *ap = v;
1609
1610 KASSERT(UPPERVP(ap->a_dvp) != NULL);
1611
1612 ap->a_dvp = UPPERVP(ap->a_dvp);
1613 return VCALL(ap->a_dvp, VOFFSET(vop_abortop), ap);
1614 }
1615
1616 int
1617 union_inactive(void *v)
1618 {
1619 struct vop_inactive_v2_args /* {
1620 const struct vnodeop_desc *a_desc;
1621 struct vnode *a_vp;
1622 bool *a_recycle;
1623 } */ *ap = v;
1624 struct vnode *vp = ap->a_vp;
1625 struct union_node *un = VTOUNION(vp);
1626 struct vnode **vpp;
1627
1628 /*
1629 * Do nothing (and _don't_ bypass).
1630 * Wait to vrele lowervp until reclaim,
1631 * so that until then our union_node is in the
1632 * cache and reusable.
1633 *
1634 * NEEDSWORK: Someday, consider inactive'ing
1635 * the lowervp and then trying to reactivate it
1636 * with capabilities (v_id)
1637 * like they do in the name lookup cache code.
1638 * That's too much work for now.
1639 */
1640
1641 if (un->un_dircache != 0) {
1642 for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1643 vrele(*vpp);
1644 free(un->un_dircache, M_TEMP);
1645 un->un_dircache = 0;
1646 }
1647
1648 *ap->a_recycle = ((un->un_cflags & UN_CACHED) == 0);
1649
1650 return (0);
1651 }
1652
/*
 * union_reclaim: final teardown of a union vnode.  Transfers any
 * write-count this vnode contributed back off the upper vnode (they
 * share an interlock -- see the KASSERT), then frees the union node.
 */
int
union_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *uvp = UPPERVP(vp);

	VOP_UNLOCK(vp);

	if (uvp != NULL) {
		mutex_enter(uvp->v_interlock);
		/* Union and upper vnode share one interlock. */
		KASSERT(vp->v_interlock == uvp->v_interlock);
		uvp->v_writecount -= vp->v_writecount;
		mutex_exit(uvp->v_interlock);
	}

	/* Drops the underlying layer references and frees the node. */
	union_freevp(vp);

	return (0);
}
1675
1676 static int
1677 union_lock1(struct vnode *vp, struct vnode *lockvp, int flags)
1678 {
1679 struct vop_lock_args ap;
1680
1681 ap.a_desc = VDESC(vop_lock);
1682 ap.a_vp = lockvp;
1683 ap.a_flags = flags;
1684
1685 if (lockvp == vp)
1686 return genfs_lock(&ap);
1687 else
1688 return VCALL(ap.a_vp, VOFFSET(vop_lock), &ap);
1689 }
1690
1691 static int
1692 union_unlock1(struct vnode *vp, struct vnode *lockvp)
1693 {
1694 struct vop_unlock_args ap;
1695
1696 ap.a_desc = VDESC(vop_unlock);
1697 ap.a_vp = lockvp;
1698
1699 if (lockvp == vp)
1700 return genfs_unlock(&ap);
1701 else
1702 return VCALL(ap.a_vp, VOFFSET(vop_unlock), &ap);
1703 }
1704
/*
 * union_lock: lock a union vnode.  The vnode that actually carries
 * the lock (LOCKVP) can change underneath us -- e.g. a copyup
 * installs an upper vnode -- so after taking the lock we re-check
 * that LOCKVP is still the vnode we locked and retry if not.
 */
int
union_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lockvp;
	struct union_node *un = VTOUNION(vp);
	int flags = ap->a_flags;
	int error;

	if ((flags & LK_NOWAIT) != 0) {
		/*
		 * Non-blocking case: hold un_lock across the attempt
		 * so LOCKVP cannot change; no retry loop needed.
		 */
		if (!mutex_tryenter(&un->un_lock))
			return EBUSY;
		lockvp = LOCKVP(vp);
		error = union_lock1(vp, lockvp, flags);
		mutex_exit(&un->un_lock);
		return error;
	}

	mutex_enter(&un->un_lock);
	for (;;) {
		lockvp = LOCKVP(vp);
		/* Drop un_lock while blocking on the vnode lock. */
		mutex_exit(&un->un_lock);
		error = union_lock1(vp, lockvp, flags);
		if (error != 0 || (flags & (LK_DOWNGRADE | LK_UPGRADE)) != 0)
			return error;
		mutex_enter(&un->un_lock);
		/* Done if the lock vnode did not change meanwhile. */
		if (lockvp == LOCKVP(vp))
			break;
		/* It changed: release the stale lock and try again. */
		union_unlock1(vp, lockvp);
	}
	mutex_exit(&un->un_lock);

	return error;
}
1742
1743 int
1744 union_unlock(void *v)
1745 {
1746 struct vop_unlock_args /* {
1747 struct vnode *a_vp;
1748 int a_flags;
1749 } */ *ap = v;
1750 struct vnode *vp = ap->a_vp, *lockvp;
1751
1752 lockvp = LOCKVP(vp);
1753 union_unlock1(vp, lockvp);
1754
1755 return 0;
1756 }
1757
1758 int
1759 union_bmap(void *v)
1760 {
1761 struct vop_bmap_args /* {
1762 struct vnode *a_vp;
1763 daddr_t a_bn;
1764 struct vnode **a_vpp;
1765 daddr_t *a_bnp;
1766 int *a_runp;
1767 } */ *ap = v;
1768 int error;
1769 struct vnode *vp = OTHERVP(ap->a_vp);
1770 int dolock = (vp == LOWERVP(ap->a_vp));
1771
1772 if (dolock)
1773 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1774 ap->a_vp = vp;
1775 error = VCALL(vp, VOFFSET(vop_bmap), ap);
1776 if (dolock)
1777 VOP_UNLOCK(vp);
1778
1779 return (error);
1780 }
1781
1782 int
1783 union_print(void *v)
1784 {
1785 struct vop_print_args /* {
1786 struct vnode *a_vp;
1787 } */ *ap = v;
1788 struct vnode *vp = ap->a_vp;
1789
1790 printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1791 vp, UPPERVP(vp), LOWERVP(vp));
1792 if (UPPERVP(vp) != NULLVP)
1793 vprint("union: upper", UPPERVP(vp));
1794 if (LOWERVP(vp) != NULLVP)
1795 vprint("union: lower", LOWERVP(vp));
1796 if (VTOUNION(vp)->un_dircache) {
1797 struct vnode **vpp;
1798 for (vpp = VTOUNION(vp)->un_dircache; *vpp != NULLVP; vpp++)
1799 vprint("dircache:", *vpp);
1800 }
1801
1802 return (0);
1803 }
1804
1805 int
1806 union_islocked(void *v)
1807 {
1808 struct vop_islocked_args /* {
1809 struct vnode *a_vp;
1810 } */ *ap = v;
1811 struct vnode *vp;
1812 struct union_node *un;
1813
1814 un = VTOUNION(ap->a_vp);
1815 mutex_enter(&un->un_lock);
1816 vp = LOCKVP(ap->a_vp);
1817 mutex_exit(&un->un_lock);
1818
1819 if (vp == ap->a_vp)
1820 return genfs_islocked(ap);
1821 else
1822 return VOP_ISLOCKED(vp);
1823 }
1824
1825 int
1826 union_pathconf(void *v)
1827 {
1828 struct vop_pathconf_args /* {
1829 struct vnode *a_vp;
1830 int a_name;
1831 int *a_retval;
1832 } */ *ap = v;
1833 int error;
1834 struct vnode *vp = OTHERVP(ap->a_vp);
1835 int dolock = (vp == LOWERVP(ap->a_vp));
1836
1837 if (dolock)
1838 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1839 ap->a_vp = vp;
1840 error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1841 if (dolock)
1842 VOP_UNLOCK(vp);
1843
1844 return (error);
1845 }
1846
1847 int
1848 union_advlock(void *v)
1849 {
1850 struct vop_advlock_args /* {
1851 struct vnode *a_vp;
1852 void *a_id;
1853 int a_op;
1854 struct flock *a_fl;
1855 int a_flags;
1856 } */ *ap = v;
1857 struct vnode *ovp = OTHERVP(ap->a_vp);
1858
1859 ap->a_vp = ovp;
1860 return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1861 }
1862
1863 int
1864 union_strategy(void *v)
1865 {
1866 struct vop_strategy_args /* {
1867 struct vnode *a_vp;
1868 struct buf *a_bp;
1869 } */ *ap = v;
1870 struct vnode *ovp = OTHERVP(ap->a_vp);
1871 struct buf *bp = ap->a_bp;
1872
1873 KASSERT(ovp != NULLVP);
1874 if (!NODE_IS_SPECIAL(ovp))
1875 KASSERT((bp->b_flags & B_READ) || ovp != LOWERVP(bp->b_vp));
1876
1877 return (VOP_STRATEGY(ovp, bp));
1878 }
1879
1880 int
1881 union_bwrite(void *v)
1882 {
1883 struct vop_bwrite_args /* {
1884 struct vnode *a_vp;
1885 struct buf *a_bp;
1886 } */ *ap = v;
1887 struct vnode *ovp = OTHERVP(ap->a_vp);
1888 struct buf *bp = ap->a_bp;
1889
1890 KASSERT(ovp != NULLVP);
1891 if (!NODE_IS_SPECIAL(ovp))
1892 KASSERT((bp->b_flags & B_READ) || ovp != LOWERVP(bp->b_vp));
1893
1894 return (VOP_BWRITE(ovp, bp));
1895 }
1896
1897 int
1898 union_getpages(void *v)
1899 {
1900 struct vop_getpages_args /* {
1901 struct vnode *a_vp;
1902 voff_t a_offset;
1903 struct vm_page **a_m;
1904 int *a_count;
1905 int a_centeridx;
1906 vm_prot_t a_access_type;
1907 int a_advice;
1908 int a_flags;
1909 } */ *ap = v;
1910 struct vnode *vp = ap->a_vp;
1911
1912 KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));
1913
1914 if (ap->a_flags & PGO_LOCKED) {
1915 return EBUSY;
1916 }
1917 ap->a_vp = OTHERVP(vp);
1918 KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);
1919
1920 /* Just pass the request on to the underlying layer. */
1921 return VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
1922 }
1923
/*
 * union_putpages: pass a page-out request to the backing layer,
 * which shares our vmobjlock.  PGO_RECLAIM is handled here: there is
 * nothing of our own to flush, but the caller expects the vmobjlock
 * to have been released, so drop it and return.
 */
int
union_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	ap->a_vp = OTHERVP(vp);
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	if (ap->a_flags & PGO_RECLAIM) {
		/* VOP_PUTPAGES contract: return with vmobjlock released. */
		rw_exit(vp->v_uobj.vmobjlock);
		return 0;
	}

	/* Just pass the request on to the underlying layer. */
	return VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
}
1948
1949 int
1950 union_kqfilter(void *v)
1951 {
1952 struct vop_kqfilter_args /* {
1953 struct vnode *a_vp;
1954 struct knote *a_kn;
1955 } */ *ap = v;
1956 int error;
1957
1958 /*
1959 * We watch either the upper layer file (if it already exists),
1960 * or the lower layer one. If there is lower layer file only
1961 * at this moment, we will keep watching that lower layer file
1962 * even if upper layer file would be created later on.
1963 */
1964 if (UPPERVP(ap->a_vp))
1965 error = VOP_KQFILTER(UPPERVP(ap->a_vp), ap->a_kn);
1966 else if (LOWERVP(ap->a_vp))
1967 error = VOP_KQFILTER(LOWERVP(ap->a_vp), ap->a_kn);
1968 else {
1969 /* panic? */
1970 error = EOPNOTSUPP;
1971 }
1972
1973 return (error);
1974 }
1975