union_vnops.c revision 1.79 1 /* $NetBSD: union_vnops.c,v 1.79 2021/10/20 03:08:17 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993, 1994, 1995
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Jan-Simon Pendry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)union_vnops.c 8.33 (Berkeley) 7/31/95
35 */
36
37 /*
38 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
39 *
40 * This code is derived from software contributed to Berkeley by
41 * Jan-Simon Pendry.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * @(#)union_vnops.c 8.33 (Berkeley) 7/31/95
72 */
73
74 #include <sys/cdefs.h>
75 __KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.79 2021/10/20 03:08:17 thorpej Exp $");
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/file.h>
81 #include <sys/time.h>
82 #include <sys/stat.h>
83 #include <sys/vnode.h>
84 #include <sys/mount.h>
85 #include <sys/namei.h>
86 #include <sys/malloc.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/lock.h>
90 #include <sys/kauth.h>
91
92 #include <fs/union/union.h>
93 #include <miscfs/genfs/genfs.h>
94 #include <miscfs/specfs/specdev.h>
95
/*
 * Forward declarations for the union file system vnode operations
 * implemented in this file.  Each is installed as a VOP handler via
 * union_vnodeop_entries[] below; all take the generic "void *v"
 * argument, which points at the per-operation args structure.
 */
int union_parsepath(void *);
int union_lookup(void *);
int union_create(void *);
int union_whiteout(void *);
int union_mknod(void *);
int union_open(void *);
int union_close(void *);
int union_access(void *);
int union_getattr(void *);
int union_setattr(void *);
int union_read(void *);
int union_write(void *);
int union_ioctl(void *);
int union_poll(void *);
int union_revoke(void *);
int union_mmap(void *);
int union_fsync(void *);
int union_seek(void *);
int union_remove(void *);
int union_link(void *);
int union_rename(void *);
int union_mkdir(void *);
int union_rmdir(void *);
int union_symlink(void *);
int union_readdir(void *);
int union_readlink(void *);
int union_abortop(void *);
int union_inactive(void *);
int union_reclaim(void *);
int union_lock(void *);
int union_unlock(void *);
int union_bmap(void *);
int union_print(void *);
int union_islocked(void *);
int union_pathconf(void *);
int union_advlock(void *);
int union_strategy(void *);
int union_bwrite(void *);
int union_getpages(void *);
int union_putpages(void *);
int union_kqfilter(void *);

/* Single-component lookup in one layer (upper or lower) of the union. */
static int union_lookup1(struct vnode *, struct vnode **,
			     struct vnode **, struct componentname *);
140
141
142 /*
143 * Global vfs data structures
144 */
int (**union_vnodeop_p)(void *);

/*
 * Vnode operations dispatch table for union nodes.  Operations without
 * a union-specific handler either fall back to vn_default_error (via
 * vop_default_desc) or are explicitly mapped to genfs stubs
 * (e.g. fallocate/fdiscard are unsupported).
 */
const struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_parsepath_desc, union_parsepath },	/* parsepath */
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_whiteout_desc, union_whiteout },		/* whiteout */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_accessx_desc, genfs_accessx },		/* accessx */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_poll_desc, union_poll },			/* select */
	{ &vop_revoke_desc, union_revoke },		/* revoke */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
	{ &vop_getpages_desc, union_getpages },		/* getpages */
	{ &vop_putpages_desc, union_putpages },		/* putpages */
	{ &vop_kqfilter_desc, union_kqfilter },		/* kqfilter */
	{ NULL, NULL }
};
const struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };
196
/*
 * True if the vnode is a special file (block/character device, socket,
 * or fifo).  Used below to decide which operations may be passed to a
 * lower-layer node that would otherwise require an upper copy.
 */
#define NODE_IS_SPECIAL(vp) \
	((vp)->v_type == VBLK || (vp)->v_type == VCHR || \
	(vp)->v_type == VSOCK || (vp)->v_type == VFIFO)
200
201 int
202 union_parsepath(void *v)
203 {
204 struct vop_parsepath_args /* {
205 struct vnode *a_dvp;
206 const char *a_name;
207 size_t *a_retval;
208 } */ *ap = v;
209 struct vnode *upperdvp, *lowerdvp;
210 size_t upper, lower;
211 int error;
212
213 upperdvp = UPPERVP(ap->a_dvp);
214 lowerdvp = LOWERVP(ap->a_dvp);
215
216 if (upperdvp != NULLVP) {
217 error = VOP_PARSEPATH(upperdvp, ap->a_name, &upper);
218 if (error) {
219 return error;
220 }
221 } else {
222 upper = 0;
223 }
224
225 if (lowerdvp != NULLVP) {
226 error = VOP_PARSEPATH(lowerdvp, ap->a_name, &lower);
227 if (error) {
228 return error;
229 }
230 } else {
231 lower = 0;
232 }
233
234 if (upper == 0 && lower == 0) {
235 panic("%s: missing both layers", __func__);
236 }
237
238 /*
239 * If they're different, use the larger one. This is not a
240 * comprehensive solution, but it's sufficient for the
241 * non-default cases of parsepath that currently exist.
242 */
243 *ap->a_retval = MAX(upper, lower);
244 return 0;
245 }
246
/*
 * Look up a single component in one layer of the union.
 *
 * udvp is the root vnode of that layer's tree (used as a barrier so we
 * never walk above it); *dvpp is the locked directory to search in and
 * may be replaced if we cross a mount point; *vpp receives the locked
 * result on success.  cnp is the usual componentname.
 *
 * Returns 0 on success or an errno from VOP_LOOKUP/vn_lock/VFS_ROOT.
 */
static int
union_lookup1(struct vnode *udvp, struct vnode **dvpp, struct vnode **vpp,
	struct componentname *cnp)
{
	int error;
	struct vnode *tdvp;
	struct vnode *dvp;
	struct mount *mp;

	dvp = *dvpp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			/* Step to the covered vnode of the mount below. */
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			VOP_UNLOCK(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);
	if (dvp != tdvp) {
		/*
		 * Lock the result.  For ".." the parent must be unlocked
		 * first to preserve the child-before-parent lock order,
		 * then re-locked afterwards.
		 */
		if (cnp->cn_flags & ISDOTDOT)
			VOP_UNLOCK(dvp);
		error = vn_lock(tdvp, LK_EXCLUSIVE);
		if (cnp->cn_flags & ISDOTDOT)
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		if (error) {
			vrele(tdvp);
			return error;
		}
		dvp = tdvp;
	}

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	       (mp = dvp->v_mountedhere)) {
		/* Retry the check if the mount is busy right now. */
		if (vfs_busy(mp))
			continue;
		vput(dvp);
		error = VFS_ROOT(mp, LK_EXCLUSIVE, &tdvp);
		vfs_unbusy(mp);
		if (error) {
			return (error);
		}
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}
316
/*
 * Look up a component in a union directory: consult the upper layer
 * first, then (unless hidden by a whiteout or an opaque upper
 * directory) the lower layer, and combine the results into a union
 * node via union_allocvp().  See the large comment mid-function for
 * the three result cases.  May restart ("goto start") after creating
 * an upper-layer shadow directory.
 */
int
union_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	kauth_cred_t saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

#ifdef notyet
	/* Disabled: "..." would name the lower layer directly. */
	if (cnp->cn_namelen == 3 &&
			cnp->cn_nameptr[2] == '.' &&
			cnp->cn_nameptr[1] == '.' &&
			cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		if (dvp == NULLVP)
			return (ENOENT);
		vref(dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (0);
	}
#endif

	/* Refuse delete/rename of the last component on a read-only mount. */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

start:
	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		uerror = union_lookup1(um->um_uppervp, &upperdvp,
		    &uppervp, cnp);
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			/*
			 * Name absent from the upper layer: it may still
			 * be hidden, either by an explicit whiteout or by
			 * the upper directory being marked opaque.
			 */
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				lerror = VOP_GETATTR(upperdvp, &va,
				    cnp->cn_cred);
				if (lerror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* Use the mount's credentials for the lower layer. */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * we shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 */
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
		    &lowervp, cnp);
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp);
	} else {
		lerror = ENOENT;
		/* ".." in the lower layer comes from the union parent. */
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			lowervp = LOWERVP(dun->un_pvp);
			if (lowervp != NULLVP) {
				vref(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
				lerror = 0;
			}
		}
	}

	/*
	 * EJUSTRETURN is used by underlying filesystems to indicate that
	 * a directory modification op was started successfully.
	 * This will only happen in the upper layer, since
	 * the lower layer only does LOOKUPs.
	 * If this union is mounted read-only, bounce it now.
	 */

	if ((uerror == EJUSTRETURN) && (cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)))
		uerror = EROFS;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	*ap->a_vpp = NULLVP;


	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			/*
			 * We may be racing another process to make the
			 * upper-level shadow directory.  Be careful with
			 * locks/etc!
			 * If we have to create a shadow directory and want
			 * to commit the node we have to restart the lookup
			 * to get the componentname right.
			 */
			if (upperdvp) {
				VOP_UNLOCK(upperdvp);
				uerror = union_mkshadow(um, upperdvp, cnp,
				    &uppervp);
				vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY);
				if (uerror == 0 && cnp->cn_nameiop != LOOKUP) {
					vrele(uppervp);
					if (lowervp != NULLVP)
						vput(lowervp);
					goto start;
				}
			}
			if (uerror) {
				if (lowervp != NULLVP) {
					vput(lowervp);
					lowervp = NULLVP;
				}
				return (uerror);
			}
		}
	} else { /* uerror == 0 */
		if (uppervp != upperdvp)
			VOP_UNLOCK(uppervp);
	}

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp);

	/* Combine the layers; union_allocvp() takes over the references. */
	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
	    uppervp, lowervp, 1);

	if (error) {
		if (uppervp != NULLVP)
			vrele(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);
		return error;
	}

	return 0;
}
533
534 int
535 union_create(void *v)
536 {
537 struct vop_create_v3_args /* {
538 struct vnode *a_dvp;
539 struct vnode **a_vpp;
540 struct componentname *a_cnp;
541 struct vattr *a_vap;
542 } */ *ap = v;
543 struct union_node *un = VTOUNION(ap->a_dvp);
544 struct vnode *dvp = un->un_uppervp;
545 struct componentname *cnp = ap->a_cnp;
546
547 if (dvp != NULLVP) {
548 int error;
549 struct vnode *vp;
550 struct mount *mp;
551
552 mp = ap->a_dvp->v_mount;
553
554 vp = NULL;
555 error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
556 if (error)
557 return (error);
558
559 error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
560 NULLVP, 1);
561 if (error)
562 vrele(vp);
563 return (error);
564 }
565
566 return (EROFS);
567 }
568
569 int
570 union_whiteout(void *v)
571 {
572 struct vop_whiteout_args /* {
573 struct vnode *a_dvp;
574 struct componentname *a_cnp;
575 int a_flags;
576 } */ *ap = v;
577 struct union_node *un = VTOUNION(ap->a_dvp);
578 struct componentname *cnp = ap->a_cnp;
579
580 if (un->un_uppervp == NULLVP)
581 return (EOPNOTSUPP);
582
583 return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
584 }
585
586 int
587 union_mknod(void *v)
588 {
589 struct vop_mknod_v3_args /* {
590 struct vnode *a_dvp;
591 struct vnode **a_vpp;
592 struct componentname *a_cnp;
593 struct vattr *a_vap;
594 } */ *ap = v;
595 struct union_node *un = VTOUNION(ap->a_dvp);
596 struct vnode *dvp = un->un_uppervp;
597 struct componentname *cnp = ap->a_cnp;
598
599 if (dvp != NULLVP) {
600 int error;
601 struct vnode *vp;
602 struct mount *mp;
603
604 mp = ap->a_dvp->v_mount;
605 error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
606 if (error)
607 return (error);
608
609 error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
610 cnp, vp, NULLVP, 1);
611 if (error)
612 vrele(vp);
613 return (error);
614 }
615
616 return (EROFS);
617 }
618
/*
 * Open a union node.  If an upper vnode exists it is opened directly.
 * Otherwise the lower vnode is used; opening a regular lower file for
 * writing first copies it up to the upper layer (copy-on-write).
 * Device nodes are rejected with ENXIO on MNT_NODEV mounts.
 */
int
union_open(void *v)
{
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	kauth_cred_t cred = ap->a_cred;
	struct lwp *l = curlwp;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* Keep the existing data unless O_TRUNC was given. */
			error = union_copyup(un, (mode&O_TRUNC) == 0, cred, l);
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred);
			if (error == 0) {
				/* Writes now target the new upper vnode. */
				mutex_enter(un->un_uppervp->v_interlock);
				un->un_uppervp->v_writecount++;
				mutex_exit(un->un_uppervp->v_interlock);
			}
			return (error);
		}

		/*
		 * Just open the lower vnode, but check for nodev mount flag
		 */
		if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
		    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
			return ENXIO;
		/* Count lower opens so union_close() can balance them. */
		un->un_openl++;
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(tvp, mode, cred);
		VOP_UNLOCK(tvp);

		return (error);
	}
	/*
	 * Just open the upper vnode, checking for nodev mount flag first
	 */
	if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	error = VOP_OPEN(tvp, mode, cred);
	if (error == 0 && (ap->a_mode & FWRITE)) {
		mutex_enter(tvp->v_interlock);
		tvp->v_writecount++;
		mutex_exit(tvp->v_interlock);
	}

	return (error);
}
687
/*
 * Close a union node.  The close is passed through to the upper vnode
 * when one exists; otherwise to the lower vnode, balancing the
 * un_openl count taken in union_open().  Only the lower vnode needs
 * to be locked here (the upper path matches how it was opened).
 */
int
union_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	int error;
	bool do_lock;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		do_lock = false;
	} else {
		KASSERT(un->un_openl > 0);
		--un->un_openl;
		vp = un->un_lowervp;
		do_lock = true;
	}

	KASSERT(vp != NULLVP);
	/* Redirect the VOP call to the backing vnode. */
	ap->a_vp = vp;
	if ((ap->a_fflag & FWRITE)) {
		/* Writable opens always have an upper vnode (see open). */
		KASSERT(vp == un->un_uppervp);
		mutex_enter(vp->v_interlock);
		vp->v_writecount--;
		mutex_exit(vp->v_interlock);
	}
	if (do_lock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VCALL(vp, VOFFSET(vop_close), ap);
	if (do_lock)
		VOP_UNLOCK(vp);

	return error;
}
727
728 /*
729 * Check access permission on the union vnode.
730 * The access check being enforced is to check
731 * against both the underlying vnode, and any
732 * copied vnode. This ensures that no additional
733 * file permissions are given away simply because
734 * the user caused an implicit file copy.
735 */
/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(void *v)
{
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		accmode_t a_accmode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	int error = EACCES;	/* returned if neither layer is present */
	struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (ap->a_accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		case VBAD:
		case VBLK:
		case VCHR:
		case VSOCK:
		case VFIFO:
		case VNON:
		default:
			break;
		}
	}


	/* An upper vnode answers authoritatively. */
	if ((vp = un->un_uppervp) != NULLVP) {
		ap->a_vp = vp;
		return (VCALL(vp, VOFFSET(vop_access), ap));
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			/*
			 * For "below" mounts also require access with the
			 * mount's credentials, so the union never grants
			 * more than the lower layer would.
			 */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	}

	return (error);
}
797
798 /*
799 * We handle getattr only to change the fsid and
800 * track object sizes
801 */
/*
 * We handle getattr only to change the fsid and
 * track object sizes
 */
int
union_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;
	struct vattr *vap;
	struct vattr va;	/* scratch vattr for the lower layer */


	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 *
	 * To make life more interesting, some filesystems don't keep
	 * track of link counts in the expected way, and return a
	 * link count of `1' for those directories; if either of the
	 * component directories returns a link count of `1', we return a 1.
	 */

	vap = ap->a_vap;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred);
		if (error)
			return (error);
		/*
		 * NOTE(review): un_lock is taken here without a visible
		 * release; union_newsize() appears to consume it (compare
		 * the explicit mutex_exit in the else branch of
		 * union_read()) -- confirm against union_subr.c.
		 */
		mutex_enter(&un->un_lock);
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	/* Decide whether the lower layer must be consulted too. */
	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR) {
		/* Directories: get lower attrs into "va" for link fixup. */
		vp = un->un_lowervp;
		if (vp != NULLVP)
			vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		/* The lower vnode is not otherwise locked here. */
		if (vp == un->un_lowervp)
			vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, vap, ap->a_cred);
		if (vp == un->un_lowervp)
			VOP_UNLOCK(vp);
		if (error)
			return (error);
		mutex_enter(&un->un_lock);
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR)) {
		/*
		 * Link count manipulation:
		 *	- If both return "2", return 2 (no subdirs)
		 *	- If one or the other return "1", return "1" (ENOCLUE)
		 */
		if ((ap->a_vap->va_nlink == 2) &&
		    (vap->va_nlink == 2))
			;
		else if (ap->a_vap->va_nlink != 1) {
			if (vap->va_nlink == 1)
				ap->a_vap->va_nlink = 1;
			else
				ap->a_vap->va_nlink += vap->va_nlink;
		}
	}
	/* Report the union mount's fsid, not the underlying layer's. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return (0);
}
883
/*
 * Set attributes on a union node.  Attribute changes are applied to
 * the upper vnode; a size-truncation of a lower regular file first
 * forces a copy-up (for open with O_TRUNC|O_CREAT).  Size-only
 * changes on special lower nodes are tolerated; anything else
 * without an upper vnode fails with EROFS.
 */
int
union_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	bool size_only;		/* All but va_size are VNOVAL. */
	int error;

	size_only = (vap->va_flags == VNOVAL && vap->va_uid == (uid_t)VNOVAL &&
	    vap->va_gid == (gid_t)VNOVAL && vap->va_atime.tv_sec == VNOVAL &&
	    vap->va_mtime.tv_sec == VNOVAL && vap->va_mode == (mode_t)VNOVAL);

	if (!size_only && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			break;
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		/* Copy the data across only when not truncating to zero. */
		error = union_copyup(un, (vap->va_size != 0),
		    ap->a_cred, curlwp);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer, ignore size change to zero
	 * for devices to handle O_TRUNC and return read-only filesystem error
	 * otherwise.
	 */
	if (un->un_uppervp != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, vap, ap->a_cred);
		if ((error == 0) && (vap->va_size != VNOVAL)) {
			/* Track the new upper size (lock handed off). */
			mutex_enter(&un->un_lock);
			union_newsize(ap->a_vp, vap->va_size, VNOVAL);
		}
	} else {
		KASSERT(un->un_lowervp != NULLVP);
		if (NODE_IS_SPECIAL(un->un_lowervp)) {
			if (size_only &&
			    (vap->va_size == 0 || vap->va_size == VNOVAL))
				error = 0;
			else
				error = EROFS;
		} else {
			error = EROFS;
		}
	}

	return (error);
}
965
/*
 * Read from whichever layer backs this node (the lower vnode must be
 * locked around the call; the upper one arrives already locked).  On
 * success, grow the cached layer size if the uio offset advanced past
 * what we had recorded.
 */
int
union_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;
		off_t usz = VNOVAL, lsz = VNOVAL;

		/* un_lock is released by union_newsize() or below. */
		mutex_enter(&un->un_lock);
		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				usz = cur;
		} else {
			if (cur > un->un_lowersz)
				lsz = cur;
		}

		if (usz != VNOVAL || lsz != VNOVAL)
			union_newsize(ap->a_vp, usz, lsz);
		else
			mutex_exit(&un->un_lock);
	}

	return (error);
}
1013
1014 int
1015 union_write(void *v)
1016 {
1017 struct vop_read_args /* {
1018 struct vnode *a_vp;
1019 struct uio *a_uio;
1020 int a_ioflag;
1021 kauth_cred_t a_cred;
1022 } */ *ap = v;
1023 int error;
1024 struct vnode *vp;
1025 struct union_node *un = VTOUNION(ap->a_vp);
1026
1027 vp = UPPERVP(ap->a_vp);
1028 if (vp == NULLVP) {
1029 vp = LOWERVP(ap->a_vp);
1030 if (NODE_IS_SPECIAL(vp)) {
1031 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1032 error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag,
1033 ap->a_cred);
1034 VOP_UNLOCK(vp);
1035 return error;
1036 }
1037 panic("union: missing upper layer in write");
1038 }
1039
1040 error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1041
1042 /*
1043 * the size of the underlying object may be changed by the
1044 * write.
1045 */
1046 if (error == 0) {
1047 off_t cur = ap->a_uio->uio_offset;
1048
1049 mutex_enter(&un->un_lock);
1050 if (cur > un->un_uppersz)
1051 union_newsize(ap->a_vp, cur, VNOVAL);
1052 else
1053 mutex_exit(&un->un_lock);
1054 }
1055
1056 return (error);
1057 }
1058
1059 int
1060 union_ioctl(void *v)
1061 {
1062 struct vop_ioctl_args /* {
1063 struct vnode *a_vp;
1064 int a_command;
1065 void *a_data;
1066 int a_fflag;
1067 kauth_cred_t a_cred;
1068 } */ *ap = v;
1069 struct vnode *ovp = OTHERVP(ap->a_vp);
1070
1071 ap->a_vp = ovp;
1072 return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1073 }
1074
1075 int
1076 union_poll(void *v)
1077 {
1078 struct vop_poll_args /* {
1079 struct vnode *a_vp;
1080 int a_events;
1081 } */ *ap = v;
1082 struct vnode *ovp = OTHERVP(ap->a_vp);
1083
1084 ap->a_vp = ovp;
1085 return (VCALL(ovp, VOFFSET(vop_poll), ap));
1086 }
1087
1088 int
1089 union_revoke(void *v)
1090 {
1091 struct vop_revoke_args /* {
1092 struct vnode *a_vp;
1093 int a_flags;
1094 struct proc *a_p;
1095 } */ *ap = v;
1096 struct vnode *vp = ap->a_vp;
1097
1098 if (UPPERVP(vp))
1099 VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1100 if (LOWERVP(vp))
1101 VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1102 vgone(vp); /* XXXAD?? */
1103 return (0);
1104 }
1105
1106 int
1107 union_mmap(void *v)
1108 {
1109 struct vop_mmap_args /* {
1110 struct vnode *a_vp;
1111 vm_prot_t a_prot;
1112 kauth_cred_t a_cred;
1113 } */ *ap = v;
1114 struct vnode *ovp = OTHERVP(ap->a_vp);
1115
1116 ap->a_vp = ovp;
1117 return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1118 }
1119
/*
 * Sync a union node.  Device nodes are flushed via spec_fsync first.
 * FSYNC_RECLAIM requests stop here (see comment below); everything
 * else is passed to the backing layer, locking the lower vnode around
 * the call when it is the target.
 */
int
union_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
	} */ *ap = v;
	int error = 0;
	struct vnode *targetvp;

	/*
	 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't
	 * bother syncing the underlying vnodes, since (a) they'll be
	 * fsync'ed when reclaimed and (b) we could deadlock if
	 * they're locked; otherwise, pass it through to the
	 * underlying layer.
	 */
	if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) {
		error = spec_fsync(v);
		if (error)
			return error;
	}

	if (ap->a_flags & FSYNC_RECLAIM)
		return 0;

	targetvp = OTHERVP(ap->a_vp);
	if (targetvp != NULLVP) {
		int dolock = (targetvp == LOWERVP(ap->a_vp));

		if (dolock)
			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_flags,
			    ap->a_offlo, ap->a_offhi);
		if (dolock)
			VOP_UNLOCK(targetvp);
	}

	return (error);
}
1163
1164 int
1165 union_seek(void *v)
1166 {
1167 struct vop_seek_args /* {
1168 struct vnode *a_vp;
1169 off_t a_oldoff;
1170 off_t a_newoff;
1171 kauth_cred_t a_cred;
1172 } */ *ap = v;
1173 struct vnode *ovp = OTHERVP(ap->a_vp);
1174
1175 ap->a_vp = ovp;
1176 return (VCALL(ovp, VOFFSET(vop_seek), ap));
1177 }
1178
/*
 * Remove a name from the union.  The parent directory must have an
 * upper layer, since all modifications happen there.  If the object
 * exists only in the lower layer, it is hidden with a whiteout
 * instead of being removed.
 */
int
union_remove(void *v)
{
	struct vop_remove_v3_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
		nlink_t ctx_vp_new_nlink;
	} */ *ap = v;
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union remove: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		/*
		 * Upper-layer object exists: remove it there, adding a
		 * whiteout if a lower-layer entry of the same name
		 * would otherwise become visible.
		 */
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/* Account for VOP_REMOVE to vrele vp. */
		vref(vp);
		if (union_dowhiteout(un, cnp->cn_cred))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(dvp, vp, cnp);
		if (!error)
			union_removed_upper(un);
		vrele(ap->a_vp);
	} else {
		/*
		 * Lower layer only: we cannot modify it, so just plant
		 * a whiteout in the upper directory to hide the name.
		 */
		error = union_mkwhiteout(
		    MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
		    dun->un_uppervp, ap->a_cnp, un);
		vput(ap->a_vp);
	}

	return (error);
}
1217
/*
 * Create a hard link.  The link must be made in the upper layer; a
 * source that exists only in the lower layer is copied up first.
 * Copyup may drop the parent directory's lock, so the lookup state
 * for the parent is re-established with relookup() afterwards.
 */
int
union_link(void *v)
{
	struct vop_link_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error = 0;
	struct componentname *cnp = ap->a_cnp;
	struct union_node *dun;
	struct vnode *vp;
	struct vnode *dvp;

	dun = VTOUNION(ap->a_dvp);

	KASSERT((ap->a_cnp->cn_flags & LOCKPARENT) != 0);

	if (ap->a_dvp->v_op != ap->a_vp->v_op) {
		/* Source is not a union vnode; link it as-is. */
		vp = ap->a_vp;
	} else {
		struct union_node *un = VTOUNION(ap->a_vp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * Drop the parent's lock during copyup if the
			 * source's directory IS the upper parent, to
			 * avoid self-deadlock inside union_copyup().
			 */
			const bool droplock = (dun->un_uppervp == un->un_dirvp);

			/*
			 * Needs to be copied before we can link it.
			 */
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
			if (droplock)
				VOP_UNLOCK(dun->un_uppervp);
			error = union_copyup(un, 1, cnp->cn_cred, curlwp);
			if (droplock) {
				vn_lock(dun->un_uppervp,
				    LK_EXCLUSIVE | LK_RETRY);
				/*
				 * During copyup, we dropped the lock on the
				 * dir and invalidated any saved namei lookup
				 * state for the directory we'll be entering
				 * the link in. We need to re-run the lookup
				 * in that directory to reset any state needed
				 * for VOP_LINK.
				 * Call relookup on the union-layer to reset
				 * the state.
				 */
				vp = NULLVP;
				if (dun->un_uppervp == NULLVP)
					panic("union: null upperdvp?");
				error = relookup(ap->a_dvp, &vp, ap->a_cnp, 0);
				if (error) {
					VOP_UNLOCK(ap->a_vp);
					return EROFS;	/* ? */
				}
				if (vp != NULLVP) {
					/*
					 * The name we want to create has
					 * mysteriously appeared (a race?)
					 */
					error = EEXIST;
					VOP_UNLOCK(ap->a_vp);
					vput(vp);
					return (error);
				}
			}
			VOP_UNLOCK(ap->a_vp);
		}
		vp = un->un_uppervp;
	}

	/* The link itself can only be made in the upper layer. */
	dvp = dun->un_uppervp;
	if (dvp == NULLVP)
		error = EROFS;

	if (error)
		return (error);

	return VOP_LINK(dvp, vp, cnp);
}
1296
/*
 * Rename within the union.  Each of the four vnodes is mapped to its
 * upper-layer counterpart before the rename is passed down; a source
 * or target directory with no upper layer cannot be modified, so the
 * operation fails with EXDEV in that case.  A lower-layer source is
 * hidden with a whiteout after the rename.
 */
int
union_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Account for VOP_RENAME to vrele all nodes.
	 * Note: VOP_RENAME will unlock tdvp.
	 */

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		fdvp = un->un_uppervp;
		vref(fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == NULLVP) {
			/* XXX: should do a copyup */
			error = EXDEV;
			goto bad;
		}

		/*
		 * Leave a whiteout behind so the lower-layer object of
		 * the same name does not reappear after the rename.
		 */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;

		fvp = un->un_uppervp;
		vref(fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		tdvp = un->un_uppervp;
		vref(tdvp);
	}

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		/* A lower-layer-only target simply won't be replaced. */
		tvp = un->un_uppervp;
		if (tvp != NULLVP) {
			vref(tvp);
		}
	}

	error = VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp);
	goto out;

 bad:
	/* Release the caller's references ourselves on early failure. */
	vput(tdvp);
	if (tvp != NULLVP)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);

 out:
	/* Drop the extra references taken on any remapped vnodes. */
	if (fdvp != ap->a_fdvp) {
		vrele(ap->a_fdvp);
	}
	if (fvp != ap->a_fvp) {
		vrele(ap->a_fvp);
	}
	if (tdvp != ap->a_tdvp) {
		vrele(ap->a_tdvp);
	}
	if (tvp != ap->a_tvp) {
		vrele(ap->a_tvp);
	}
	return (error);
}
1403
/*
 * Create a directory.  Directories can only be made in the upper
 * layer; the new upper-layer vnode is then wrapped in a union node.
 */
int
union_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;

		vp = NULL;
		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			/*
			 * NOTE(review): vop_mkdir_v3 callers normally
			 * retain their own reference on a_dvp; confirm
			 * this vrele does not over-release the union
			 * directory vnode on the error path.
			 */
			vrele(ap->a_dvp);
			return (error);
		}

		/* Wrap the new upper-layer directory in a union node. */
		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
		    NULLVP, cnp, vp, NULLVP, 1);
		if (error)
			vrele(vp);
		return (error);
	}

	return (EROFS);
}
1437
/*
 * Remove a directory from the union.  The parent must have an upper
 * layer; if the victim exists only in the lower layer it is hidden
 * with a whiteout rather than removed.
 */
int
union_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union rmdir: null upper vnode");

	/* Refuse if the unioned view of the directory is not empty. */
	error = union_check_rmdir(un, cnp->cn_cred);
	if (error) {
		vput(ap->a_vp);
		return error;
	}

	if (un->un_uppervp != NULLVP) {
		/*
		 * Remove the upper-layer directory, whiting out the
		 * name if a lower-layer entry would otherwise show
		 * through.
		 */
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/* Account for VOP_RMDIR to vrele vp. */
		vref(vp);
		if (union_dowhiteout(un, cnp->cn_cred))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		vrele(ap->a_vp);
	} else {
		/* Lower layer only: hide the name with a whiteout. */
		error = union_mkwhiteout(
		    MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
		    dun->un_uppervp, ap->a_cnp, un);
		vput(ap->a_vp);
	}

	return (error);
}
1481
1482 int
1483 union_symlink(void *v)
1484 {
1485 struct vop_symlink_v3_args /* {
1486 struct vnode *a_dvp;
1487 struct vnode **a_vpp;
1488 struct componentname *a_cnp;
1489 struct vattr *a_vap;
1490 char *a_target;
1491 } */ *ap = v;
1492 struct union_node *un = VTOUNION(ap->a_dvp);
1493 struct vnode *dvp = un->un_uppervp;
1494 struct componentname *cnp = ap->a_cnp;
1495
1496 if (dvp != NULLVP) {
1497 int error;
1498
1499 error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1500 ap->a_target);
1501 return (error);
1502 }
1503
1504 return (EROFS);
1505 }
1506
1507 /*
1508 * union_readdir works in concert with getdirentries and
1509 * readdir(3) to provide a list of entries in the unioned
1510 * directories. getdirentries is responsible for walking
1511 * down the union stack. readdir(3) is responsible for
1512 * eliminating duplicate names from the returned data stream.
1513 */
int
union_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap = v;
	/*
	 * NOTE(review): the cookie member types in the comment above
	 * look stale vs. the current vop_readdir_args declaration --
	 * verify against sys/vnode_if.h.
	 */
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	int dolock, error;

	/*
	 * Hook nodes expose only the lower directory: they never have
	 * an upper vnode, and the lower vnode is not covered by the
	 * union node's lock, so it must be locked here explicitly.
	 * Ordinary nodes read just the upper directory at this level;
	 * the lower layer is walked by getdirentries (see above).
	 */
	if (un->un_hooknode) {
		KASSERT(un->un_uppervp == NULLVP);
		KASSERT(un->un_lowervp != NULLVP);
		vp = un->un_lowervp;
		dolock = 1;
	} else {
		vp = un->un_uppervp;
		dolock = 0;
	}
	/* No upper layer: nothing to return from this level. */
	if (vp == NULLVP)
		return 0;

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_readdir), ap);
	if (dolock)
		VOP_UNLOCK(vp);

	return error;
}
1551
1552 int
1553 union_readlink(void *v)
1554 {
1555 struct vop_readlink_args /* {
1556 struct vnode *a_vp;
1557 struct uio *a_uio;
1558 kauth_cred_t a_cred;
1559 } */ *ap = v;
1560 int error;
1561 struct vnode *vp = OTHERVP(ap->a_vp);
1562 int dolock = (vp == LOWERVP(ap->a_vp));
1563
1564 if (dolock)
1565 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1566 ap->a_vp = vp;
1567 error = VCALL(vp, VOFFSET(vop_readlink), ap);
1568 if (dolock)
1569 VOP_UNLOCK(vp);
1570
1571 return (error);
1572 }
1573
1574 int
1575 union_abortop(void *v)
1576 {
1577 struct vop_abortop_args /* {
1578 struct vnode *a_dvp;
1579 struct componentname *a_cnp;
1580 } */ *ap = v;
1581
1582 KASSERT(UPPERVP(ap->a_dvp) != NULL);
1583
1584 ap->a_dvp = UPPERVP(ap->a_dvp);
1585 return VCALL(ap->a_dvp, VOFFSET(vop_abortop), ap);
1586 }
1587
int
union_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	/* Drop the cached directory-stack vnodes, if any were built. */
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	/* Only recycle the vnode once it has left the union cache. */
	*ap->a_recycle = ((un->un_cflags & UN_CACHED) == 0);

	return (0);
}
1624
int
union_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *uvp = UPPERVP(vp);

	VOP_UNLOCK(vp);

	if (uvp != NULL) {
		/*
		 * The union vnode shares its interlock with the upper
		 * vnode; subtract our write-count contribution from
		 * the upper vnode before this vnode goes away.
		 */
		mutex_enter(uvp->v_interlock);
		KASSERT(vp->v_interlock == uvp->v_interlock);
		uvp->v_writecount -= vp->v_writecount;
		mutex_exit(uvp->v_interlock);
	}

	/* Release the underlying vnodes and free the union node. */
	union_freevp(vp);

	return (0);
}
1647
1648 static int
1649 union_lock1(struct vnode *vp, struct vnode *lockvp, int flags)
1650 {
1651 struct vop_lock_args ap;
1652
1653 ap.a_desc = VDESC(vop_lock);
1654 ap.a_vp = lockvp;
1655 ap.a_flags = flags;
1656
1657 if (lockvp == vp)
1658 return genfs_lock(&ap);
1659 else
1660 return VCALL(ap.a_vp, VOFFSET(vop_lock), &ap);
1661 }
1662
1663 static int
1664 union_unlock1(struct vnode *vp, struct vnode *lockvp)
1665 {
1666 struct vop_unlock_args ap;
1667
1668 ap.a_desc = VDESC(vop_unlock);
1669 ap.a_vp = lockvp;
1670
1671 if (lockvp == vp)
1672 return genfs_unlock(&ap);
1673 else
1674 return VCALL(ap.a_vp, VOFFSET(vop_unlock), &ap);
1675 }
1676
/*
 * Lock a union vnode.  The lock is really taken on LOCKVP(vp) --
 * the upper vnode when one exists, otherwise the union vnode
 * itself.  A copyup can change LOCKVP() concurrently, so the
 * blocking path retries until the choice is stable, and both paths
 * back out if the vnode was revoked while we slept.
 */
int
union_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lockvp;
	struct union_node *un = VTOUNION(vp);
	int flags = ap->a_flags;
	int error;

	if ((flags & LK_NOWAIT) != 0) {
		/* Non-blocking: fail with EBUSY at the first obstacle. */
		if (!mutex_tryenter(&un->un_lock))
			return EBUSY;
		/* un_lock is held, so LOCKVP() cannot change here. */
		lockvp = LOCKVP(vp);
		error = union_lock1(vp, lockvp, flags);
		mutex_exit(&un->un_lock);
		if (error)
			return error;
		if (mutex_tryenter(vp->v_interlock)) {
			error = vdead_check(vp, VDEAD_NOWAIT);
			mutex_exit(vp->v_interlock);
		} else
			error = EBUSY;
		if (error)
			union_unlock1(vp, lockvp);
		return error;
	}

	/*
	 * Blocking path: un_lock must be dropped while sleeping in
	 * union_lock1(), so re-check that the lock vnode is still
	 * the right one afterwards and retry if it changed.
	 */
	mutex_enter(&un->un_lock);
	for (;;) {
		lockvp = LOCKVP(vp);
		mutex_exit(&un->un_lock);
		error = union_lock1(vp, lockvp, flags);
		if (error != 0)
			return error;
		mutex_enter(&un->un_lock);
		if (lockvp == LOCKVP(vp))
			break;
		union_unlock1(vp, lockvp);
	}
	mutex_exit(&un->un_lock);

	/* Fail the lock if the vnode was revoked while we slept. */
	mutex_enter(vp->v_interlock);
	error = vdead_check(vp, VDEAD_NOWAIT);
	if (error) {
		union_unlock1(vp, lockvp);
		error = vdead_check(vp, 0);
		KASSERT(error == ENOENT);
	}
	mutex_exit(vp->v_interlock);
	return error;
}
1731
1732 int
1733 union_unlock(void *v)
1734 {
1735 struct vop_unlock_args /* {
1736 struct vnode *a_vp;
1737 int a_flags;
1738 } */ *ap = v;
1739 struct vnode *vp = ap->a_vp, *lockvp;
1740
1741 lockvp = LOCKVP(vp);
1742 union_unlock1(vp, lockvp);
1743
1744 return 0;
1745 }
1746
1747 int
1748 union_bmap(void *v)
1749 {
1750 struct vop_bmap_args /* {
1751 struct vnode *a_vp;
1752 daddr_t a_bn;
1753 struct vnode **a_vpp;
1754 daddr_t *a_bnp;
1755 int *a_runp;
1756 } */ *ap = v;
1757 int error;
1758 struct vnode *vp = OTHERVP(ap->a_vp);
1759 int dolock = (vp == LOWERVP(ap->a_vp));
1760
1761 if (dolock)
1762 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1763 ap->a_vp = vp;
1764 error = VCALL(vp, VOFFSET(vop_bmap), ap);
1765 if (dolock)
1766 VOP_UNLOCK(vp);
1767
1768 return (error);
1769 }
1770
1771 int
1772 union_print(void *v)
1773 {
1774 struct vop_print_args /* {
1775 struct vnode *a_vp;
1776 } */ *ap = v;
1777 struct vnode *vp = ap->a_vp;
1778
1779 printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1780 vp, UPPERVP(vp), LOWERVP(vp));
1781 if (UPPERVP(vp) != NULLVP)
1782 vprint("union: upper", UPPERVP(vp));
1783 if (LOWERVP(vp) != NULLVP)
1784 vprint("union: lower", LOWERVP(vp));
1785 if (VTOUNION(vp)->un_dircache) {
1786 struct vnode **vpp;
1787 for (vpp = VTOUNION(vp)->un_dircache; *vpp != NULLVP; vpp++)
1788 vprint("dircache:", *vpp);
1789 }
1790
1791 return (0);
1792 }
1793
1794 int
1795 union_islocked(void *v)
1796 {
1797 struct vop_islocked_args /* {
1798 struct vnode *a_vp;
1799 } */ *ap = v;
1800 struct vnode *vp;
1801 struct union_node *un;
1802
1803 un = VTOUNION(ap->a_vp);
1804 mutex_enter(&un->un_lock);
1805 vp = LOCKVP(ap->a_vp);
1806 mutex_exit(&un->un_lock);
1807
1808 if (vp == ap->a_vp)
1809 return genfs_islocked(ap);
1810 else
1811 return VOP_ISLOCKED(vp);
1812 }
1813
1814 int
1815 union_pathconf(void *v)
1816 {
1817 struct vop_pathconf_args /* {
1818 struct vnode *a_vp;
1819 int a_name;
1820 int *a_retval;
1821 } */ *ap = v;
1822 int error;
1823 struct vnode *vp = OTHERVP(ap->a_vp);
1824 int dolock = (vp == LOWERVP(ap->a_vp));
1825
1826 if (dolock)
1827 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1828 ap->a_vp = vp;
1829 error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1830 if (dolock)
1831 VOP_UNLOCK(vp);
1832
1833 return (error);
1834 }
1835
1836 int
1837 union_advlock(void *v)
1838 {
1839 struct vop_advlock_args /* {
1840 struct vnode *a_vp;
1841 void *a_id;
1842 int a_op;
1843 struct flock *a_fl;
1844 int a_flags;
1845 } */ *ap = v;
1846 struct vnode *ovp = OTHERVP(ap->a_vp);
1847
1848 ap->a_vp = ovp;
1849 return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1850 }
1851
/*
 * Queue an I/O request on the backing vnode.  A write must never be
 * aimed at the (read-only) lower layer, except for special device
 * nodes which share their pages/buffers across layers.
 */
int
union_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);
	struct buf *bp = ap->a_bp;

	KASSERT(ovp != NULLVP);
	if (!NODE_IS_SPECIAL(ovp))
		KASSERT((bp->b_flags & B_READ) || ovp != LOWERVP(bp->b_vp));

	return (VOP_STRATEGY(ovp, bp));
}
1868
/*
 * Write a buffer through the backing vnode; same lower-layer
 * write restriction as union_strategy() above.
 */
int
union_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);
	struct buf *bp = ap->a_bp;

	KASSERT(ovp != NULLVP);
	if (!NODE_IS_SPECIAL(ovp))
		KASSERT((bp->b_flags & B_READ) || ovp != LOWERVP(bp->b_vp));

	return (VOP_BWRITE(ovp, bp));
}
1885
int
union_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	/*
	 * PGO_LOCKED callers cannot tolerate blocking in the lower
	 * layer; make them retry in unlocked mode instead.
	 */
	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = OTHERVP(vp);
	/*
	 * The union vnode shares its vmobjlock with the backing
	 * vnode, so the caller's lock stays valid across the switch.
	 */
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	/* Just pass the request on to the underlying layer. */
	return VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
}
1912
int
union_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	ap->a_vp = OTHERVP(vp);
	/* Shared vmobjlock: the caller's lock covers the backing vnode. */
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	/*
	 * On reclaim, do not push pages into the backing vnode; just
	 * release the object lock and report success (vop_putpages
	 * returns with the lock dropped).
	 */
	if (ap->a_flags & PGO_RECLAIM) {
		rw_exit(vp->v_uobj.vmobjlock);
		return 0;
	}

	/* Just pass the request on to the underlying layer. */
	return VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
}
1937
1938 int
1939 union_kqfilter(void *v)
1940 {
1941 struct vop_kqfilter_args /* {
1942 struct vnode *a_vp;
1943 struct knote *a_kn;
1944 } */ *ap = v;
1945 int error;
1946
1947 /*
1948 * We watch either the upper layer file (if it already exists),
1949 * or the lower layer one. If there is lower layer file only
1950 * at this moment, we will keep watching that lower layer file
1951 * even if upper layer file would be created later on.
1952 */
1953 if (UPPERVP(ap->a_vp))
1954 error = VOP_KQFILTER(UPPERVP(ap->a_vp), ap->a_kn);
1955 else if (LOWERVP(ap->a_vp))
1956 error = VOP_KQFILTER(LOWERVP(ap->a_vp), ap->a_kn);
1957 else {
1958 /* panic? */
1959 error = EOPNOTSUPP;
1960 }
1961
1962 return (error);
1963 }
1964