/*	$NetBSD: union_subr.c,v 1.78 2020/02/23 15:46:41 ad Exp $	*/

/*
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */

/*
 * Copyright (c) 1994 Jan-Simon Pendry
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_subr.c,v 1.78 2020/02/23 15:46:41 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <fs/union/union.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

static LIST_HEAD(uhashhead, union_node) *uhashtbl;
static u_long uhash_mask;		/* size of hash table - 1 */
#define UNION_HASH(u, l) \
	((((u_long) (u) + (u_long) (l)) >> 8) & uhash_mask)
#define NOHASH ((u_long)-1)

static kmutex_t uhash_lock;

static void union_newupper(struct union_node *, struct vnode *);
static void union_newlower(struct union_node *, struct vnode *);
static void union_ref(struct union_node *);
static void union_rele(struct union_node *);
static int union_do_lookup(struct vnode *, struct componentname *, kauth_cred_t, const char *);
int union_vn_close(struct vnode *, int, kauth_cred_t, struct lwp *);
static void union_dircache_r(struct vnode *, struct vnode ***, int *);
struct vnode *union_dircache(struct vnode *, struct lwp *);

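/*
 * Initialize the union_node hash table and the lock protecting it.
 */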
void
union_init(void)
{

	mutex_init(&uhash_lock, MUTEX_DEFAULT, IPL_NONE);
	uhashtbl = hashinit(desiredvnodes, HASH_LIST, true, &uhash_mask);
}

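/*
 * Reallocate the hash table after desiredvnodes changed and move
 * all cached union nodes onto their new hash chains.
 */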
void
union_reinit(void)
{
	struct union_node *un;
	struct uhashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&uhash_lock);
	oldhash = uhashtbl;
	oldmask = uhash_mask;
	uhashtbl = hash;
	uhash_mask = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((un = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(un, un_cache);
			val = UNION_HASH(un->un_uppervp, un->un_lowervp);
			LIST_INSERT_HEAD(&hash[val], un, un_cache);
		}
	}
	mutex_exit(&uhash_lock);
	hashdone(oldhash, HASH_LIST, oldmask);
}

/*
 * Free global unionfs resources.
 */
void
union_done(void)
{

	hashdone(uhashtbl, HASH_LIST, uhash_mask);
	mutex_destroy(&uhash_lock);

	/* Make sure to unset the readdir hook. */
	vn_union_readdir_hook = NULL;
}

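/*
 * Attach the lower vnode (lowervp) to (un) and move the node to the
 * hash chain matching the new (upper, lower) pair.  The union vnode
 * must be locked exclusively and may not have a lower vnode yet.
 */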
void
union_newlower(struct union_node *un, struct vnode *lowervp)
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(un->un_uppervp, lowervp);

	if (un->un_lowervp == lowervp)
		return;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	KASSERT(un->un_lowervp == NULL);

	mutex_enter(&uhash_lock);

	if (ohash != nhash && (un->un_cflags & UN_CACHED)) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_enter(&un->un_lock);
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;
	mutex_exit(&un->un_lock);
	if (ohash != nhash) {
		LIST_INSERT_HEAD(&uhashtbl[nhash], un, un_cache);
		un->un_cflags |= UN_CACHED;
	}

	mutex_exit(&uhash_lock);
}

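/*
 * Attach the upper vnode (uppervp) to (un), rehash the node, hand the
 * vnode lock over to the upper vnode and share its interlock and
 * vmobjlock with the union vnode.
 */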
void
union_newupper(struct union_node *un, struct vnode *uppervp)
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, un->un_lowervp);
	struct vop_lock_args lock_ap;
	struct vop_unlock_args unlock_ap;
	int error __diagused;

	if (un->un_uppervp == uppervp)
		return;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	KASSERT(un->un_uppervp == NULL);

	/*
	 * We have to transfer the vnode lock from the union vnode to
	 * the upper vnode.  Lock the upper vnode first.  We cannot use
	 * VOP_LOCK() here as it would break the fstrans state.
	 */
	lock_ap.a_desc = VDESC(vop_lock);
	lock_ap.a_vp = uppervp;
	lock_ap.a_flags = LK_EXCLUSIVE;
	error = VCALL(lock_ap.a_vp, VOFFSET(vop_lock), &lock_ap);
	KASSERT(error == 0);

	mutex_enter(&uhash_lock);

	if (ohash != nhash && (un->un_cflags & UN_CACHED)) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_enter(&un->un_lock);
	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	/*
	 * With the upper vnode in place unlock the union vnode to
	 * finalize the lock transfer.
	 */
	unlock_ap.a_desc = VDESC(vop_unlock);
	unlock_ap.a_vp = UNIONTOV(un);
	genfs_unlock(&unlock_ap);
	/* Update union vnode interlock & vmobjlock. */
	vshareilock(UNIONTOV(un), uppervp);
	rw_obj_hold(uppervp->v_uobj.vmobjlock);
	uvm_obj_setlock(&UNIONTOV(un)->v_uobj, uppervp->v_uobj.vmobjlock);
	mutex_exit(&un->un_lock);
	if (ohash != nhash) {
		LIST_INSERT_HEAD(&uhashtbl[nhash], un, un_cache);
		un->un_cflags |= UN_CACHED;
	}

	mutex_exit(&uhash_lock);
}

/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, notify the VM layer, giving priority
 * to the upper layer size.
 *
 * Mutex un_lock is held on entry and released on return.
 */
void
union_newsize(struct vnode *vp, off_t uppersz, off_t lowersz)
{
	struct union_node *un = VTOUNION(vp);
	off_t sz;

	KASSERT(mutex_owned(&un->un_lock));
	/* only interested in regular files */
	if (vp->v_type != VREG) {
		mutex_exit(&un->un_lock);
		uvm_vnp_setsize(vp, 0);
		return;
	}

	sz = VNOVAL;

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
		if (sz == VNOVAL)
			sz = un->un_uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
		if (sz == VNOVAL)
			sz = un->un_lowersz;
	}
	mutex_exit(&un->un_lock);

	if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
		printf("union: %s size now %qd\n",
		    uppersz != VNOVAL ? "upper" : "lower", sz);
#endif
		uvm_vnp_setsize(vp, sz);
	}
}

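/*
 * Reference counting for union nodes.  Dropping the last reference
 * releases the component vnodes and frees the node.
 */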
static void
union_ref(struct union_node *un)
{

	KASSERT(mutex_owned(&uhash_lock));
	un->un_refs++;
}

static void
union_rele(struct union_node *un)
{

	mutex_enter(&uhash_lock);
	un->un_refs--;
	if (un->un_refs > 0) {
		mutex_exit(&uhash_lock);
		return;
	}
	if (un->un_cflags & UN_CACHED) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_exit(&uhash_lock);

	if (un->un_pvp != NULLVP)
		vrele(un->un_pvp);
	if (un->un_uppervp != NULLVP)
		vrele(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vrele(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vrele(un->un_dirvp);
	if (un->un_path)
		free(un->un_path, M_TEMP);
	mutex_destroy(&un->un_lock);

	free(un, M_TEMP);
}

/*
 * allocate a union_node/vnode pair. the vnode is
 * referenced and unlocked. the new vnode is returned
 * via (vpp). (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time. (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped. either, but not both, can be nil.
 * both, if supplied, are unlocked.
 * the references are either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on a hash
 * list. new nodes are only allocated when they cannot
 * be found on this list. entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * the vnode gets attached or referenced with vcache_get().
 */
int
union_allocvp(
	struct vnode **vpp,
	struct mount *mp,
	struct vnode *undvp,		/* parent union vnode */
	struct vnode *dvp,		/* may be null */
	struct componentname *cnp,	/* may be null */
	struct vnode *uppervp,		/* may be null */
	struct vnode *lowervp,		/* may be null */
	int docache)
{
	int error;
	struct union_node *un = NULL, *un1;
	struct vnode *vp, *xlowervp = NULLVP;
	u_long hash[3];
	int try;
	bool is_dotdot;

	is_dotdot = (dvp != NULL && cnp != NULL && (cnp->cn_flags & ISDOTDOT));

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}

	/*
	 * If both uppervp and lowervp are not NULL we have to
	 * search union nodes with one vnode as NULL too.
	 */
	hash[0] = UNION_HASH(uppervp, lowervp);
	if (uppervp == NULL || lowervp == NULL) {
		hash[1] = hash[2] = NOHASH;
	} else {
		hash[1] = UNION_HASH(uppervp, NULLVP);
		hash[2] = UNION_HASH(NULLVP, lowervp);
	}

	if (!docache) {
		un = NULL;
		goto found;
	}

loop:
	mutex_enter(&uhash_lock);

	for (try = 0; try < 3; try++) {
		if (hash[try] == NOHASH)
			continue;
		LIST_FOREACH(un, &uhashtbl[hash[try]], un_cache) {
			if ((un->un_lowervp && un->un_lowervp != lowervp) ||
			    (un->un_uppervp && un->un_uppervp != uppervp) ||
			    un->un_mount != mp)
				continue;

			union_ref(un);
			mutex_exit(&uhash_lock);
			error = vcache_get(mp, &un, sizeof(un), &vp);
			KASSERT(error != 0 || UNIONTOV(un) == vp);
			union_rele(un);
			if (error == ENOENT)
				goto loop;
			else if (error)
				goto out;
			goto found;
		}
	}

	mutex_exit(&uhash_lock);

found:
	if (un) {
		if (uppervp != dvp) {
			if (is_dotdot)
				VOP_UNLOCK(dvp);
			vn_lock(UNIONTOV(un), LK_EXCLUSIVE | LK_RETRY);
			if (is_dotdot)
				vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vrele(uppervp);
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP)) {
				un->un_path = malloc(cnp->cn_namelen+1,
				    M_TEMP, M_WAITOK);
				memcpy(un->un_path, cnp->cn_nameptr,
				    cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				vref(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		*vpp = UNIONTOV(un);
		if (uppervp != dvp)
			VOP_UNLOCK(*vpp);
		error = 0;
		goto out;
	}

	un = malloc(sizeof(struct union_node), M_TEMP, M_WAITOK);
	mutex_init(&un->un_lock, MUTEX_DEFAULT, IPL_NONE);
	un->un_refs = 1;
	un->un_mount = mp;
	un->un_vnode = NULL;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_pvp = undvp;
	if (undvp != NULLVP)
		vref(undvp);
	un->un_dircache = 0;
	un->un_openl = 0;
	un->un_cflags = 0;

	un->un_uppersz = VNOVAL;
	un->un_lowersz = VNOVAL;

	if (dvp && cnp && (lowervp != NULLVP)) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		memcpy(un->un_path, cnp->cn_nameptr, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		vref(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	if (docache) {
		mutex_enter(&uhash_lock);
		LIST_FOREACH(un1, &uhashtbl[hash[0]], un_cache) {
			if (un1->un_lowervp == lowervp &&
			    un1->un_uppervp == uppervp &&
			    un1->un_mount == mp) {
				/*
				 * Another thread beat us, push back freshly
				 * allocated node and retry.
				 */
				mutex_exit(&uhash_lock);
				union_rele(un);
				goto loop;
			}
		}
		LIST_INSERT_HEAD(&uhashtbl[hash[0]], un, un_cache);
		un->un_cflags |= UN_CACHED;
		mutex_exit(&uhash_lock);
	}

	error = vcache_get(mp, &un, sizeof(un), vpp);
	KASSERT(error != 0 || UNIONTOV(un) == *vpp);
	union_rele(un);
	if (error == ENOENT)
		goto loop;

out:
	if (xlowervp)
		vrele(xlowervp);

	return error;
}

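/*
 * Disassociate the union node from its vnode and drop the vnode's
 * reference on it.
 */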
int
union_freevp(struct vnode *vp)
{
	struct union_node *un = VTOUNION(vp);

	/* Detach vnode from union node. */
	un->un_vnode = NULL;
	un->un_uppersz = VNOVAL;
	un->un_lowersz = VNOVAL;

	/* Detach union node from vnode. */
	mutex_enter(vp->v_interlock);
	vp->v_data = NULL;
	mutex_exit(vp->v_interlock);

	union_rele(un);

	return 0;
}

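/*
 * vcache_get() helper: initialize the new vnode (vp) for the union
 * node passed as the cache key, sharing locks with the topmost
 * component vnode and setting the initial size.
 */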
int
union_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct vattr va;
	struct vnode *svp;
	struct union_node *un;
	struct union_mount *um;
	voff_t uppersz, lowersz;

	KASSERT(key_len == sizeof(un));
	memcpy(&un, key, key_len);

	um = MOUNTTOUNIONMOUNT(mp);
	svp = (un->un_uppervp != NULLVP) ? un->un_uppervp : un->un_lowervp;

	vp->v_tag = VT_UNION;
	vp->v_op = union_vnodeop_p;
	vp->v_data = un;
	un->un_vnode = vp;

	vp->v_type = svp->v_type;
	if (svp->v_type == VCHR || svp->v_type == VBLK)
		spec_node_init(vp, svp->v_rdev);

	vshareilock(vp, svp);
	rw_obj_hold(svp->v_uobj.vmobjlock);
	uvm_obj_setlock(&vp->v_uobj, svp->v_uobj.vmobjlock);

	/* detect the root vnode (and aliases) */
	if ((un->un_uppervp == um->um_uppervp) &&
	    ((un->un_lowervp == NULLVP) || un->un_lowervp == um->um_lowervp)) {
		if (un->un_lowervp == NULLVP) {
			un->un_lowervp = um->um_lowervp;
			if (un->un_lowervp != NULLVP)
				vref(un->un_lowervp);
		}
		vp->v_vflag |= VV_ROOT;
	}

	uppersz = lowersz = VNOVAL;
	if (un->un_uppervp != NULLVP) {
		if (vn_lock(un->un_uppervp, LK_SHARED) == 0) {
			if (VOP_GETATTR(un->un_uppervp, &va, FSCRED) == 0)
				uppersz = va.va_size;
			VOP_UNLOCK(un->un_uppervp);
		}
	}
	if (un->un_lowervp != NULLVP) {
		if (vn_lock(un->un_lowervp, LK_SHARED) == 0) {
			if (VOP_GETATTR(un->un_lowervp, &va, FSCRED) == 0)
				lowersz = va.va_size;
			VOP_UNLOCK(un->un_lowervp);
		}
	}

	mutex_enter(&un->un_lock);
	union_newsize(vp, uppersz, lowersz);

	mutex_enter(&uhash_lock);
	union_ref(un);
	mutex_exit(&uhash_lock);

	*new_key = &vp->v_data;

	return 0;
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(struct vnode *fvp, struct vnode *tvp, kauth_cred_t cred,
    struct lwp *l)
{
	char *tbuf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_offset = 0;
	UIO_SETUP_SYSSPACE(&uio);

	tbuf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = tbuf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = tbuf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(tbuf, M_TEMP);
	return (error);
}

/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
int
union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
    struct lwp *l)
{
	int error;
	struct vnode *lvp, *uvp;
	struct vattr lvattr, uvattr;

	error = union_vn_create(&uvp, un, l);
	if (error)
		return (error);

	union_newupper(un, uvp);

	lvp = un->un_lowervp;

	if (docopy) {
		/*
		 * XX - should not ignore errors
		 * from VOP_CLOSE
		 */
		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);

		error = VOP_GETATTR(lvp, &lvattr, cred);
		if (error == 0)
			error = VOP_OPEN(lvp, FREAD, cred);
		if (error == 0) {
			error = union_copyfile(lvp, uvp, cred, l);
			(void) VOP_CLOSE(lvp, FREAD, cred);
		}
		if (error == 0) {
			/* Copy permissions up too */
			vattr_null(&uvattr);
			uvattr.va_mode = lvattr.va_mode;
			uvattr.va_flags = lvattr.va_flags;
			error = VOP_SETATTR(uvp, &uvattr, cred);
		}
		VOP_UNLOCK(lvp);
#ifdef UNION_DIAGNOSTIC
		if (error == 0)
			uprintf("union: copied up %s\n", un->un_path);
#endif

	}
	union_vn_close(uvp, FWRITE, cred, l);

	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its reference counts right.  This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
	if (error == 0) {
		int i;

		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
		for (i = 0; i < un->un_openl; i++) {
			(void) VOP_CLOSE(lvp, FREAD, cred);
			(void) VOP_OPEN(uvp, FREAD, cred);
		}
		un->un_openl = 0;
		VOP_UNLOCK(lvp);
	}

	return (error);
}

/*
 * Prepare the creation of a new node in the upper layer.
 *
 * (dvp) is the directory in which to create the new node.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 * (cred) and (path) are the credentials and path used to fill (cnp).
 */
static int
union_do_lookup(struct vnode *dvp, struct componentname *cnp, kauth_cred_t cred,
    const char *path)
{
	int error;
	struct vnode *vp;

	cnp->cn_nameiop = CREATE;
	cnp->cn_flags = LOCKPARENT | ISLASTCN;
	cnp->cn_cred = cred;
	cnp->cn_nameptr = path;
	cnp->cn_namelen = strlen(path);

	error = VOP_LOOKUP(dvp, &vp, cnp);

	if (error == 0) {
		KASSERT(vp != NULL);
		VOP_ABORTOP(dvp, cnp);
		vrele(vp);
		error = EEXIST;
	} else if (error == EJUSTRETURN) {
		error = 0;
	}

	return error;
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to
 * the mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 *
 * N.B. We still attempt to create shadow directories even if the union
 * is mounted read-only, which is a little nonintuitive.
 */
int
union_mkshadow(struct union_mount *um, struct vnode *dvp,
    struct componentname *cnp, struct vnode **vpp)
{
	int error;
	struct vattr va;
	struct componentname cn;
	char *pnbuf;

	if (cnp->cn_namelen + 1 > MAXPATHLEN)
		return ENAMETOOLONG;
	pnbuf = PNBUF_GET();
	memcpy(pnbuf, cnp->cn_nameptr, cnp->cn_namelen);
	pnbuf[cnp->cn_namelen] = '\0';

	vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	error = union_do_lookup(dvp, &cn,
	    (um->um_op == UNMNT_ABOVE ? cnp->cn_cred : um->um_cred), pnbuf);
	if (error) {
		VOP_UNLOCK(dvp);
		PNBUF_PUT(pnbuf);
		return error;
	}

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	vattr_null(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	KASSERT(*vpp == NULL);
	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	VOP_UNLOCK(dvp);
	PNBUF_PUT(pnbuf);
	return error;
}

/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to
 * the mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 * (un) holds the path of the entry to be created.
 */
int
union_mkwhiteout(struct union_mount *um, struct vnode *dvp,
    struct componentname *cnp, struct union_node *un)
{
	int error;
	struct componentname cn;

	error = union_do_lookup(dvp, &cn,
	    (um->um_op == UNMNT_ABOVE ? cnp->cn_cred : um->um_cred),
	    un->un_path);
	if (error)
		return error;

	error = VOP_WHITEOUT(dvp, &cn, CREATE);
	return error;
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas union_do_lookup is told where to start.
 */
int
union_vn_create(struct vnode **vpp, struct union_node *un, struct lwp *l)
{
	struct vnode *vp;
	kauth_cred_t cred = l->l_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~l->l_proc->p_cwdi->cwdi_cmask;
	struct componentname cn;

	*vpp = NULLVP;

	vn_lock(un->un_dirvp, LK_EXCLUSIVE | LK_RETRY);

	error = union_do_lookup(un->un_dirvp, &cn, l->l_cred,
	    un->un_path);
	if (error) {
		VOP_UNLOCK(un->un_dirvp);
		return error;
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	vattr_null(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	vp = NULL;
	error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap);
	if (error) {
		VOP_UNLOCK(un->un_dirvp);
		return error;
	}

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_UNLOCK(un->un_dirvp);
	error = VOP_OPEN(vp, fmode, cred);
	if (error) {
		vput(vp);
		return error;
	}

	vp->v_writecount++;
	VOP_UNLOCK(vp);
	*vpp = vp;
	return 0;
}

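/*
 * Close (vp), dropping the write count if it was open for writing.
 */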
int
union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred, struct lwp *l)
{

	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred));
}

void
union_removed_upper(struct union_node *un)
{
	struct vnode *vp = UNIONTOV(un);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#if 1
	/*
	 * We do not set the uppervp to NULLVP here, because lowervp
	 * may also be NULLVP, so this routine would end up creating
	 * a bogus union node with no upper or lower VP (that causes
	 * pain in many places that assume at least one VP exists).
	 * Since we've removed this node from the cache hash chains,
	 * it won't be found again.  When all current holders
	 * release it, union_inactive() will vgone() it.
	 */
	union_diruncache(un);
#else
	union_newupper(un, NULLVP);
#endif

	VOP_UNLOCK(vp);

	mutex_enter(&uhash_lock);
	if (un->un_cflags & UN_CACHED) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_exit(&uhash_lock);
}

#if 0
struct vnode *
union_lowervp(struct vnode *vp)
{
	struct union_node *un = VTOUNION(vp);

	if ((un->un_lowervp != NULLVP) &&
	    (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0, true /* wait */) == 0)
			return (un->un_lowervp);
	}

	return (NULLVP);
}
#endif

/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
int
union_dowhiteout(struct union_node *un, kauth_cred_t cred)
{
	struct vattr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	if (VOP_GETATTR(un->un_uppervp, &va, cred) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);

	return (0);
}

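/*
 * Recursively collect the non-union component vnodes below (vp).
 * With (vppp) NULL the vnodes are only counted, otherwise each one
 * is referenced and stored in the table pointed to by (vppp).
 */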
static void
union_dircache_r(struct vnode *vp, struct vnode ***vppp, int *cntp)
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
		if (vppp) {
			vref(vp);
			*(*vppp)++ = vp;
			if (--(*cntp) == 0)
				panic("union: dircache table too small");
		} else {
			(*cntp)++;
		}

		return;
	}

	un = VTOUNION(vp);
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
}

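/*
 * Used by union_readdirhook(): return a union vnode, locked, stacked
 * over the next component directory recorded in the node's directory
 * cache, or NULLVP when there is none.  The cache is built on first use.
 */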
struct vnode *
union_dircache(struct vnode *vp, struct lwp *l)
{
	int cnt;
	struct vnode *nvp = NULLVP;
	struct vnode **vpp;
	struct vnode **dircache;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	dircache = VTOUNION(vp)->un_dircache;

	nvp = NULLVP;

	if (dircache == 0) {
		cnt = 0;
		union_dircache_r(vp, 0, &cnt);
		cnt++;
		dircache = (struct vnode **)
		    malloc(cnt * sizeof(struct vnode *),
			M_TEMP, M_WAITOK);
		vpp = dircache;
		union_dircache_r(vp, &vpp, &cnt);
		VTOUNION(vp)->un_dircache = dircache;
		*vpp = NULLVP;
		vpp = dircache + 1;
	} else {
		vpp = dircache;
		do {
			if (*vpp++ == VTOUNION(vp)->un_uppervp)
				break;
		} while (*vpp != NULLVP);
	}

	if (*vpp == NULLVP)
		goto out;

	vref(*vpp);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
	if (!error) {
		vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
		VTOUNION(vp)->un_dircache = 0;
		VTOUNION(nvp)->un_dircache = dircache;
	}

out:
	VOP_UNLOCK(vp);
	return (nvp);
}

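/*
 * Release the directory cache attached to (un), if any.
 */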
void
union_diruncache(struct union_node *un)
{
	struct vnode **vpp;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}
}

/*
 * Check whether node can rmdir (check empty).
 */
int
union_check_rmdir(struct union_node *un, kauth_cred_t cred)
{
	int dirlen, eofflag, error;
	char *dirbuf;
	struct vattr va;
	struct vnode *tvp;
	struct dirent *dp, *edp;
	struct componentname cn;
	struct iovec aiov;
	struct uio auio;

	KASSERT(un->un_uppervp != NULL);

	/* Check upper for being opaque. */
	KASSERT(VOP_ISLOCKED(un->un_uppervp));
	error = VOP_GETATTR(un->un_uppervp, &va, cred);
	if (error || (va.va_flags & OPAQUE))
		return error;

	if (un->un_lowervp == NULL)
		return 0;

	/* Check lower for being empty. */
	vn_lock(un->un_lowervp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(un->un_lowervp, &va, cred);
	if (error) {
		VOP_UNLOCK(un->un_lowervp);
		return error;
	}
	dirlen = va.va_blocksize;
	dirbuf = kmem_alloc(dirlen, KM_SLEEP);
	/* error = 0; */
	eofflag = 0;
	auio.uio_offset = 0;
	do {
		aiov.iov_len = dirlen;
		aiov.iov_base = dirbuf;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiov.iov_len;
		auio.uio_rw = UIO_READ;
		UIO_SETUP_SYSSPACE(&auio);
		error = VOP_READDIR(un->un_lowervp, &auio, cred, &eofflag,
		    NULL, NULL);
		if (error)
			break;
		edp = (struct dirent *)&dirbuf[dirlen - auio.uio_resid];
		for (dp = (struct dirent *)dirbuf;
		    error == 0 && dp < edp;
		    dp = (struct dirent *)((char *)dp + dp->d_reclen)) {
			if (dp->d_reclen == 0) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_type == DT_WHT ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !memcmp(dp->d_name, "..", 2)))
				continue;
			/* Check for presence in the upper layer. */
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = ISLASTCN | RDONLY;
			cn.cn_cred = cred;
			cn.cn_nameptr = dp->d_name;
			cn.cn_namelen = dp->d_namlen;
			error = VOP_LOOKUP(un->un_uppervp, &tvp, &cn);
			if (error == ENOENT && (cn.cn_flags & ISWHITEOUT)) {
				error = 0;
				continue;
			}
			if (error == 0)
				vrele(tvp);
			error = ENOTEMPTY;
		}
	} while (error == 0 && !eofflag);
	kmem_free(dirbuf, dirlen);
	VOP_UNLOCK(un->un_lowervp);

	return error;
}

/*
 * This hook is called from vn_readdir() to switch to the lower
 * directory once the upper directory has been read.
 */
int
union_readdirhook(struct vnode **vpp, struct file *fp, struct lwp *l)
{
	struct vnode *vp = *vpp, *lvp;
	struct vattr va;
	int error;

	if (vp->v_op != union_vnodeop_p)
		return (0);

	/*
	 * If the directory is opaque,
	 * then don't show lower entries
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, fp->f_cred);
	VOP_UNLOCK(vp);
	if (error || (va.va_flags & OPAQUE))
		return error;

	if ((lvp = union_dircache(vp, l)) == NULLVP)
		return (0);

	error = VOP_OPEN(lvp, FREAD, fp->f_cred);
	if (error) {
		vput(lvp);
		return (error);
	}
	VOP_UNLOCK(lvp);
	fp->f_vnode = lvp;
	fp->f_offset = 0;
	error = vn_close(vp, FREAD, fp->f_cred);
	if (error)
		return (error);
	*vpp = lvp;
	return (0);
}