/*	$NetBSD: umap_vnops.c,v 1.12 1999/03/22 17:24:22 sommerfe Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>


int umap_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */

int	umap_bypass	__P((void *));
int	umap_getattr	__P((void *));
int	umap_inactive	__P((void *));
int	umap_reclaim	__P((void *));
int	umap_print	__P((void *));
int	umap_rename	__P((void *));
int	umap_strategy	__P((void *));
int	umap_bwrite	__P((void *));
int	umap_lock	__P((void *));
int	umap_unlock	__P((void *));
int	umap_fsync	__P((void *));

extern int null_bypass __P((void *));

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 *
 */
int (**umap_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_lock_desc,	umap_lock },
	{ &vop_unlock_desc,	umap_unlock },
	{ &vop_fsync_desc,	umap_fsync },
	{ &vop_inactive_desc,	umap_inactive },
	{ &vop_reclaim_desc,	umap_reclaim },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_strategy_desc,	umap_strategy },
	{ &vop_bwrite_desc,	umap_bwrite },

	{ (struct vnodeop_desc *)NULL, (int (*) __P((void *)))NULL }
};
struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };

/*
 * This is the 10-Apr-92 bypass routine.
 * See null_vnops.c:null_bypass for more details.
 */
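/*
 * Note on mechanism: every vnodeop_desc records, among other things, the
 * byte offsets of the vnode, credential and componentname arguments within
 * its argument structure (VDESC_NO_OFFSET when absent).  VOPARG_OFFSETTO()
 * simply adds such an offset to "ap", which is how the code below locates
 * the arguments it must translate without knowing which operation it is
 * bypassing.
 */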
int
umap_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	struct ucred **credpp = 0, *credp = 0;
	struct ucred *savecredp = 0, *savecompcredp = 0;
	struct ucred *compcredp = 0;
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode *vp1 = 0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;
	struct componentname **compnamepp = 0;

	if (umap_bug_bypass)
		printf("umap_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("umap_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode **, descp->vdesc_vp_offsets[i], ap);

		if (i == 0) {
			vp1 = *vps_p[0];
		}

		/*
		 * We're not guaranteed that any vnodes but the first one
		 * are of our type.  Check for and don't map any that
		 * aren't.  (We must map the first vp or vclean fails.)
		 */

		if (i && ((*this_vp_p) == NULL ||
		    (*this_vp_p)->v_op != umap_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */
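	/*
	 * The credential is duplicated with crdup() before it is mapped, so
	 * the caller's ucred (which may be shared with other references) is
	 * never modified; the private copy is freed and the original
	 * restored at "out:" below.
	 */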

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred **,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED)
			*credpp = crdup(savecredp);
		credp = *credpp;

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp1->v_mount, credp);

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/*
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname **,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED)
			(*compnamepp)->cn_cred = crdup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if (umap_bug_bypass && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp1->v_mount, compcredp);

		if (umap_bug_bypass && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
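	/*
	 * umap_node_create() (in umap_subr.c) either finds the existing
	 * upper-layer alias for the vnode handed back by the lower layer or
	 * allocates a new umap node for it, so the caller always sees a
	 * vnode of this layer.
	 */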
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && credp && credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		if (savecredp != NOCRED) {
			crfree(credp);
			*credpp = savecredp;
			if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
				printf("umap_bypass: returning-user now %d\n\n",
				    savecredp->cr_uid);
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    compcredp->cr_uid);

		if (savecompcredp != NOCRED) {
			crfree(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			/* (Was testing credpp here; check the componentname cred instead.) */
			if (umap_bug_bypass && savecompcredp &&
			    savecompcredp->cr_uid != 0)
				printf("umap_bypass: returning-component-user now %d\n",
				    savecompcredp->cr_uid);
		}
	}

	return (error);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
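/*
 * Note: genfs_nolock() provides the degenerate lock behaviour for this
 * layer; its main job here is to release the vnode interlock when
 * LK_INTERLOCK is set, which is why the flag is cleared before the request
 * is passed down.
 */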
int
umap_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	genfs_nolock(ap);
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	genfs_nounlock(ap);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since (a) they'll be fsync'ed when
 * reclaimed and (b) we could deadlock if they're locked; otherwise,
 * pass it through to the underlying layer.
 */

int
umap_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_flags & FSYNC_RECLAIM)
		return 0;

	return (umap_bypass(ap));
}

/*
 * We handle getattr to change the fsid.
 */
int
umap_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to NULLGROUP.
	 */
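	/*
	 * The mount point carries two tables of u_long pairs, one for uids
	 * (info_mapdata) and one for gids (info_gmapdata).  umap_mapids()
	 * walks them in the forward direction for outgoing credentials;
	 * here we need the opposite lookup, so umap_reverse_findid() is
	 * used to recover the original id from the one the lower layer
	 * reported, falling back to NOBODY/NULLGROUP when no entry matches.
	 */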

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if (umap_bug_bypass)
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode **, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

/*ARGSUSED*/
int
umap_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our umap_node is in the
	 * cache and reusable.
	 */
	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

int
umap_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct umap_node *xp = VTOUMAP(vp);
	struct vnode *lowervp = xp->umap_lowervp;

	/* After this assignment, this node will not be re-used. */
	xp->umap_lowervp = NULL;
	LIST_REMOVE(xp, umap_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

int
umap_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(ap->a_bp);

	bp->b_vp = savedvp;

	return (error);
}

int
umap_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(ap->a_bp);

	bp->b_vp = savedvp;

	return (error);
}

int
umap_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(v)
	void *v;
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error;
	struct componentname *compnamep;
	struct ucred *compcredp, *savecompcredp;
	struct vnode *vp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */

	vp = ap->a_fdvp;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = crdup(savecompcredp);

	if (umap_bug_bypass && compcredp->cr_uid != 0)
		printf("umap_rename: rename component credit user was %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if (umap_bug_bypass && compcredp->cr_uid != 0)
		printf("umap_rename: rename component credit user now %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	error = umap_bypass(ap);

	/* Restore the additional mapped componentname cred structure. */

	crfree(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}