/* $NetBSD: umap_vnops.c,v 1.10 1998/03/01 02:21:51 fvdl Exp $ */

/*
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)umap_vnops.c 8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>


int umap_bug_bypass = 0;        /* for debugging: enables bypass printf'ing */

int umap_bypass __P((void *));
int umap_getattr __P((void *));
int umap_inactive __P((void *));
int umap_reclaim __P((void *));
int umap_print __P((void *));
int umap_rename __P((void *));
int umap_strategy __P((void *));
int umap_bwrite __P((void *));
int umap_lock __P((void *));
int umap_unlock __P((void *));

extern int null_bypass __P((void *));

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 */
int (**umap_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
        { &vop_default_desc, umap_bypass },

        { &vop_getattr_desc, umap_getattr },
        { &vop_lock_desc, umap_lock },
        { &vop_unlock_desc, umap_unlock },
        { &vop_inactive_desc, umap_inactive },
        { &vop_reclaim_desc, umap_reclaim },
        { &vop_print_desc, umap_print },
        { &vop_rename_desc, umap_rename },

        { &vop_strategy_desc, umap_strategy },
        { &vop_bwrite_desc, umap_bwrite },

        { (struct vnodeop_desc *)NULL, (int (*) __P((void *)))NULL }
};
struct vnodeopv_desc umapfs_vnodeop_opv_desc =
        { &umap_vnodeop_p, umap_vnodeop_entries };
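
/*
 * For reference, dispatch through this table is performed by the
 * generated VOP_*() wrappers (vnode_if.h), which index the vnode's op
 * vector by each operation's descriptor offset.  Roughly (a sketch for
 * illustration, not code from this file):
 *
 *        struct vop_getattr_args a;
 *
 *        a.a_desc = VDESC(vop_getattr);
 *        a.a_vp = vp; a.a_vap = vap; a.a_cred = cred; a.a_p = p;
 *        error = VCALL(vp, VOFFSET(vop_getattr), &a);
 *
 * and VCALL() expands to roughly
 *
 *        error = (*(vp->v_op[vop_getattr_desc.vdesc_offset]))(&a);
 *
 * so any operation without an explicit entry above resolves to
 * umap_bypass via vop_default_desc.
 */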

/*
 * This is the 10-Apr-92 bypass routine.
 * See null_vnops.c:null_bypass for more details.
 */
int
umap_bypass(v)
        void *v;
{
        struct vop_generic_args /* {
                struct vnodeop_desc *a_desc;
                <other random data follows, presumably>
        } */ *ap = v;
        struct ucred **credpp = 0, *credp = 0;
        struct ucred *savecredp = 0, *savecompcredp = 0;
        struct ucred *compcredp = 0;
        struct vnode **this_vp_p;
        int error;
        struct vnode *old_vps[VDESC_MAX_VPS];
        struct vnode *vp1 = 0;
        struct vnode **vps_p[VDESC_MAX_VPS];
        struct vnode ***vppp;
        struct vnodeop_desc *descp = ap->a_desc;
        int reles, i;
        struct componentname **compnamepp = 0;

        if (umap_bug_bypass)
                printf("umap_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
        /*
         * We require at least one vp.
         */
        if (descp->vdesc_vp_offsets == NULL ||
            descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
                panic("umap_bypass: no vp's in map.\n");
#endif

        /*
         * Map the vnodes going in.
         * Later, we'll invoke the operation based on
         * the first mapped vnode's operation vector.
         */
        reles = descp->vdesc_flags;
        for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
                if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
                        break;  /* bail out at end of list */
                vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **,
                    descp->vdesc_vp_offsets[i], ap);

                if (i == 0) {
                        vp1 = *vps_p[0];
                }

                /*
                 * We're not guaranteed that any vnodes but the first
                 * are of our type.  Check for and don't map any that
                 * aren't.  (We must map the first vp or vclean fails.)
                 */

                if (i && (*this_vp_p)->v_op != umap_vnodeop_p) {
                        old_vps[i] = NULL;
                } else {
                        old_vps[i] = *this_vp_p;
                        *(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
                        if (reles & 1)
                                VREF(*this_vp_p);
                }
        }

        /*
         * Fix the credentials.  (That's the purpose of this layer.)
         */
        if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

                credpp = VOPARG_OFFSETTO(struct ucred **,
                    descp->vdesc_cred_offset, ap);

                /* Save old values */
                savecredp = *credpp;
                if (savecredp != NOCRED)
                        *credpp = crdup(savecredp);
                credp = *credpp;

                if (umap_bug_bypass && credp->cr_uid != 0)
                        printf("umap_bypass: user was %d, group %d\n",
                            credp->cr_uid, credp->cr_gid);

                /* Map all ids in the credential structure. */
                umap_mapids(vp1->v_mount, credp);

                if (umap_bug_bypass && credp->cr_uid != 0)
                        printf("umap_bypass: user now %d, group %d\n",
                            credp->cr_uid, credp->cr_gid);
        }

        /*
         * BSD often keeps a credential in the componentname structure
         * for speed.  If there is one, it had better get mapped, too.
         */

        if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

                compnamepp = VOPARG_OFFSETTO(struct componentname **,
                    descp->vdesc_componentname_offset, ap);

                savecompcredp = (*compnamepp)->cn_cred;
                if (savecompcredp != NOCRED)
                        (*compnamepp)->cn_cred = crdup(savecompcredp);
                compcredp = (*compnamepp)->cn_cred;

                if (umap_bug_bypass && compcredp->cr_uid != 0)
                        printf("umap_bypass: component credit user was %d, group %d\n",
                            compcredp->cr_uid, compcredp->cr_gid);

                /* Map all ids in the credential structure. */
                umap_mapids(vp1->v_mount, compcredp);

                if (umap_bug_bypass && compcredp->cr_uid != 0)
                        printf("umap_bypass: component credit user now %d, group %d\n",
                            compcredp->cr_uid, compcredp->cr_gid);
        }

        /*
         * Call the operation on the lower layer
         * with the modified argument structure.
         */
        error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

        /*
         * Maintain the illusion of call-by-value
         * by restoring vnodes in the argument structure
         * to their original value.
         */
        reles = descp->vdesc_flags;
        for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
                if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
                        break;  /* bail out at end of list */
                if (old_vps[i]) {
                        *(vps_p[i]) = old_vps[i];
                        if (reles & 1)
                                vrele(*(vps_p[i]));
                }
        }

        /*
         * Map the possible out-going vpp
         * (Assumes that the lower layer always returns
         * a VREF'ed vpp unless it gets an error.)
         */
        if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
            !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
            !error) {
                if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
                        goto out;
                vppp = VOPARG_OFFSETTO(struct vnode ***,
                    descp->vdesc_vpp_offset, ap);
                error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
        }

out:
        /*
         * Free duplicate cred structure and restore old one.
         */
        if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
                if (umap_bug_bypass && credp && credp->cr_uid != 0)
                        printf("umap_bypass: returning-user was %d\n",
                            credp->cr_uid);

                if (savecredp != NOCRED) {
                        crfree(credp);
                        *credpp = savecredp;
                        if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
                                printf("umap_bypass: returning-user now %d\n\n",
                                    savecredp->cr_uid);
                }
        }

        if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
                if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
                        printf("umap_bypass: returning-component-user was %d\n",
                            compcredp->cr_uid);

                if (savecompcredp != NOCRED) {
                        crfree(compcredp);
                        (*compnamepp)->cn_cred = savecompcredp;
                                if (umap_bug_bypass && savecompcredp &&
                                    savecompcredp->cr_uid != 0)
                                        printf("umap_bypass: returning-component-user now %d\n",
                                            savecompcredp->cr_uid);
                }
        }

        return (error);
}
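
/*
 * The credential rewriting above is done by umap_mapids() (in
 * umap_subr.c, not shown here).  Conceptually it is a linear search of
 * the (from, to) id pairs that mount_umap(8) handed to the umap mount,
 * applied to cr_uid and cr_gid; a minimal sketch of such a lookup, using
 * a hypothetical helper name:
 *
 *        static long
 *        umap_example_findid(id, map, nentries)
 *                u_long id, map[][2];
 *                int nentries;
 *        {
 *                int i;
 *
 *                for (i = 0; i < nentries; i++)
 *                        if (map[i][0] == id)
 *                                return (map[i][1]);
 *                return (-1);
 *        }
 *
 * Ids with no mapping are presumably squashed to NOBODY/NULLGROUP, which
 * is also the fallback umap_getattr() uses below.
 */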

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_lock(v)
        void *v;
{
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap = v;

        genfs_nolock(ap);
        if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
                return (0);
        ap->a_flags &= ~LK_INTERLOCK;
        return (null_bypass(ap));
}
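
/*
 * For context: genfs_nolock() and genfs_nounlock() (miscfs/genfs) are
 * the stub locking routines used by layers that keep no lock state of
 * their own.  Presumably they do little more than release the interlock
 * when the caller passed it in, along these lines (a sketch, not the
 * actual genfs code):
 *
 *        if (ap->a_flags & LK_INTERLOCK)
 *                simple_unlock(&ap->a_vp->v_interlock);
 *        return (0);
 *
 * Because the interlock has then been handled at this level, LK_INTERLOCK
 * is cleared before the request is passed down through null_bypass().
 */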

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_unlock(v)
        void *v;
{
        struct vop_unlock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap = v;

        genfs_nounlock(ap);
        ap->a_flags &= ~LK_INTERLOCK;
        return (null_bypass(ap));
}

/*
 * We handle getattr to change the fsid.
 */
int
umap_getattr(v)
        void *v;
{
        struct vop_getattr_args /* {
                struct vnode *a_vp;
                struct vattr *a_vap;
                struct ucred *a_cred;
                struct proc *a_p;
        } */ *ap = v;
        uid_t uid;
        gid_t gid;
        int error, tmpid, nentries, gnentries;
        u_long (*mapdata)[2];
        u_long (*gmapdata)[2];
        struct vnode **vp1p;
        struct vnodeop_desc *descp = ap->a_desc;

        if ((error = umap_bypass(ap)) != 0)
                return (error);
        /* Requires that arguments be restored. */
        ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

        /*
         * Umap needs to map the uid and gid returned by a stat
         * into the proper values for this site.  This involves
         * finding the returned uid in the mapping information,
         * translating it into the uid on the other end,
         * and filling in the proper field in the vattr
         * structure pointed to by ap->a_vap.  The group
         * is easier, since currently all unmapped groups are
         * translated to NULLGROUP.
         */

        /* Find entry in map */

        uid = ap->a_vap->va_uid;
        gid = ap->a_vap->va_gid;
        if (umap_bug_bypass)
                printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
                    gid);

        vp1p = VOPARG_OFFSETTO(struct vnode **, descp->vdesc_vp_offsets[0], ap);
        nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
        mapdata = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata;
        gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
        gmapdata = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata;

        /*
         * Reverse map the uid for the vnode.  Since it's a reverse
         * map, we can't use umap_mapids() to do it.
         */

        tmpid = umap_reverse_findid(uid, mapdata, nentries);

        if (tmpid != -1) {
                ap->a_vap->va_uid = (uid_t)tmpid;
                if (umap_bug_bypass)
                        printf("umap_getattr: original uid = %d\n", uid);
        } else
                ap->a_vap->va_uid = (uid_t)NOBODY;

        /* Reverse map the gid for the vnode. */

        tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

        if (tmpid != -1) {
                ap->a_vap->va_gid = (gid_t)tmpid;
                if (umap_bug_bypass)
                        printf("umap_getattr: original gid = %d\n", gid);
        } else
                ap->a_vap->va_gid = (gid_t)NULLGROUP;

        return (0);
}
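
/*
 * umap_reverse_findid() lives in umap_subr.c; presumably it is the
 * forward lookup run in the opposite direction, i.e. something like the
 * following sketch (illustration only, assuming the same (from, to)
 * pair layout as above):
 *
 *        int
 *        umap_example_reverse_findid(id, map, nentries)
 *                u_long id, map[][2];
 *                int nentries;
 *        {
 *                int i;
 *
 *                for (i = 0; i < nentries; i++)
 *                        if (map[i][1] == id)
 *                                return (map[i][0]);
 *                return (-1);
 *        }
 */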

/*ARGSUSED*/
int
umap_inactive(v)
        void *v;
{
        struct vop_inactive_args /* {
                struct vnode *a_vp;
                struct proc *a_p;
        } */ *ap = v;

        /*
         * Do nothing (and _don't_ bypass).
         * Wait to vrele lowervp until reclaim,
         * so that until then our umap_node is in the
         * cache and reusable.
         */
        VOP_UNLOCK(ap->a_vp, 0);
        return (0);
}

int
umap_reclaim(v)
        void *v;
{
        struct vop_reclaim_args /* {
                struct vnode *a_vp;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;
        struct umap_node *xp = VTOUMAP(vp);
        struct vnode *lowervp = xp->umap_lowervp;

        /* After this assignment, this node will not be re-used. */
        xp->umap_lowervp = NULL;
        LIST_REMOVE(xp, umap_hash);
        FREE(vp->v_data, M_TEMP);
        vp->v_data = NULL;
        vrele(lowervp);
        return (0);
}
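
/*
 * For reference, the node torn down above is this layer's per-vnode
 * private data.  Its declaration in miscfs/umapfs/umap.h is roughly the
 * following (paraphrased; it mirrors the nullfs null_node):
 *
 *        struct umap_node {
 *                LIST_ENTRY(umap_node) umap_hash;    hash chain
 *                struct vnode *umap_lowervp;         VREFed once
 *                struct vnode *umap_vnode;           back pointer to our vnode
 *        };
 *
 * Reclaim removes the hash entry and drops the single reference on the
 * lower vnode, so a new alias can only be obtained again through
 * umap_node_create().
 */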

int
umap_strategy(v)
        void *v;
{
        struct vop_strategy_args /* {
                struct buf *a_bp;
        } */ *ap = v;
        struct buf *bp = ap->a_bp;
        int error;
        struct vnode *savedvp;

        savedvp = bp->b_vp;
        bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);

        error = VOP_STRATEGY(ap->a_bp);

        bp->b_vp = savedvp;

        return (error);
}
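
/*
 * The b_vp swap above (and in umap_bwrite() below) is needed because the
 * generated wrapper dispatches on the buffer's vnode rather than taking
 * an explicit vnode argument; VOP_STRATEGY(bp) expands to roughly the
 * following (a sketch of the vnode_if.h wrapper, not code from this
 * file), so b_vp must point at the lower vnode for the call and be
 * restored once it returns:
 *
 *        struct vop_strategy_args a;
 *
 *        a.a_desc = VDESC(vop_strategy);
 *        a.a_bp = bp;
 *        error = VCALL(bp->b_vp, VOFFSET(vop_strategy), &a);
 */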

int
umap_bwrite(v)
        void *v;
{
        struct vop_bwrite_args /* {
                struct buf *a_bp;
        } */ *ap = v;
        struct buf *bp = ap->a_bp;
        int error;
        struct vnode *savedvp;

        savedvp = bp->b_vp;
        bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);

        error = VOP_BWRITE(ap->a_bp);

        bp->b_vp = savedvp;

        return (error);
}

int
umap_print(v)
        void *v;
{
        struct vop_print_args /* {
                struct vnode *a_vp;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;

        printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
            UMAPVPTOLOWERVP(vp));
        return (0);
}

int
umap_rename(v)
        void *v;
{
        struct vop_rename_args /* {
                struct vnode *a_fdvp;
                struct vnode *a_fvp;
                struct componentname *a_fcnp;
                struct vnode *a_tdvp;
                struct vnode *a_tvp;
                struct componentname *a_tcnp;
        } */ *ap = v;
        int error;
        struct componentname *compnamep;
        struct ucred *compcredp, *savecompcredp;
        struct vnode *vp;

        /*
         * Rename is irregular, having two componentname structures.
         * We need to map the cred in the second structure,
         * and then bypass takes care of the rest.
         */

        vp = ap->a_fdvp;
        compnamep = ap->a_tcnp;
        compcredp = compnamep->cn_cred;

        savecompcredp = compcredp;
        compcredp = compnamep->cn_cred = crdup(savecompcredp);

        if (umap_bug_bypass && compcredp->cr_uid != 0)
                printf("umap_rename: rename component credit user was %d, group %d\n",
                    compcredp->cr_uid, compcredp->cr_gid);

        /* Map all ids in the credential structure. */
        umap_mapids(vp->v_mount, compcredp);

        if (umap_bug_bypass && compcredp->cr_uid != 0)
                printf("umap_rename: rename component credit user now %d, group %d\n",
                    compcredp->cr_uid, compcredp->cr_gid);

        error = umap_bypass(ap);

        /* Restore the additional mapped componentname cred structure. */
        crfree(compcredp);
        compnamep->cn_cred = savecompcredp;

        return (error);
}