/*	$NetBSD: layer_subr.c,v 1.35 2014/02/10 11:23:14 hannken Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Id: lofs_subr.c,v 1.11 1992/05/30 10:05:43 jsp Exp
 *	@(#)null_subr.c	8.7 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_subr.c,v 1.35 2014/02/10 11:23:14 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/kmem.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>

#ifdef LAYERFS_DIAGNOSTIC
int layerfs_debug = 1;
#endif

/*
 * layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */
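
/*
 * Illustrative sketch (hedged, not part of the original comment; it only
 * restates structures used below): entries are kept on hash chains keyed
 * by the lower vnode, so locating the alias for a lower vnode amounts to
 *
 *	hd = LAYER_NHASH(lmp, lowervp);
 *	LIST_FOREACH(xp, hd, layer_hash)
 *		if (xp->layer_lowervp == lowervp)
 *			return LAYERTOV(xp);	// the alias vnode
 */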

void
layerfs_init(void)
{
	/* Nothing. */
}

void
layerfs_done(void)
{
	/* Nothing. */
}

/*
 * layer_node_find: find and return alias for lower vnode or NULL.
 *
 * => Return alias vnode referenced, if it already exists.
 * => The layermp's hashlock must be held on entry; we will unlock on success.
 */
struct vnode *
layer_node_find(struct mount *mp, struct vnode *lowervp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct layer_node_hashhead *hd;
	struct layer_node *a;
	struct vnode *vp;
	int error;

	/*
	 * Find the hash bucket and search the (two-way) linked list looking
	 * for a layerfs node structure which is referencing the lower vnode.
	 * If found, increment the layer_node reference count, but NOT
	 * the lower vnode's reference counter.
	 */
	KASSERT(mutex_owned(&lmp->layerm_hashlock));
	hd = LAYER_NHASH(lmp, lowervp);
loop:
	LIST_FOREACH(a, hd, layer_hash) {
		if (a->layer_lowervp != lowervp) {
			continue;
		}
		vp = LAYERTOV(a);
		if (vp->v_mount != mp) {
			continue;
		}
		mutex_enter(vp->v_interlock);
		mutex_exit(&lmp->layerm_hashlock);
		error = vget(vp, 0);
		if (error) {
			mutex_enter(&lmp->layerm_hashlock);
			goto loop;
		}
		return vp;
	}
	return NULL;
}
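
/*
 * Usage sketch (hypothetical caller, not from this file): the hash lock
 * is passed in held and is released only when an alias is found and
 * returned referenced; on a miss the caller still owns the lock.
 *
 *	mutex_enter(&lmp->layerm_hashlock);
 *	vp = layer_node_find(mp, lowervp);
 *	if (vp == NULL)
 *		mutex_exit(&lmp->layerm_hashlock);	// still held on a miss
 *	// on success vp is referenced and the hash lock was already dropped
 */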

/*
 * layer_node_alloc: make a new layerfs vnode.
 *
 * => vp is the alias vnode, lowervp is the lower vnode.
 * => We will hold a reference to lowervp.
 */
int
layer_node_alloc(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct layer_node_hashhead *hd;
	struct layer_node *xp;
	struct vnode *vp, *nvp;
	int error;

	/* Get a new vnode and share its interlock with the underlying vnode. */
	error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p,
	    lowervp->v_interlock, &vp);
	if (error) {
		return error;
	}
	vp->v_type = lowervp->v_type;
	mutex_enter(vp->v_interlock);
	vp->v_iflag |= VI_LAYER;
	mutex_exit(vp->v_interlock);

	xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
	if (xp == NULL) {
		ungetnewvnode(vp);
		return ENOMEM;
	}
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		spec_node_init(vp, lowervp->v_rdev);
	}

	/*
	 * Before inserting the node into the hash, check whether another
	 * thread raced with us.  If so, return that node and destroy ours.
	 */
	mutex_enter(&lmp->layerm_hashlock);
	if ((nvp = layer_node_find(mp, lowervp)) != NULL) {
		ungetnewvnode(vp);
		kmem_free(xp, lmp->layerm_size);
		*vpp = nvp;
		return 0;
	}

	vp->v_data = xp;
	vp->v_vflag = (vp->v_vflag & ~VV_MPSAFE) |
	    (lowervp->v_vflag & VV_MPSAFE);
	xp->layer_vnode = vp;
	xp->layer_lowervp = lowervp;
	xp->layer_flags = 0;

	/*
	 * Insert the new node into the hash.
	 * Add a reference to the lower node.
	 */
	vref(lowervp);
	hd = LAYER_NHASH(lmp, lowervp);
	LIST_INSERT_HEAD(hd, xp, layer_hash);
	uvm_vnp_setsize(vp, 0);
	mutex_exit(&lmp->layerm_hashlock);

	*vpp = vp;
	return 0;
}
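
/*
 * Wiring sketch (an assumption about the individual layered file systems,
 * e.g. nullfs; not shown in this file): a mount normally points its
 * layerm_alloc hook at layer_node_alloc() while the layer_mount is being
 * set up, which is how layer_node_create() below allocates new aliases.
 *
 *	lmp->layerm_alloc = layer_node_alloc;
 */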

/*
 * layer_node_create: try to find an existing layerfs vnode referring to
 * the given lower vnode; otherwise make a new vnode which contains a
 * reference to the lower vnode.
 *
 * => Caller should lock the lower node.
 */
230 1.1 wrstuden int
231 1.30 rmind layer_node_create(struct mount *mp, struct vnode *lowervp, struct vnode **nvpp)
232 1.1 wrstuden {
233 1.1 wrstuden struct vnode *aliasvp;
234 1.1 wrstuden struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
235 1.1 wrstuden
236 1.22 ad mutex_enter(&lmp->layerm_hashlock);
237 1.21 chs aliasvp = layer_node_find(mp, lowervp);
238 1.21 chs if (aliasvp != NULL) {
239 1.1 wrstuden /*
240 1.30 rmind * Note: layer_node_find() has taken another reference to
241 1.30 rmind * the alias vnode and moved the lock holding to aliasvp.
242 1.1 wrstuden */
243 1.1 wrstuden #ifdef LAYERFS_DIAGNOSTIC
244 1.16 erh if (layerfs_debug)
245 1.16 erh vprint("layer_node_create: exists", aliasvp);
246 1.1 wrstuden #endif
247 1.1 wrstuden } else {
248 1.1 wrstuden int error;
249 1.1 wrstuden
250 1.22 ad mutex_exit(&lmp->layerm_hashlock);
251 1.1 wrstuden /*
252 1.30 rmind * Get a new vnode. Make it to reference the layer_node.
253 1.30 rmind * Note: aliasvp will be return with the reference held.
254 1.1 wrstuden */
255 1.30 rmind error = (lmp->layerm_alloc)(mp, lowervp, &aliasvp);
256 1.30 rmind if (error)
257 1.30 rmind return error;
258 1.1 wrstuden #ifdef LAYERFS_DIAGNOSTIC
259 1.16 erh if (layerfs_debug)
260 1.16 erh printf("layer_node_create: create new alias vnode\n");
261 1.1 wrstuden #endif
262 1.1 wrstuden }
263 1.1 wrstuden
264 1.1 wrstuden /*
265 1.30 rmind * Now that we acquired a reference on the upper vnode, release one
266 1.30 rmind * on the lower node. The existence of the layer_node retains one
267 1.1 wrstuden * reference to the lower node.
268 1.1 wrstuden */
269 1.1 wrstuden vrele(lowervp);
270 1.30 rmind KASSERT(lowervp->v_usecount > 0);
271 1.1 wrstuden
272 1.1 wrstuden #ifdef LAYERFS_DIAGNOSTIC
273 1.16 erh if (layerfs_debug)
274 1.16 erh vprint("layer_node_create: alias", aliasvp);
275 1.1 wrstuden #endif
276 1.30 rmind *nvpp = aliasvp;
277 1.30 rmind return 0;
278 1.1 wrstuden }
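
/*
 * Caller sketch (hypothetical, not from this file): the caller passes in
 * a referenced (and, per the note above, locked) lower vnode; that
 * reference is consumed and a referenced alias is handed back.
 *
 *	error = layer_node_create(mp, lowervp, &vp);
 *	// on success vp is the referenced alias; our lowervp reference has
 *	// been released here, the cache entry keeps its own.
 */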

#ifdef LAYERFS_DIAGNOSTIC
struct vnode *
layer_checkvp(struct vnode *vp, const char *fil, int lno)
{
	struct layer_node *a = VTOLAYER(vp);
#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 *
	 * WRS - no it doesn't...
	 */
	if (vp->v_op != layer_vnodeop_p) {
		printf("layer_checkvp: on non-layer-node\n");
#ifdef notyet
		while (layer_checkvp_barrier) /*WAIT*/ ;
#endif
		panic("layer_checkvp");
	}
#endif
	if (a->layer_lowervp == NULL) {
		/* Should never happen. */
		int i; u_long *p;
		printf("vp = %p, ZERO ptr\n", vp);
		for (p = (u_long *)a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		panic("layer_checkvp");
	}
	if (a->layer_lowervp->v_usecount < 1) {
		int i; u_long *p;
		printf("vp = %p, unref'ed lowervp\n", vp);
		for (p = (u_long *)a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		panic("layer with unref'ed lowervp");
	}
#ifdef notnow
	printf("layer %p/%d -> %p/%d [%s, %d]\n",
	    LAYERTOV(a), LAYERTOV(a)->v_usecount,
	    a->layer_lowervp, a->layer_lowervp->v_usecount,
	    fil, lno);
#endif
	return a->layer_lowervp;
}
#endif
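
/*
 * Usage note (an assumption about miscfs/genfs/layer.h, hedged): with
 * LAYERFS_DIAGNOSTIC the LAYERVPTOLOWERVP() macro is expected to expand to
 * layer_checkvp(vp, __FILE__, __LINE__), so every translation from an alias
 * to its lower vnode goes through the checks above; without the option it
 * reduces to a plain VTOLAYER(vp)->layer_lowervp access.
 *
 *	lowervp = LAYERVPTOLOWERVP(vp);		// checked in diagnostic kernels
 */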