/*	$NetBSD: nfs_node.c,v 1.104 2008/09/30 14:29:39 pooka Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.104 2008/09/30 14:29:39 pooka Exp $");

#include "opt_nfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/kauth.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_var.h>

struct nfsnodehashhead *nfsnodehashtbl;
u_long nfsnodehash;
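/*
 * nfs_hashlock protects nfsnodehashtbl, nfsnodehash and the n_hash
 * chains of the nfsnodes linked into the table.
 */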
static kmutex_t nfs_hashlock;

struct pool nfs_node_pool;
struct pool nfs_vattr_pool;

MALLOC_JUSTDEFINE(M_NFSNODE, "NFS node", "NFS vnode private part");

extern int prtactive;

#define	nfs_hash(x,y)	hash32_buf((x), (y), HASH32_BUF_INIT)

void nfs_gop_size(struct vnode *, off_t, off_t *, int);
int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
int nfs_gop_write(struct vnode *, struct vm_page **, int, int);

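/*
 * Hooks used by the genfs code for NFS vnodes; they are attached to each
 * new vnode with genfs_node_init() in nfs_nget1() below.
 */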
static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
};

/*
 * Initialize the nfsnode hash table, the nfsnode and attribute pools,
 * and the lock that protects the hash table.
 */
void
nfs_node_init(void)
{

	malloc_type_attach(M_NFSNODE);
	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);

	nfsnodehashtbl = hashinit(desiredvnodes, HASH_LIST, true,
	    &nfsnodehash);
	mutex_init(&nfs_hashlock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * Reinitialize the nfsnode hash table.
 */

void
nfs_node_reinit(void)
{
	struct nfsnode *np;
	struct nfsnodehashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);

	mutex_enter(&nfs_hashlock);
	oldhash = nfsnodehashtbl;
	oldmask = nfsnodehash;
	nfsnodehashtbl = hash;
	nfsnodehash = mask;
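	/*
	 * Move every nfsnode from the old table into the new one,
	 * rehashing each entry with the new mask.
	 */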
	for (i = 0; i <= oldmask; i++) {
		while ((np = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(np, n_hash);
			val = NFSNOHASH(nfs_hash(np->n_fhp, np->n_fhsize));
			LIST_INSERT_HEAD(&hash[val], np, n_hash);
		}
	}
	mutex_exit(&nfs_hashlock);
	hashdone(oldhash, HASH_LIST, oldmask);
}

/*
 * Free resources previously allocated in nfs_node_init().
 */
void
nfs_node_done(void)
{

	mutex_destroy(&nfs_hashlock);
	hashdone(nfsnodehashtbl, HASH_LIST, nfsnodehash);
	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	malloc_type_detach(M_NFSNODE);
}

/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * On success a referenced, locked nfsnode is returned via *npp.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np, *np2;
	struct nfsnodehashhead *nhpp;
	struct vnode *vp;
	int error;

	nhpp = &nfsnodehashtbl[NFSNOHASH(nfs_hash(fhp, fhsize))];
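	/*
	 * First look for an existing nfsnode with the same file handle on
	 * the same mount.  vget() may sleep or fail; EBUSY is passed back
	 * to the caller, any other failure restarts the lookup.
	 */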
loop:
	mutex_enter(&nfs_hashlock);
	LIST_FOREACH(np, nhpp, n_hash) {
		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
		    memcmp(fhp, np->n_fhp, fhsize))
			continue;
		vp = NFSTOV(np);
		mutex_enter(&vp->v_interlock);
		mutex_exit(&nfs_hashlock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return (0);
	}
	mutex_exit(&nfs_hashlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

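	/* File handles larger than NFS_SMALLFH do not fit in n_fh itself. */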
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

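	/*
	 * Someone else may have created an nfsnode for this file handle
	 * while we slept in getnewvnode()/pool_get().  If so, throw our
	 * copy away and retry the lookup.
	 */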
	mutex_enter(&nfs_hashlock);
	LIST_FOREACH(np2, nhpp, n_hash) {
		if (mntp != NFSTOV(np2)->v_mount || np2->n_fhsize != fhsize ||
		    memcmp(fhp, np2->n_fhp, fhsize))
			continue;
		mutex_exit(&nfs_hashlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values.  VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	vlockmgr(&vp->v_lock, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	LIST_INSERT_HEAD(nhpp, np, n_hash);
	mutex_exit(&nfs_hashlock);

	*npp = np;
	return (0);
}

int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
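	/* Ask the caller to recycle the vnode if the file has been removed. */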
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp, 0);

	if (sp != NULL) {
		int error;

		/*
		 * Remove the silly file that was rename'd earlier
		 *
		 * Just in case our thread also has the parent node locked,
		 * we use LK_CANRECURSE.
		 */

		error = vn_lock(sp->s_dvp, LK_EXCLUSIVE | LK_CANRECURSE);
		if (error || sp->s_dvp->v_data == NULL) {
			/* XXX should recover */
			printf("%s: vp=%p error=%d\n",
			    __func__, sp->s_dvp, error);
		} else {
			nfs_removeit(sp);
		}
		kauth_cred_free(sp->s_cred);
		vput(sp->s_dvp);
		kmem_free(sp, sizeof(*sp));
	}

	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

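	/*
	 * Unhash the node first so that nfs_nget1() can no longer find it
	 * while it is being torn down.
	 */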
	mutex_enter(&nfs_hashlock);
	LIST_REMOVE(np, n_hash);
	mutex_exit(&nfs_hashlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	cache_purge(vp);
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}

void
nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{

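	/*
	 * Report the end of the object for the pending I/O: the larger of
	 * the requested size and the current file size.
	 */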
	*eobp = MAX(size, vp->v_size);
}

int
nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
    kauth_cred_t cred)
{

	return 0;
}

int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int i;

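	/*
	 * Revoke write access to the pages so that any modification made
	 * after this point faults and re-dirties the page instead of being
	 * lost while the pages are written back.
	 */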
	for (i = 0; i < npages; i++) {
		pmap_page_protect(pgs[i], VM_PROT_READ);
	}
	return genfs_gop_write(vp, pgs, npages, flags);
}