/*	$NetBSD: rump_vfs.c,v 1.3 2008/11/25 20:35:46 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/filedesc.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>

#include <miscfs/specfs/specdev.h>

#include <rump/rump.h>
#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

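/*
 * Registry of host file paths that have been registered as fake
 * block devices.  Paths are canonicalized with rumpuser_realpath()
 * both when registered and when looked up, so different spellings
 * of the same host path match the same entry.
 */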
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};
static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

static struct cwdinfo rump_cwdi;

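/*
 * Per-process VFS state hooks.  These are installed as
 * rump_proc_vfs_init/rump_proc_vfs_release in rump_vfs_init() and
 * give each rump process its own cwdinfo.
 */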
static void
pvfs_init(struct proc *p)
{

	p->p_cwdi = cwdinit();
}

static void
pvfs_rele(struct proc *p)
{

	cwdfree(p->p_cwdi);
}

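/*
 * Bring up the VFS side of the rump kernel: name cache, vfs and buffer
 * cache initialization, WAPBL and cwd syscall initialization, block I/O
 * completion callback registration and the rump root file system.
 * Finally, proc0 is given a cwdinfo rooted at rootvnode.
 */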
void
rump_vfs_init()
{

	syncdelay = 0;
	dovfsusermount = 1;

	cache_cpu_init(&rump_cpu);
	vfsinit();
	bufinit();
	wapbl_init();
	cwd_sys_init();
	rumpuser_bioinit(rump_biodone);
	rumpfs_init();

	rump_proc_vfs_init = pvfs_init;
	rump_proc_vfs_release = pvfs_rele;

	rw_init(&rump_cwdi.cwdi_lock);
	rump_cwdi.cwdi_cdir = rootvnode;
	vref(rump_cwdi.cwdi_cdir);
	proc0.p_cwdi = &rump_cwdi;
}

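/*
 * Allocate and minimally initialize a struct mount which can later be
 * handed to rump_mnt_mount().  The mount is not attached to any
 * mountpoint in a file system namespace.
 *
 * Illustrative sketch only: the file system name and its mount
 * arguments below are assumptions supplied by the caller, not
 * something this file defines.
 *
 *	struct mount *mp;
 *	size_t argslen = sizeof(args);
 *	int error;
 *
 *	mp = rump_mnt_init(rump_vfs_getopsbyname("ffs"), 0);
 *	error = rump_mnt_mount(mp, "/", &args, &argslen);
 */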
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}

int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	struct vnode *rvp;
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	/*
	 * XXX: set a root for lwp0.  This is strictly speaking not
	 * correct, but it makes things work for the single file system
	 * case without having to manually call rump_rcvp_set().
	 */
	VFS_ROOT(mp, &rvp);
	rump_rcvp_set(rvp, rvp);
	vput(rvp);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}

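/*
 * Construct a componentname suitable for feeding directly to vnode
 * operations.  The name is copied into a freshly allocated pathname
 * buffer, so the caller's string does not need to stay around.
 * Release the result with rump_freecn().
 */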
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags | HASBUF;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}

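/*
 * Release a componentname created with rump_makecn().  RUMPCN_FREECRED
 * also destroys the attached credentials and RUMPCN_HASNTBUF indicates
 * that there is no pathname buffer left for us to release.
 */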
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP ||
			    cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}

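/*
 * Wrapper around namei().  The caller gets back the parent and/or leaf
 * vnode and optionally a copy of the componentname; pointers the caller
 * is not interested in may be NULL, in which case the corresponding
 * vnode reference is dropped here.  LOCKPARENT must be set exactly when
 * a parent vnode is requested.
 *
 * Illustrative sketch only (the path is a made-up example):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = rump_namei(LOOKUP, FOLLOW, "/some/path", NULL, &vp, NULL);
 */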
/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}

static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

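/*
 * Register a host file as a fake block device.  The path is
 * canonicalized before it is stored; EEXIST is returned if it is
 * already registered and ENOMEM if the bookkeeping structure cannot
 * be allocated.
 *
 * Illustrative sketch only (the image path is a made-up example):
 *
 *	error = rump_fakeblk_register("/tmp/ffs.img");
 */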
int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}

void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

struct vattr *
rump_vattr_init()
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

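/*
 * Direct manipulation of a vnode's use count under the vnode
 * interlock.  These allow an external caller to adjust or inspect
 * v_usecount without going through vref()/vrele().
 */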
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out.  This is very wrong, but fixing it properly would
	 * take too much effort for now.
	 */
	if (vp->v_tag == VT_NFS && vp->v_vflag & VV_ROOT) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}

int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

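/*
 * Block I/O completion callback, registered with rumpuser_bioinit()
 * in rump_vfs_init().  Converts the completed byte count reported by
 * the host side into b_resid and hands the buffer to biodone() in
 * (rump) interrupt context.
 */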
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}

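/*
 * Set the root and current directory of the calling process.  A new
 * reference is taken on the vnodes passed in and the references on
 * the previous root and current directory are dropped.  rvp may be
 * NULL to clear the root; cvp must always be supplied.
 */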
void
rump_rcvp_set(struct vnode *rvp, struct vnode *cvp)
{
	struct lwp *l = curlwp;
	struct cwdinfo *cwdi = l->l_proc->p_cwdi;

	KASSERT(cvp);

	rw_enter(&cwdi->cwdi_lock, RW_WRITER);
	if (cwdi->cwdi_rdir)
		vrele(cwdi->cwdi_rdir);
	if (rvp)
		vref(rvp);
	cwdi->cwdi_rdir = rvp;

	vrele(cwdi->cwdi_cdir);
	vref(cvp);
	cwdi->cwdi_cdir = cvp;
	rw_exit(&cwdi->cwdi_lock);
}

struct vnode *
rump_cdir_get()
{
	struct vnode *vp;
	struct cwdinfo *cwdi = curlwp->l_proc->p_cwdi;

	rw_enter(&cwdi->cwdi_lock, RW_READER);
	vp = cwdi->cwdi_cdir;
	rw_exit(&cwdi->cwdi_lock);
	vref(vp);

	return vp;
}