/*	$NetBSD: rump_vfs.c,v 1.1 2008/11/19 14:10:49 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2008 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Finnish Cultural Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
19 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/param.h>
32 #include <sys/buf.h>
33 #include <sys/filedesc.h>
34 #include <sys/module.h>
35 #include <sys/namei.h>
36 #include <sys/queue.h>
37 #include <sys/vfs_syscalls.h>
38 #include <sys/vnode.h>
39 #include <sys/wapbl.h>
40
41 #include <miscfs/specfs/specdev.h>
42
43 #include <rump/rump.h>
44 #include <rump/rumpuser.h>
45
46 #include "rump_private.h"
47 #include "rump_vfs_private.h"
48
/*
 * Registry entry for a "fake" block device: a host file used by rump
 * as backing storage for a block device.  Entries are keyed by the
 * host-resolved (realpath'd) path; see _rump_fakeblk_find().
 */
struct fakeblk {
	char path[MAXPATHLEN];		/* canonicalized host path */
	LIST_ENTRY(fakeblk) entries;
};
static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

/* cwd info handed to proc0 in rump_vfs_init() */
static struct cwdinfo rump_cwdi;
56
/*
 * Per-process VFS attach hook: give a new process a fresh cwdinfo.
 * Installed as rump_proc_vfs_init by rump_vfs_init().
 */
static void
pvfs_init(struct proc *p)
{

	p->p_cwdi = cwdinit();
}
63
/*
 * Per-process VFS detach hook: release the process's cwdinfo.
 * Installed as rump_proc_vfs_release by rump_vfs_init().
 */
static void
pvfs_rele(struct proc *p)
{

	cwdfree(p->p_cwdi);
}
70
71 void
72 rump_vfs_init()
73 {
74
75 syncdelay = 0;
76 dovfsusermount = 1;
77
78 cache_cpu_init(&rump_cpu);
79 vfsinit();
80 bufinit();
81 wapbl_init();
82 cwd_sys_init();
83 rumpuser_bioinit(rump_biodone);
84 rumpfs_init();
85
86 rump_proc_vfs_init = pvfs_init;
87 rump_proc_vfs_release = pvfs_rele;
88
89 rw_init(&rump_cwdi.cwdi_lock);
90 rump_cwdi.cwdi_cdir = rootvnode;
91 proc0.p_cwdi = &rump_cwdi;
92 }
93
/*
 * Allocate and minimally initialize a struct mount for the given
 * vfsops, with one reference held.  Mount it with rump_mnt_mount()
 * and dispose of it with rump_mnt_destroy().
 */
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}
113
114 int
115 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
116 {
117 struct vnode *rvp;
118 int rv;
119
120 rv = VFS_MOUNT(mp, path, data, dlen);
121 if (rv)
122 return rv;
123
124 (void) VFS_STATVFS(mp, &mp->mnt_stat);
125 rv = VFS_START(mp, 0);
126 if (rv)
127 VFS_UNMOUNT(mp, MNT_FORCE);
128
129 /*
130 * XXX: set a root for lwp0. This is strictly not correct,
131 * but makes things works for single fs case without having
132 * to manually call rump_rcvp_set().
133 */
134 VFS_ROOT(mp, &rvp);
135 rump_rcvp_set(rvp, rvp);
136 vput(rvp);
137
138 return rv;
139 }
140
/*
 * Free a struct mount obtained from rump_mnt_init().  The caller is
 * responsible for having unmounted it first.
 */
void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}
148
149 struct componentname *
150 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
151 kauth_cred_t creds, struct lwp *l)
152 {
153 struct componentname *cnp;
154 const char *cp = NULL;
155
156 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
157
158 cnp->cn_nameiop = nameiop;
159 cnp->cn_flags = flags | HASBUF;
160
161 cnp->cn_pnbuf = PNBUF_GET();
162 strcpy(cnp->cn_pnbuf, name);
163 cnp->cn_nameptr = cnp->cn_pnbuf;
164 cnp->cn_namelen = namelen;
165 cnp->cn_hash = namei_hash(name, &cp);
166
167 cnp->cn_cred = creds;
168
169 return cnp;
170 }
171
172 void
173 rump_freecn(struct componentname *cnp, int flags)
174 {
175
176 if (flags & RUMPCN_FREECRED)
177 rump_cred_destroy(cnp->cn_cred);
178
179 if ((flags & RUMPCN_HASNTBUF) == 0) {
180 if (cnp->cn_flags & SAVENAME) {
181 if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
182 PNBUF_PUT(cnp->cn_pnbuf);
183 } else {
184 PNBUF_PUT(cnp->cn_pnbuf);
185 }
186 }
187 kmem_free(cnp, sizeof(*cnp));
188 }
189
/* hey baby, what's your namei? */
/*
 * Wrapper around kernel namei(): look up namep with the given op and
 * flags, optionally returning the parent (dvpp), the leaf (vpp) and a
 * copy of the resulting componentname (cnpp).
 *
 * dvpp requires LOCKPARENT in flags.  If vpp is NULL, any looked-up
 * vnode is released here.  If cnpp is NULL, namei must not have left
 * a pathname buffer behind (HASBUF) -- that would leak, so we panic.
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			/* drop the lock too if namei left one */
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		/* caller takes over the cn, including its pathname buffer */
		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
233
234 static struct fakeblk *
235 _rump_fakeblk_find(const char *path)
236 {
237 char buf[MAXPATHLEN];
238 struct fakeblk *fblk;
239 int error;
240
241 if (rumpuser_realpath(path, buf, &error) == NULL)
242 return NULL;
243
244 LIST_FOREACH(fblk, &fakeblks, entries)
245 if (strcmp(fblk->path, buf) == 0)
246 return fblk;
247
248 return NULL;
249 }
250
251 int
252 rump_fakeblk_register(const char *path)
253 {
254 char buf[MAXPATHLEN];
255 struct fakeblk *fblk;
256 int error;
257
258 if (_rump_fakeblk_find(path))
259 return EEXIST;
260
261 if (rumpuser_realpath(path, buf, &error) == NULL)
262 return error;
263
264 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
265 if (fblk == NULL)
266 return ENOMEM;
267
268 strlcpy(fblk->path, buf, MAXPATHLEN);
269 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
270
271 return 0;
272 }
273
/*
 * Return non-zero iff path is registered as a fake block device.
 */
int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}
280
281 void
282 rump_fakeblk_deregister(const char *path)
283 {
284 struct fakeblk *fblk;
285
286 fblk = _rump_fakeblk_find(path);
287 if (fblk == NULL)
288 return;
289
290 LIST_REMOVE(fblk, entries);
291 kmem_free(fblk, sizeof(*fblk));
292 }
293
294 void
295 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
296 {
297
298 *vtype = vp->v_type;
299 *vsize = vp->v_size;
300 if (vp->v_specnode)
301 *vdev = vp->v_rdev;
302 else
303 *vdev = 0;
304 }
305
306 struct vfsops *
307 rump_vfslist_iterate(struct vfsops *ops)
308 {
309
310 if (ops == NULL)
311 return LIST_FIRST(&vfs_list);
312 else
313 return LIST_NEXT(ops, vfs_list);
314 }
315
/*
 * Thin wrapper exporting vfs_getopsbyname() to rump clients:
 * look up a file system's vfsops by name.
 */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}
322
323 struct vattr*
324 rump_vattr_init()
325 {
326 struct vattr *vap;
327
328 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
329 vattr_null(vap);
330
331 return vap;
332 }
333
/* Set the vnode type attribute of vap. */
void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}
340
/* Set the file mode attribute of vap. */
void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}
347
/* Set the device number attribute of vap (for device nodes). */
void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}
354
/* Free a vattr obtained from rump_vattr_init(). */
void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}
361
/*
 * Bump the use count of vp directly, under v_interlock.  Unlike
 * vref()/vrele(), this manipulates v_usecount without triggering any
 * of the usual lifecycle side effects -- intended for rump clients
 * that manage vnode life cycles themselves.
 */
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
370
/*
 * Return the current use count of vp.  Read without taking
 * v_interlock, so the value is only a snapshot.
 */
int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}
377
/*
 * Drop the use count of vp directly, under v_interlock.  Counterpart
 * of rump_vp_incref(); does not perform the inactivation/reclaim
 * processing that vrele() would.
 */
void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
386
/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/* force the use count to exactly 1 so vclean/vrelel finish it off */
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out.  This is very wrong, but fixing it properly would
	 * take too much effort for now
	 */
	if (vp->v_tag == VT_NFS && vp->v_vflag & VV_ROOT) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	/* vclean() consumes the interlock; vrelel() drops the last ref */
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}
411
/* Thin wrapper exporting vrele() to rump clients. */
void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
418
/* Acquire vp's v_interlock on behalf of a rump client. */
void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}
425
/* Thin wrapper exporting VFS_UNMOUNT() to rump clients. */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}
432
/*
 * Get the root vnode of mp, referenced.  It is returned locked
 * unless lock is 0, in which case it is unlocked before returning.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int error;

	error = VFS_ROOT(mp, vpp);
	if (error == 0 && !lock)
		VOP_UNLOCK(*vpp, 0);

	return error;
}
447
/* Thin wrapper exporting VFS_STATVFS() to rump clients. */
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}
454
/*
 * Sync mp.  wait selects MNT_WAIT (synchronous) vs. MNT_NOWAIT
 * semantics for VFS_SYNC().
 */
int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}
461
/* Thin wrapper exporting VFS_FHTOVP() (file handle -> vnode). */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}
468
/* Thin wrapper exporting VFS_VPTOFH() (vnode -> file handle). */
int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}
475
/*ARGSUSED*/
/*
 * Wait for all dirty buffers to drain.  mp is unused; the wait is
 * global.  Any buffers that could not be synced are reported.
 */
void
rump_vfs_syncwait(struct mount *mp)
{
	int unsynced;

	unsynced = buf_syncwait();
	if (unsynced != 0)
		printf("syncwait: unsynced buffers: %d\n", unsynced);
}
486
/*
 * "Load" a built-in module into the rump kernel: verify its version
 * against the running __NetBSD_Version__, then run its init command.
 * Returns EPROGMISMATCH on a version mismatch, otherwise the result
 * of the module's MODULE_CMD_INIT.
 */
int
rump_vfs_load(struct modinfo **mi)
{

	if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
		return EPROGMISMATCH;

	return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
}
496
497 void
498 rump_bioops_sync()
499 {
500
501 if (bioopsp)
502 bioopsp->io_sync(NULL);
503 }
504
/*
 * I/O completion callback, registered with the rumpuser bio layer in
 * rump_vfs_init().  arg is the struct buf of the completed I/O, count
 * the number of bytes transferred, error an errno (0 on success).
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	/* biodone() runs in (simulated) interrupt context */
	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
518
/*
 * Set the root (rvp) and current (cvp) directories of curproc.
 * cvp must be non-NULL; rvp may be NULL to clear the root directory.
 * A new reference is taken on each installed vnode and the reference
 * on each replaced vnode is dropped, all under cwdi_lock.
 */
void
rump_rcvp_set(struct vnode *rvp, struct vnode *cvp)
{
	struct lwp *l = curlwp;
	struct cwdinfo *cwdi = l->l_proc->p_cwdi;

	KASSERT(cvp);

	rw_enter(&cwdi->cwdi_lock, RW_WRITER);
	if (cwdi->cwdi_rdir)
		vrele(cwdi->cwdi_rdir);
	if (rvp)
		vref(rvp);
	cwdi->cwdi_rdir = rvp;

	vrele(cwdi->cwdi_cdir);
	vref(cvp);
	cwdi->cwdi_cdir = cvp;
	rw_exit(&cwdi->cwdi_lock);
}
539
540 struct vnode *
541 rump_cdir_get()
542 {
543 struct vnode *vp;
544 struct cwdinfo *cwdi = curlwp->l_proc->p_cwdi;
545
546 rw_enter(&cwdi->cwdi_lock, RW_READER);
547 vp = cwdi->cwdi_cdir;
548 rw_exit(&cwdi->cwdi_lock);
549 vref(vp);
550
551 return vp;
552 }
553