/*	$NetBSD: rump.c,v 1.14 2007/10/31 15:57:21 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/cpu.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"

struct proc rump_proc;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred;
struct cpu_info rump_cpu;

struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

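/*
 * Workqueue callback for the "aiodoned" queue: the struct work handed
 * in is embedded in a struct buf, so recover the buf and invoke its
 * i/o completion routine.
 */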
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}

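/*
 * Bootstrap the rump kernel: build a minimal proc around lwp0,
 * initialize VM, VFS and the buffer cache, create the aiodone
 * workqueue and pick up the host's hostname.
 */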
void
rump_init(void)
{
	extern char hostname[];
	extern size_t hostnamelen;
	struct proc *p;
	struct lwp *l;
	int error;

	l = &lwp0;
	p = &rump_proc;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	/* should be "enough" */
	syncdelay = 0;

	vfsinit();
	bufinit();

	rump_sleepers_init();
	rumpuser_thrinit();

	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);
}

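/*
 * Allocate and zero a struct mount for the given vfsops and mount
 * flags; the result is fed to rump_mnt_mount() and eventually released
 * with rump_mnt_destroy().
 */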
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = rumpuser_malloc(sizeof(struct mount), 0);
	memset(mp, 0, sizeof(struct mount));

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);

	return mp;
}

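/*
 * Mount the file system: VFS_MOUNT() followed by VFS_STATVFS() and
 * VFS_START().  If a later step fails, the mount is forcibly unmounted
 * so the caller never sees a half-mounted file system.
 */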
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen,
	struct lwp *l)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen, l);
	if (rv)
		return rv;

	rv = VFS_STATVFS(mp, &mp->mnt_stat, l);
	if (rv) {
		VFS_UNMOUNT(mp, MNT_FORCE, l);
		return rv;
	}

	rv = VFS_START(mp, 0, l);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE, l);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	rumpuser_free(mp);
}

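/*
 * Build a componentname for a lookup or directory operation.  The name
 * is copied into a freshly allocated pathname buffer, which is released
 * again by rump_freecn().
 */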
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;

	cnp = rumpuser_malloc(sizeof(struct componentname), 0);
	memset(cnp, 0, sizeof(struct componentname));

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	/* bound the copy by namelen; the name need not be NUL-terminated */
	strlcpy(cnp->cn_pnbuf, name, namelen + 1);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;

	cnp->cn_cred = creds;
	cnp->cn_lwp = l;

	return cnp;
}

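/*
 * Release a componentname created by rump_makecn(), dropping the
 * pathname buffer according to the SAVENAME/SAVESTART rules and
 * optionally destroying the credentials as well.
 */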
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if (cnp->cn_flags & SAVENAME) {
		if (flags & RUMPCN_ISLOOKUP || cnp->cn_flags & SAVESTART)
			PNBUF_PUT(cnp->cn_pnbuf);
	} else {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	rumpuser_free(cnp);
}

int
rump_recyclenode(struct vnode *vp)
{

	return vrecycle(vp, NULL, curlwp);
}

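/*
 * Look up a registered fake block device: resolve the given path with
 * realpath() and compare the result against the registered entries.
 */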
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

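/*
 * Register a host path as a fake block device so that file systems can
 * use it as their backing storage.
 */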
int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = rumpuser_malloc(sizeof(struct fakeblk), 1);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	rumpuser_free(fblk);
}

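/*
 * Export basic vnode information (type, size and, for device nodes,
 * the rdev) to the caller.
 */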
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specinfo)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

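/*
 * Helpers for allocating a struct vattr and setting its fields one at
 * a time.
 */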
struct vattr *
rump_vattr_init(void)
{
	struct vattr *vap;

	vap = rumpuser_malloc(sizeof(struct vattr), 0);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	rumpuser_free(vap);
}

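/*
 * Direct manipulation of the vnode use count; no locking is done here.
 */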
void
rump_vp_incref(struct vnode *vp)
{

	++vp->v_usecount;
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	--vp->v_usecount;
}

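/*
 * Construct a single-iovec uio for a kernel-space transfer.  The
 * matching rump_uio_free() releases it and reports the residual count.
 */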
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = rumpuser_malloc(sizeof(struct uio), 0);
	uio->uio_iov = rumpuser_malloc(sizeof(struct iovec), 0);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	rumpuser_free(uio->uio_iov);
	rumpuser_free(uio);

	return resid;
}

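/*
 * Vnode lock wrappers around VOP_LOCK()/VOP_UNLOCK()/VOP_ISLOCKED().
 */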
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

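/*
 * Thin wrappers exposing selected VFS operations.
 */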
int
rump_vfs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{

	return VFS_UNMOUNT(mp, mntflags, l);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

/* XXX: statvfs is different from system to system */
#if 0
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{

	return VFS_STATVFS(mp, sbp, l);
}
#endif

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred, struct lwp *l)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred, l);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

void
rump_bioops_sync(void)
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

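/*
 * Create a proc/lwp pair for an implicit thread and optionally install
 * it as the current lwp.  rump_clear_curlwp() tears the pair down again
 * and rump_get_curlwp() falls back to lwp0 when nothing has been set.
 */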
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_alloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_alloc(sizeof(struct proc), KM_SLEEP);
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	kmem_free(l->l_proc, sizeof(struct proc));
	kmem_free(l, sizeof(struct lwp));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}