/*	$NetBSD: rump.c,v 1.19 2007/11/07 16:24:22 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/cpu.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"

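/*
 * Statically allocated stand-ins for the per-process structures a
 * real kernel process would have: proc, cwd info, accounting stats,
 * resource limits, credentials and a CPU.  Everything runs as this
 * single implicit process unless rump_setup_curlwp() is used.
 */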
struct proc rump_proc;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred;
struct cpu_info rump_cpu;

kmutex_t rump_giantlock;

struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

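/*
 * Worker for the aiodone workqueue.  The work item is embedded in the
 * buf, so recover the buf and run its i/o completion handler.
 */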
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}

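/*
 * Bootstrap the rump kernel: set up the implicit process/lwp context,
 * initialize VM, VFS and the buffer cache, create the aiodone
 * workqueue and fetch the hostname from the host.
 */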
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	struct proc *p;
	struct lwp *l;
	int error;

	l = &lwp0;
	p = &rump_proc;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	/* should be "enough" */
	syncdelay = 0;

	vfsinit();
	bufinit();

	rump_sleepers_init();
	rumpuser_thrinit();

	mutex_init(&rump_giantlock, MUTEX_DEFAULT, IPL_NONE);

	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);
}

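/*
 * Allocate and zero a struct mount for the given file system type.
 * The result is mounted with rump_mnt_mount() and released with
 * rump_mnt_destroy().
 */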
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = rumpuser_malloc(sizeof(struct mount), 0);
	memset(mp, 0, sizeof(struct mount));

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);

	return mp;
}

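/*
 * Mount a file system: VFS_MOUNT() followed by VFS_STATVFS() and
 * VFS_START().  If a later step fails, the file system is forcibly
 * unmounted before the error is returned.
 */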
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen,
	struct lwp *l)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen, l);
	if (rv)
		return rv;

	rv = VFS_STATVFS(mp, &mp->mnt_stat, l);
	if (rv) {
		VFS_UNMOUNT(mp, MNT_FORCE, l);
		return rv;
	}

	rv = VFS_START(mp, 0, l);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE, l);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	rumpuser_free(mp);
}

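/*
 * Build a componentname for passing to VOP_LOOKUP() and related vnode
 * operations.  The pathname buffer is allocated here and released by
 * rump_freecn().
 */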
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;

	cnp = rumpuser_malloc(sizeof(struct componentname), 0);
	memset(cnp, 0, sizeof(struct componentname));

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;

	cnp->cn_cred = creds;
	cnp->cn_lwp = l;

	return cnp;
}

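/*
 * Release a componentname.  Credentials are dropped if the caller
 * passed RUMPCN_FREECRED.  The pathname buffer is freed here unless
 * SAVENAME was requested for an operation that was neither a lookup
 * nor flagged SAVESTART, in which case it is left to the caller.
 */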
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if (cnp->cn_flags & SAVENAME) {
		if (flags & RUMPCN_ISLOOKUP || cnp->cn_flags & SAVESTART)
			PNBUF_PUT(cnp->cn_pnbuf);
	} else {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	rumpuser_free(cnp);
}

int
rump_recyclenode(struct vnode *vp)
{

	return vrecycle(vp, NULL, curlwp);
}

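/*
 * Look up a registered fake block device by pathname.  The path is
 * first canonicalized with realpath() on the host so that different
 * names for the same file compare equal.
 */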
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

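/*
 * Register a host file as a fake block device.  Returns EEXIST if
 * the path has already been registered.
 */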
int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = rumpuser_malloc(sizeof(struct fakeblk), 1);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	rumpuser_free(fblk);
}

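/*
 * Export the type, size and (for device nodes) rdev of a vnode to
 * the caller.
 */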
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specinfo)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

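/*
 * Helpers which let the caller build a struct vattr without knowing
 * its layout: allocate a null vattr and set the type, mode and rdev
 * fields individually.
 */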
struct vattr*
rump_vattr_init()
{
	struct vattr *vap;

	vap = rumpuser_malloc(sizeof(struct vattr), 0);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	rumpuser_free(vap);
}

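/*
 * Direct manipulation of the vnode use count; no locking is done
 * here.
 */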
void
rump_vp_incref(struct vnode *vp)
{

	++vp->v_usecount;
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	--vp->v_usecount;
}

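/*
 * Construct a single-iovec system-space uio describing the given
 * buffer, typically for passing to VOP_READ()/VOP_WRITE().  It is
 * released, and the remaining resid returned, by rump_uio_free().
 */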
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = rumpuser_malloc(sizeof(struct uio), 0);
	uio->uio_iov = rumpuser_malloc(sizeof(struct iovec), 0);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	rumpuser_free(uio->uio_iov);
	rumpuser_free(uio);

	return resid;
}

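/*
 * Vnode lock wrappers exported to the caller.
 */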
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

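/*
 * Wrappers exporting selected VFS operations to the caller.
 */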
int
rump_vfs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{

	return VFS_UNMOUNT(mp, mntflags, l);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

/* XXX: statvfs is different from system to system */
#if 0
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{

	return VFS_STATVFS(mp, sbp, l);
}
#endif

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred, struct lwp *l)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred, l);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

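/*
 * Manufacture a new lwp/proc pair which shares the global cwd, stats
 * and limit structures, and optionally install it as the current lwp
 * of the calling host thread.  Undone by rump_clear_curlwp().
 */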
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_alloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_alloc(sizeof(struct proc), KM_SLEEP);
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	kmem_free(l->l_proc, sizeof(struct proc));
	kmem_free(l, sizeof(struct lwp));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

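/*
 * Lightweight spl emulation built on the rumpspl rwlock: while a
 * thread has raised its priority with rump_splfoo(), "interrupt"
 * handlers entering via rump_intr_enter() are kept from running
 * until the priority is dropped again with rump_splx().
 */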
int
rump_splfoo()
{

	if (!rumpuser_is_intr())
		rumpuser_rw_enter(&rumpspl, 0);

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_intr();
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_intr();
}

void
rump_splx(int dummy)
{

	if (!rumpuser_is_intr())
		rumpuser_rw_exit(&rumpspl);
}

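/*
 * I/O completion callback: update the buf to reflect the transfer
 * and run biodone() in "interrupt" context.
 */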
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}