/*	$NetBSD: rump.c,v 1.28 2008/01/03 02:48:03 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/cpu.h>
32 #include <sys/filedesc.h>
33 #include <sys/kauth.h>
34 #include <sys/kmem.h>
35 #include <sys/mount.h>
36 #include <sys/namei.h>
37 #include <sys/queue.h>
38 #include <sys/resourcevar.h>
39 #include <sys/select.h>
40 #include <sys/vnode.h>
41
42 #include <miscfs/specfs/specdev.h>
43
44 #include "rump_private.h"
45 #include "rumpuser.h"
46
/*
 * Static stand-ins for kernel state normally set up at boot:
 * the rump "process 0" and its accounting, cwd, limit, credential,
 * cpu and file descriptor structures.  Wired together in rump_init().
 */
struct proc rump_proc;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred;
struct cpu_info rump_cpu;
struct filedesc0 rump_filedesc0;

kmutex_t rump_giantlock;	/* stand-in for the kernel biglock */

sigset_t sigcantmask;		/* referenced by borrowed kernel code; left empty */

/*
 * Registry entry for a host path faked as a block device.  Paths are
 * stored canonicalized (rumpuser_realpath()) so lookups compare equal
 * regardless of how the caller spelled them.
 */
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
65
66 static void
67 rump_aiodone_worker(struct work *wk, void *dummy)
68 {
69 struct buf *bp = (struct buf *)wk;
70
71 KASSERT(&bp->b_work == wk);
72 bp->b_iodone(bp);
73 }
74
int rump_inited;	/* nonzero once rump_init() has completed */
76
/*
 * Bootstrap the rump kernel: wire up the static process-0 stand-ins,
 * initialize the vm, vfs, buffer, descriptor and select subsystems,
 * and create the aiodone workqueue.  Safe to call more than once,
 * but not concurrently (no locking around the rump_inited check).
 */
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX: unlocked check; assumes single-threaded startup */
	if (rump_inited)
		return;
	rump_inited = 1;

	/* assemble the static stand-ins into a minimal proc0/lwp0 */
	l = &lwp0;
	p = &rump_proc;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0.fd_fd;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	/* no file size / open file limits for the rump process */
	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	/* should be "enough" */
	syncdelay = 0;

	/* bring up the kernel subsystems we borrow */
	vfsinit();
	bufinit();
	filedesc_init();
	selsysinit();

	rump_sleepers_init();
	rumpuser_thrinit();

	/* giantlock must be recursive, like the real kernel biglock */
	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");

	/* mirror the host's hostname into the rump kernel */
	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fdinit1(&rump_filedesc0);
}
135
136 struct mount *
137 rump_mnt_init(struct vfsops *vfsops, int mntflags)
138 {
139 struct mount *mp;
140
141 mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);
142
143 mp->mnt_op = vfsops;
144 mp->mnt_flag = mntflags;
145 TAILQ_INIT(&mp->mnt_vnodelist);
146
147 mount_initspecific(mp);
148
149 return mp;
150 }
151
152 int
153 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
154 {
155 int rv;
156
157 rv = VFS_MOUNT(mp, path, data, dlen);
158 if (rv)
159 return rv;
160
161 (void) VFS_STATVFS(mp, &mp->mnt_stat);
162 rv = VFS_START(mp, 0);
163 if (rv)
164 VFS_UNMOUNT(mp, MNT_FORCE);
165
166 return rv;
167 }
168
169 void
170 rump_mnt_destroy(struct mount *mp)
171 {
172
173 mount_finispecific(mp);
174 kmem_free(mp, sizeof(*mp));
175 }
176
177 struct componentname *
178 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
179 kauth_cred_t creds, struct lwp *l)
180 {
181 struct componentname *cnp;
182
183 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
184
185 cnp->cn_nameiop = nameiop;
186 cnp->cn_flags = flags;
187
188 cnp->cn_pnbuf = PNBUF_GET();
189 strcpy(cnp->cn_pnbuf, name);
190 cnp->cn_nameptr = cnp->cn_pnbuf;
191 cnp->cn_namelen = namelen;
192
193 cnp->cn_cred = creds;
194
195 return cnp;
196 }
197
/*
 * Release a componentname built by rump_makecn().
 *
 * RUMPCN_FREECRED in flags additionally destroys the credential.
 * The pathname buffer is returned to the PNBUF pool except in the
 * SAVENAME case where neither a lookup nor SAVESTART entitles us
 * to free it (the caller keeps the name alive).
 */
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if (cnp->cn_flags & SAVENAME) {
		if (flags & RUMPCN_ISLOOKUP || cnp->cn_flags & SAVESTART)
			PNBUF_PUT(cnp->cn_pnbuf);
	} else {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	kmem_free(cnp, sizeof(*cnp));
}
213
214 int
215 rump_recyclenode(struct vnode *vp)
216 {
217
218 return vrecycle(vp, NULL, curlwp);
219 }
220
221 static struct fakeblk *
222 _rump_fakeblk_find(const char *path)
223 {
224 char buf[MAXPATHLEN];
225 struct fakeblk *fblk;
226 int error;
227
228 if (rumpuser_realpath(path, buf, &error) == NULL)
229 return NULL;
230
231 LIST_FOREACH(fblk, &fakeblks, entries)
232 if (strcmp(fblk->path, buf) == 0)
233 return fblk;
234
235 return NULL;
236 }
237
238 int
239 rump_fakeblk_register(const char *path)
240 {
241 char buf[MAXPATHLEN];
242 struct fakeblk *fblk;
243 int error;
244
245 if (_rump_fakeblk_find(path))
246 return EEXIST;
247
248 if (rumpuser_realpath(path, buf, &error) == NULL)
249 return error;
250
251 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
252 if (fblk == NULL)
253 return ENOMEM;
254
255 strlcpy(fblk->path, buf, MAXPATHLEN);
256 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
257
258 return 0;
259 }
260
261 int
262 rump_fakeblk_find(const char *path)
263 {
264
265 return _rump_fakeblk_find(path) != NULL;
266 }
267
268 void
269 rump_fakeblk_deregister(const char *path)
270 {
271 struct fakeblk *fblk;
272
273 fblk = _rump_fakeblk_find(path);
274 if (fblk == NULL)
275 return;
276
277 LIST_REMOVE(fblk, entries);
278 kmem_free(fblk, sizeof(*fblk));
279 }
280
281 void
282 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
283 {
284
285 *vtype = vp->v_type;
286 *vsize = vp->v_size;
287 if (vp->v_specinfo)
288 *vdev = vp->v_rdev;
289 else
290 *vdev = 0;
291 }
292
293 struct vfsops *
294 rump_vfslist_iterate(struct vfsops *ops)
295 {
296
297 if (ops == NULL)
298 return LIST_FIRST(&vfs_list);
299 else
300 return LIST_NEXT(ops, vfs_list);
301 }
302
/*
 * Thin wrapper: resolve a file system name to its vfsops.
 */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{
	struct vfsops *ops;

	ops = vfs_getopsbyname(name);
	return ops;
}
309
310 struct vattr*
311 rump_vattr_init()
312 {
313 struct vattr *vap;
314
315 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
316 vattr_null(vap);
317
318 return vap;
319 }
320
321 void
322 rump_vattr_settype(struct vattr *vap, enum vtype vt)
323 {
324
325 vap->va_type = vt;
326 }
327
328 void
329 rump_vattr_setmode(struct vattr *vap, mode_t mode)
330 {
331
332 vap->va_mode = mode;
333 }
334
335 void
336 rump_vattr_setrdev(struct vattr *vap, dev_t dev)
337 {
338
339 vap->va_rdev = dev;
340 }
341
342 void
343 rump_vattr_free(struct vattr *vap)
344 {
345
346 kmem_free(vap, sizeof(*vap));
347 }
348
/*
 * Bump a vnode's use count.
 * NOTE(review): plain ++ with no interlock held — appears to assume
 * callers provide their own serialization; confirm against callers.
 */
void
rump_vp_incref(struct vnode *vp)
{

	++vp->v_usecount;
}
355
/* Return a vnode's current use count (unlocked snapshot). */
int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}
362
/*
 * Drop a vnode's use count.
 * NOTE(review): unlocked like rump_vp_incref(); no freeing is done
 * here even if the count reaches zero.
 */
void
rump_vp_decref(struct vnode *vp)
{

	--vp->v_usecount;
}
369
370 struct uio *
371 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
372 {
373 struct uio *uio;
374 enum uio_rw uiorw;
375
376 switch (rw) {
377 case RUMPUIO_READ:
378 uiorw = UIO_READ;
379 break;
380 case RUMPUIO_WRITE:
381 uiorw = UIO_WRITE;
382 break;
383 default:
384 panic("%s: invalid rw %d", __func__, rw);
385 }
386
387 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
388 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
389
390 uio->uio_iov->iov_base = buf;
391 uio->uio_iov->iov_len = bufsize;
392
393 uio->uio_iovcnt = 1;
394 uio->uio_offset = offset;
395 uio->uio_resid = bufsize;
396 uio->uio_rw = uiorw;
397 uio->uio_vmspace = UIO_VMSPACE_SYS;
398
399 return uio;
400 }
401
402 size_t
403 rump_uio_getresid(struct uio *uio)
404 {
405
406 return uio->uio_resid;
407 }
408
409 off_t
410 rump_uio_getoff(struct uio *uio)
411 {
412
413 return uio->uio_offset;
414 }
415
416 size_t
417 rump_uio_free(struct uio *uio)
418 {
419 size_t resid;
420
421 resid = uio->uio_resid;
422 kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
423 kmem_free(uio, sizeof(*uio));
424
425 return resid;
426 }
427
428 void
429 rump_vp_lock_exclusive(struct vnode *vp)
430 {
431
432 /* we can skip vn_lock() */
433 VOP_LOCK(vp, LK_EXCLUSIVE);
434 }
435
436 void
437 rump_vp_lock_shared(struct vnode *vp)
438 {
439
440 VOP_LOCK(vp, LK_SHARED);
441 }
442
/* Release a vnode lock taken by one of the lock wrappers above. */
void
rump_vp_unlock(struct vnode *vp)
{
	VOP_UNLOCK(vp, 0);
}
449
/* Query a vnode's lock state via its lock operation. */
int
rump_vp_islocked(struct vnode *vp)
{
	return VOP_ISLOCKED(vp);
}
456
457 void
458 rump_vp_interlock(struct vnode *vp)
459 {
460
461 mutex_enter(&vp->v_interlock);
462 }
463
/* Unmount a file system; mntflags are passed through to VFS_UNMOUNT. */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{
	return VFS_UNMOUNT(mp, mntflags);
}
470
/*
 * Get the root vnode of a mount.  VFS_ROOT returns it locked;
 * unlock before returning unless the caller asked to keep the lock.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int error;

	error = VFS_ROOT(mp, vpp);
	if (error != 0)
		return error;

	if (lock == 0)
		VOP_UNLOCK(*vpp, 0);
	return 0;
}
485
/*
 * XXX: statvfs is different from system to system
 * (kept disabled; struct statvfs layout is not portable across hosts)
 */
#if 0
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}
#endif
495
496 int
497 rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
498 {
499
500 return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
501 }
502
/* Translate a file handle to a vnode on mp. */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{
	return VFS_FHTOVP(mp, fid, vpp);
}
509
510 int
511 rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
512 {
513
514 return VFS_VPTOFH(vp, fid, fidsize);
515 }
516
/*
 * Wait for all buffers to flush; complain (but carry on) if some
 * remain dirty.  The mount argument is unused.
 */
/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int unsynced;

	unsynced = buf_syncwait();
	if (unsynced != 0)
		printf("syncwait: unsynced buffers: %d\n", unsynced);
}
527
528 void
529 rump_bioops_sync()
530 {
531
532 if (bioopsp)
533 bioopsp->io_sync(NULL);
534 }
535
536 struct lwp *
537 rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
538 {
539 struct lwp *l;
540 struct proc *p;
541
542 l = kmem_alloc(sizeof(struct lwp), KM_SLEEP);
543 p = kmem_alloc(sizeof(struct proc), KM_SLEEP);
544 p->p_stats = &rump_stats;
545 p->p_cwdi = &rump_cwdi;
546 p->p_limit = &rump_limits;
547 p->p_pid = pid;
548 p->p_vmspace = &rump_vmspace;
549 l->l_cred = rump_cred;
550 l->l_proc = p;
551 l->l_lid = lid;
552
553 if (set)
554 rumpuser_set_curlwp(l);
555
556 return l;
557 }
558
559 void
560 rump_clear_curlwp()
561 {
562 struct lwp *l;
563
564 l = rumpuser_get_curlwp();
565 kmem_free(l->l_proc, sizeof(struct proc));
566 kmem_free(l, sizeof(struct lwp));
567 rumpuser_set_curlwp(NULL);
568 }
569
570 struct lwp *
571 rump_get_curlwp()
572 {
573 struct lwp *l;
574
575 l = rumpuser_get_curlwp();
576 if (l == NULL)
577 l = &lwp0;
578
579 return l;
580 }
581
/*
 * Fake spl(9) raise: outside interrupt context, take the reader
 * side of the global spl rwlock (rumpspl, declared elsewhere) and
 * mark the ipl.  Returns a dummy cookie for rump_splx().
 * In interrupt context this is a no-op — the interrupt path already
 * holds the writer side (see rump_intr_enter()).
 */
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}
593
/*
 * Enter fake interrupt context: mark the ipl first, then take the
 * writer side of the spl rwlock, excluding all spl "raised" code.
 */
static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}
601
/*
 * Leave fake interrupt context: release the writer lock, then
 * clear the interrupt ipl (mirror image of rump_intr_enter()).
 */
static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}
609
/*
 * Fake spl(9) restore, paired with rump_splfoo().  The cookie
 * argument is ignored.  No-op when called from interrupt context,
 * matching rump_splfoo()'s behavior there.
 */
void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
619
/*
 * I/O completion callback: record the transfer result on the buf
 * (count = bytes actually moved) and run biodone() bracketed by
 * fake interrupt context, as the real kernel would from an
 * interrupt handler.
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);	/* count must not exceed request */
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
633