/*	$NetBSD: rump.c,v 1.33 2008/01/27 20:01:29 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

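/*
 * rump (Runnable Userspace Meta Programs): core glue for running
 * unmodified kernel file system code as part of a userspace process.
 * This file bootstraps the rump kernel and exports helpers for
 * mounts, componentnames, vnodes, uio and curlwp handling.
 */
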
#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"

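/*
 * Statically allocated stand-ins for the structures which normally
 * hang off proc0/lwp0 in a full kernel.
 */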
struct proc rump_proc;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc0 rump_filedesc0;

kmutex_t rump_giantlock;

sigset_t sigcantmask;

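/*
 * Host file paths registered as "fake" block devices,
 * cf. rump_fakeblk_register() below.
 */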
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

#ifndef RUMP_WITHOUT_THREADS
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* RUMP_WITHOUT_THREADS */

int rump_inited;

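/*
 * Bootstrap the rump kernel: set up a minimal process/lwp context,
 * initialize the VM, VFS, buffer cache and file descriptor subsystems,
 * start the aiodone workqueue and fetch the hostname from the host.
 */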
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return;
	rump_inited = 1;

	l = &lwp0;
	p = &rump_proc;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0.fd_fd;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;
	rw_init(&rump_cwdi.cwdi_lock);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	/* should be "enough" */
	syncdelay = 0;

	vfsinit();
	bufinit();
	filedesc_init();
	selsysinit();

	rumpvfs_init();

	rump_sleepers_init();
	rumpuser_thrinit();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* RUMP_WITHOUT_THREADS */

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fdinit1(&rump_filedesc0);
}

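/*
 * Allocate and minimally initialize a struct mount suitable for
 * passing to rump_mnt_mount().
 */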
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);

	mount_initspecific(mp);

	return mp;
}

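/*
 * Mount a file system: do the actual VFS_MOUNT() and VFS_START().
 * If starting fails, back the mount out with a forced unmount.
 */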
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}

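/*
 * Construct a componentname, i.e. fabricate the part of the namei
 * state which file system VOPs expect to receive from their caller.
 */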
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}

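/*
 * Release a componentname created with rump_makecn(), honouring the
 * SAVENAME/SAVESTART pathname buffer rules.  If RUMPCN_FREECRED is
 * given, the credential is destroyed as well.
 */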
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if (cnp->cn_flags & SAVENAME) {
		if (flags & RUMPCN_ISLOOKUP || cnp->cn_flags & SAVESTART)
			PNBUF_PUT(cnp->cn_pnbuf);
	} else {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	kmem_free(cnp, sizeof(*cnp));
}

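/*
 * Fake block devices: a registry of host file paths which should be
 * treated as block devices by the rump kernel.  Lookups compare the
 * resolved (realpath'd) paths.
 */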
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}

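/*
 * Return basic information (type, size, device number) about a vnode.
 */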
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

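/*
 * struct vattr handling: allocation and simple mutators, so callers
 * outside the kernel namespace need not poke at the structure directly.
 */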
struct vattr*
rump_vattr_init()
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

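/*
 * Vnode reference count manipulation for external callers.
 */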
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

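/*
 * uio handling: construct a single-iovec, kernel-space uio for
 * transferring bufsize bytes at the given offset.
 */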
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
	uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}

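/*
 * Vnode lock wrappers.
 */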
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}

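/*
 * Wrappers for the VFS_*() operation macros.
 */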
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

/* XXX: statvfs is different from system to system */
#if 0
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}
#endif

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

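/*
 * curlwp emulation: give a host thread entering the rump kernel its
 * own lwp/proc pair, stored and retrieved through
 * rumpuser_set_curlwp()/rumpuser_get_curlwp().
 */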
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_alloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_alloc(sizeof(struct proc), KM_SLEEP);
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	kmem_free(l->l_proc, sizeof(struct proc));
	kmem_free(l, sizeof(struct lwp));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

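/*
 * Interrupt priority level emulation: code running at "splfoo" and
 * fake interrupt context (e.g. the biodone processing below) are
 * mutually excluded via the rumpspl reader/writer lock.
 */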
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}

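/*
 * I/O completion callback: update the buf to reflect the completed
 * transfer and hand it to biodone() in emulated interrupt context.
 */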
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}