/*	$NetBSD: rump.c,v 1.21 2007/11/07 18:59:18 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/cpu.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"

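/*
 * Stand-in kernel state: a minimal process, cwd, resource and credential
 * context plus a fake CPU, used in place of the structures a real kernel
 * sets up at boot.
 */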
struct proc rump_proc;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred;
struct cpu_info rump_cpu;

kmutex_t rump_giantlock;

struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

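/*
 * Worker for the aiodone workqueue: each work item is embedded in a
 * struct buf, so simply run the buffer's i/o completion routine.
 */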
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}

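/*
 * Bootstrap the rump kernel: initialize lwp0 and its process context,
 * bring up the VM and VFS subsystems, set up thread and spl support,
 * create the aiodone workqueue and fetch the host's hostname.
 */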
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	struct proc *p;
	struct lwp *l;
	int error;

	l = &lwp0;
	p = &rump_proc;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	/* should be "enough" */
	syncdelay = 0;

	vfsinit();
	bufinit();

	rump_sleepers_init();
	rumpuser_thrinit();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);
}

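/*
 * Mount helpers: rump_mnt_init() allocates and minimally initializes a
 * struct mount for the given vfsops, rump_mnt_mount() performs the
 * actual mount and rump_mnt_destroy() releases the structure again.
 *
 * A rough usage sketch (not part of this file; "myfs" and its mount
 * argument structure are hypothetical):
 *
 *	struct mount *mp;
 *	struct myfs_args args;
 *	size_t alen = sizeof(args);
 *
 *	rump_init();
 *	mp = rump_mnt_init(rump_vfs_getopsbyname("myfs"), 0);
 *	if (rump_mnt_mount(mp, "/", &args, &alen, rump_get_curlwp()) != 0)
 *		rump_mnt_destroy(mp);
 */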
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = rumpuser_malloc(sizeof(struct mount), 0);
	memset(mp, 0, sizeof(struct mount));

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);

	mount_initspecific(mp);

	return mp;
}

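/*
 * Mount the file system and bring it into a usable state; if a later
 * step fails, the mount is backed out with a forced unmount.
 */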
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen,
	struct lwp *l)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen, l);
	if (rv)
		return rv;

	rv = VFS_STATVFS(mp, &mp->mnt_stat, l);
	if (rv) {
		VFS_UNMOUNT(mp, MNT_FORCE, l);
		return rv;
	}

	rv = VFS_START(mp, 0, l);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE, l);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	rumpuser_free(mp);
}

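/*
 * Build a componentname suitable for passing to VOP lookup-style
 * operations.  The pathname buffer is allocated here and released
 * by rump_freecn().
 */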
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;

	cnp = rumpuser_malloc(sizeof(struct componentname), 0);
	memset(cnp, 0, sizeof(struct componentname));

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;

	cnp->cn_cred = creds;
	cnp->cn_lwp = l;

	return cnp;
}

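/*
 * Release a componentname.  The pathname buffer is freed except when
 * SAVENAME is set and the name was neither produced by a lookup
 * (RUMPCN_ISLOOKUP) nor held via SAVESTART.
 */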
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if (cnp->cn_flags & SAVENAME) {
		if (flags & RUMPCN_ISLOOKUP || cnp->cn_flags & SAVESTART)
			PNBUF_PUT(cnp->cn_pnbuf);
	} else {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	rumpuser_free(cnp);
}

int
rump_recyclenode(struct vnode *vp)
{

	return vrecycle(vp, NULL, curlwp);
}

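/*
 * Fake block device registry: host paths registered here can later be
 * looked up with rump_fakeblk_find().  Paths are stored in resolved
 * (realpath'd) form so that different names for the same file match.
 */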
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = rumpuser_malloc(sizeof(struct fakeblk), 1);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	rumpuser_free(fblk);
}

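/* Return the type, size and (for device nodes) rdev of a vnode. */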
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specinfo)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

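/*
 * Helpers for allocating a struct vattr and setting its commonly
 * needed fields without direct access to the structure.
 */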
struct vattr *
rump_vattr_init()
{
	struct vattr *vap;

	vap = rumpuser_malloc(sizeof(struct vattr), 0);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	rumpuser_free(vap);
}

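/* Manipulate and query a vnode's use count directly. */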
void
rump_vp_incref(struct vnode *vp)
{

	++vp->v_usecount;
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	--vp->v_usecount;
}

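/*
 * uio helpers: rump_uio_setup() builds a single-iovec, system-space uio
 * for a read or write at the given offset; rump_uio_free() releases it
 * and reports the residual byte count.
 */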
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = rumpuser_malloc(sizeof(struct uio), 0);
	uio->uio_iov = rumpuser_malloc(sizeof(struct iovec), 0);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	rumpuser_free(uio->uio_iov);
	rumpuser_free(uio);

	return resid;
}

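/* Vnode locking wrappers around VOP_LOCK/VOP_UNLOCK/VOP_ISLOCKED. */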
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

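/* Thin wrappers for calling the VFS_* operations of a mounted file system. */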
int
rump_vfs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{

	return VFS_UNMOUNT(mp, mntflags, l);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

/* XXX: statvfs is different from system to system */
#if 0
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{

	return VFS_STATVFS(mp, sbp, l);
}
#endif

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred, struct lwp *l)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred, l);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

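/*
 * curlwp emulation: allocate a proc/lwp pair to represent the calling
 * host thread and optionally install it as the current lwp.
 */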
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_alloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_alloc(sizeof(struct proc), KM_SLEEP);
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	kmem_free(l->l_proc, sizeof(struct proc));
	kmem_free(l, sizeof(struct lwp));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

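/*
 * spl and interrupt emulation.  The rumpspl lock is shared between
 * spl-protected sections (rump_splfoo()/rump_splx()) and the interrupt
 * path (rump_intr_enter()/rump_intr_exit()), so "interrupts" are kept
 * out of spl-protected code.
 */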
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}

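/*
 * I/O completion callback: record the residual count and error status,
 * then finish the buffer at emulated interrupt level.
 */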
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}