/*	$NetBSD: emul.c,v 1.53.4.1 2011/07/15 23:41:13 riz Exp $	*/
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #define malloc(a,b,c) __wrap_malloc(a,b,c)
31
32 #include <sys/param.h>
33 #include <sys/malloc.h>
34 #include <sys/null.h>
35 #include <sys/vnode.h>
36 #include <sys/stat.h>
37 #include <sys/select.h>
38 #include <sys/syslog.h>
39 #include <sys/namei.h>
40 #include <sys/kauth.h>
41 #include <sys/conf.h>
42 #include <sys/device.h>
43 #include <sys/queue.h>
44 #include <sys/file.h>
45 #include <sys/filedesc.h>
46 #include <sys/kthread.h>
47 #include <sys/cpu.h>
48 #include <sys/kmem.h>
49 #include <sys/poll.h>
50 #include <sys/tprintf.h>
51 #include <sys/timetc.h>
52
53 #include <machine/stdarg.h>
54
55 #include <rump/rumpuser.h>
56
57 #include <uvm/uvm_map.h>
58
59 #include "rump_private.h"
60
time_t time_second = 1;		/* coarse "current second"; never 0 */

kmutex_t *proc_lock;		/* lock covering the process lists */
struct lwp lwp0;		/* statically allocated lwp for proc0 */
struct vnode *rootvp;		/* vnode of the root device */
struct device *root_device;	/* the one and only root device */
dev_t rootdev;			/* device number of the root device */
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;		/* nonzero while shutting down */
int ncpu = 1;			/* rump exposes a single virtual CPU */
const int schedppq = 1;		/* scheduler priorities per queue */
int hardclock_ticks;		/* hardclock() tick counter */
bool mp_online = false;		/* secondary CPUs never come online here */
struct vm_map *mb_map;		/* mbuf cluster map; placeholder only */

char hostname[MAXHOSTNAMELEN];	/* kernel hostname buffer */
size_t hostnamelen;		/* current length of hostname */

/* buffer cache accounting knobs consumed by the vfs/bio code */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

const char *panicstr;		/* panic message, were we a real kernel */
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

/* zero-filled signal property table; rump handles almost no signals */
const int sigprop[NSIG];

/* dummy filterops backing seltrue-style kqueue filters */
const struct filterops seltrue_filtops;
95
/*
 * panic: report an unrecoverable kernel condition on the console and
 * terminate the hosting process via abort().  Does not return.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;

	printf("panic: ");
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");

	abort();
	/* NOTREACHED */
}
108
/*
 * vlog: kernel log with a caller-provided va_list; rump simply
 * forwards everything to stdio, ignoring the priority level.
 */
void
vlog(int level, const char *fmt, va_list ap)
{
	vprintf(fmt, ap);
}

/*
 * log: varargs front-end; the priority level is ignored in rump.
 */
void
log(int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vlog(level, fmt, ap);
	va_end(ap);
}

/*
 * uprintf: print to the "controlling terminal", i.e. plain stdout here.
 */
void
uprintf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
135
136 /* relegate this to regular printf */
137 tpr_t
138 tprintf_open(struct proc *p)
139 {
140
141 return (tpr_t)0x111;
142 }
143
144 void
145 tprintf(tpr_t tpr, const char *fmt, ...)
146 {
147 va_list ap;
148
149 va_start(ap, fmt);
150 vprintf(fmt, ap);
151 va_end(ap);
152 }
153
154 void
155 tprintf_close(tpr_t tpr)
156 {
157
158 }
159
/*
 * printf_nolog: console output that bypasses the message buffer;
 * rump has no message buffer, so this is ordinary printf.
 */
void
printf_nolog(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/*
 * aprint_normal: autoconf message output, again plain printf here.
 */
void
aprint_normal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
179
/*
 * copyin: copy len bytes from "user" space to kernel space.  In rump
 * both live in the same address space, so this is a straight copy.
 * Always succeeds.
 */
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	memcpy(kaddr, uaddr, len);

	return 0;
}

/*
 * copyout: kernel-to-"user" counterpart of copyin.  Always succeeds.
 */
int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	memcpy(uaddr, kaddr, len);

	return 0;
}
195
/*
 * copyinstr: copy a NUL-terminated string of at most len bytes
 * (including the terminator) from "user" to kernel space.  On return
 * *done, when non-NULL, holds the copied length including the NUL.
 * Truncation is not reported; the call always succeeds.
 */
int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	strlcpy(kaddr, uaddr, len);
	if (done != NULL)
		*done = strlen(kaddr) + 1;	/* includes termination */

	return 0;
}

/*
 * copystr: kernel-to-kernel string copy; same semantics as copyinstr
 * since everything shares one address space here.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
	return copyinstr(kfaddr, kdaddr, len, done);
}
212
/*
 * copyin_vmspace: vmspace-qualified copyin.  rump has exactly one
 * address space, so the vmspace argument is irrelevant.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	return copyin(uaddr, kaddr, len);
}

/*
 * copyout_vmspace: vmspace-qualified copyout; see copyin_vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	return copyout(kaddr, uaddr, len);
}
226
/*
 * kcopy: fault-protected kernel-to-kernel copy.  No faults can happen
 * in rump, so this is memcpy that always reports success.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	memcpy(dst, src, len);

	return 0;
}
234
/*
 * uiomove: transfer up to n bytes between the flat buffer buf and the
 * iovec chain described by uio, advancing the uio (iov pointers,
 * resid, offset) as it goes.  Only UIO_VMSPACE_SYS uios are handled.
 * Returns 0, or whatever the rump ubc shortcut decides.
 */
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;
	int rv;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	/*
	 * See if the rump ubc code claims this offset.  This is of
	 * course a blatant violation of abstraction levels, but let's
	 * keep it simple & stupid for now.
	 */
	if (rump_ubc_magic_uiomove(buf, n, uio, &rv, NULL))
		return rv;

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* exhausted iovec; step to the next one */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		/* UIO_READ fills the caller's iovecs, UIO_WRITE drains them */
		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		/* advance both the iovec cursor and the uio bookkeeping */
		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}
280
281 void
282 uio_setup_sysspace(struct uio *uio)
283 {
284
285 uio->uio_vmspace = UIO_VMSPACE_SYS;
286 }
287
288 const struct bdevsw *
289 bdevsw_lookup(dev_t dev)
290 {
291
292 return (const struct bdevsw *)1;
293 }
294
295 devclass_t
296 device_class(device_t dev)
297 {
298
299 if (dev != root_device)
300 panic("%s: dev != root_device not supported", __func__);
301
302 return DV_DISK;
303 }
304
/*
 * getmicrouptime: fill *tvp with the host's notion of the time.
 * NOTE(review): this returns wall-clock time, not time-since-boot;
 * callers in rump appear to tolerate that.
 */
void
getmicrouptime(struct timeval *tvp)
{
	int error;

	rumpuser_gettimeofday(tvp, &error);	/* error deliberately ignored */
}
312
/*
 * malloc_type_attach: malloc type statistics are not tracked in rump.
 */
void
malloc_type_attach(struct malloc_type *type)
{
	/* nothing to do */
}

/*
 * malloc_type_detach: counterpart of malloc_type_attach; also a no-op.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	/* nothing to do */
}
326
327 void *
328 __wrap_malloc(unsigned long size, struct malloc_type *type, int flags)
329 {
330 void *rv;
331
332 rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
333 if (rv && flags & M_ZERO)
334 memset(rv, 0, size);
335
336 return rv;
337 }
338
/*
 * nanotime: current time with nanosecond resolution (as far as the
 * host's gettimeofday allows).
 */
void
nanotime(struct timespec *ts)
{
	struct timeval now;
	int error;

	rumpuser_gettimeofday(&now, &error);
	TIMEVAL_TO_TIMESPEC(&now, ts);
}

/*
 * getnanotime: "cheap" time interface; identical to nanotime here.
 * (hooray for mick, so what if I do)
 */
void
getnanotime(struct timespec *ts)
{
	nanotime(ts);
}

/*
 * microtime: current time with microsecond resolution.
 */
void
microtime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

/*
 * getmicrotime: "cheap" variant of microtime; same implementation.
 */
void
getmicrotime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}
372
373 void
374 bdev_strategy(struct buf *bp)
375 {
376
377 panic("%s: not supported", __func__);
378 }
379
380 int
381 bdev_type(dev_t dev)
382 {
383
384 return D_DISK;
385 }
386
/*
 * Descriptor handed to threadbouncer() when a kernel thread is
 * created; it carries the thread entry point, its argument and the
 * pre-created lwp that the new thread installs as curlwp.
 */
struct kthdesc {
	void (*f)(void *);	/* thread entry point */
	void *arg;		/* argument passed to f */
	struct lwp *mylwp;	/* lwp context for the new thread */
};
392
393 static void *
394 threadbouncer(void *arg)
395 {
396 struct kthdesc *k = arg;
397 void (*f)(void *);
398 void *thrarg;
399
400 f = k->f;
401 thrarg = k->arg;
402 rumpuser_set_curlwp(k->mylwp);
403 kmem_free(k, sizeof(struct kthdesc));
404
405 f(thrarg);
406 panic("unreachable, should kthread_exit()");
407 }
408
409 int
410 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
411 void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
412 {
413 struct kthdesc *k;
414 struct lwp *l;
415 int rv;
416
417 if (!rump_threads) {
418 /* fake them */
419 if (strcmp(fmt, "vrele") == 0) {
420 printf("rump warning: threads not enabled, not starting"
421 " vrele thread\n");
422 return 0;
423 } else if (strcmp(fmt, "cachegc") == 0) {
424 printf("rump warning: threads not enabled, not starting"
425 " namecache g/c thread\n");
426 return 0;
427 } else
428 panic("threads not available, setenv RUMP_THREADS 1");
429 }
430
431 KASSERT(fmt != NULL);
432 if (ci != NULL)
433 panic("%s: bounded threads not supported", __func__);
434
435 k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
436 k->f = func;
437 k->arg = arg;
438 k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
439 rv = rumpuser_thread_create(threadbouncer, k);
440 if (rv)
441 return rv;
442
443 if (newlp)
444 *newlp = l;
445 return 0;
446 }
447
/*
 * kthread_exit: terminate the calling kernel thread.  The exit code
 * is ignored; the host thread simply exits.
 */
void
kthread_exit(int ecode)
{
	rumpuser_thread_exit();
}
454
455 struct proc *
456 p_find(pid_t pid, uint flags)
457 {
458
459 panic("%s: not implemented", __func__);
460 }
461
462 struct pgrp *
463 pg_find(pid_t pid, uint flags)
464 {
465
466 panic("%s: not implemented", __func__);
467 }
468
/*
 * psignal: post a signal to a process.  rump quietly drops SIGSYS
 * (generated for unimplemented syscalls) and cannot handle anything
 * else.
 */
void
psignal(struct proc *p, int signo)
{
	if (signo != SIGSYS)
		panic("unhandled signal %d", signo);
	/* SIGSYS: silently discarded */
}
480
481 void
482 kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
483 {
484
485 panic("%s: not implemented", __func__);
486 }
487
488 void
489 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
490 {
491
492 panic("%s: not implemented", __func__);
493 }
494
495 int
496 pgid_in_session(struct proc *p, pid_t pg_id)
497 {
498
499 panic("%s: not implemented", __func__);
500 }
501
/*
 * sigispending: is a signal pending for this lwp?  Never, in rump.
 */
int
sigispending(struct lwp *l, int signo)
{
	return 0;
}

/*
 * knote_fdclose: detach knotes on file descriptor close.
 * Since we never add knotes, there is nothing to remove.
 */
void
knote_fdclose(int fd)
{
}
515
/*
 * seltrue_kqfilter: kqueue filter for always-ready devices.
 * Not implemented in rump.
 */
int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	panic("%s: not implemented", __func__);
}
522
523 int
524 kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
525 {
526 extern int hz;
527 int rv, error;
528 struct timespec time;
529
530 if (mtx)
531 mutex_exit(mtx);
532
533 time.tv_sec = timeo / hz;
534 time.tv_nsec = (timeo % hz) * (1000000000 / hz);
535
536 rv = rumpuser_nanosleep(&time, NULL, &error);
537
538 if (mtx)
539 mutex_enter(mtx);
540
541 if (rv)
542 return error;
543
544 return 0;
545 }
546
547 void
548 suspendsched()
549 {
550
551 panic("%s: not implemented", __func__);
552 }
553
554 u_int
555 lwp_unsleep(lwp_t *l, bool cleanup)
556 {
557
558 KASSERT(mutex_owned(l->l_mutex));
559
560 return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
561 }
562
563 vaddr_t
564 calc_cache_size(struct vm_map *map, int pct, int va_pct)
565 {
566 paddr_t t;
567
568 t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
569 if ((vaddr_t)t != t) {
570 panic("%s: needs tweak", __func__);
571 }
572 return t;
573 }
574
575 int
576 seltrue(dev_t dev, int events, struct lwp *l)
577 {
578 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
579 }
580
581 void
582 selrecord(lwp_t *selector, struct selinfo *sip)
583 {
584 }
585
586 void
587 selinit(struct selinfo *sip)
588 {
589 }
590
591 void
592 selnotify(struct selinfo *sip, int events, long knhint)
593 {
594 }
595
596 void
597 seldestroy(struct selinfo *sip)
598 {
599 }
600
601 const char *
602 device_xname(device_t dv)
603 {
604 return "bogus0";
605 }
606
607 void
608 assert_sleepable(void)
609 {
610
611 /* always sleepable, although we should improve this */
612 }
613
/*
 * devsw_attach: register block/character device switch entries.
 * Not implemented in rump.
 */
int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
	const struct cdevsw *cdev, int *cmajor)
{
	panic("%s: not implemented", __func__);
}

/*
 * devsw_detach: counterpart of devsw_attach.  Not implemented.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	panic("%s: not implemented", __func__);
}

/*
 * tc_setclock: step the system clock.  Not implemented in rump.
 */
void
tc_setclock(struct timespec *ts)
{
	panic("%s: not implemented", __func__);
}
635
636 void
637 proc_crmod_enter()
638 {
639
640 panic("%s: not implemented", __func__);
641 }
642
643 void
644 proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
645 {
646
647 panic("%s: not implemented", __func__);
648 }
649
650 int
651 killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
652 {
653
654 panic("%s: not implemented", __func__);
655 }
656
/*
 * setucontext: install a ucontext into an lwp.  Not implemented.
 */
int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	panic("%s: not implemented", __func__);
}

/*
 * getucontext: extract an lwp's ucontext.  Not implemented.
 */
void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	panic("%s: not implemented", __func__);
}

/*
 * getnanouptime: time since boot in nanoseconds.  Not implemented.
 */
void
getnanouptime(struct timespec *tsp)
{
	panic("%s: not implemented", __func__);
}
677
678 void
679 kpsignal2(struct proc *p, ksiginfo_t *ksi)
680 {
681
682 panic("%s: not implemented", __func__);
683 }
684
685 void
686 sigclearall(struct proc *p, const sigset_t *mask, ksiginfoq_t *kq)
687 {
688
689 panic("%s: not implemented", __func__);
690 }
691
692 int
693 sigget(sigpend_t *sp, ksiginfo_t *out, int signo, const sigset_t *mask)
694 {
695
696 panic("%s: not implemented", __func__);
697 }
698
699 void
700 ksiginfo_queue_drain0(ksiginfoq_t *kq)
701 {
702
703 panic("%s: not implemented", __func__);
704 }
705