/*	$NetBSD: emul.c,v 1.58 2008/12/10 18:47:01 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

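/*
 * Route malloc(9) calls made in this file to the local __wrap_malloc()
 * below, which forwards the request to the host allocator via rumpuser.
 */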
#define malloc(a,b,c) __wrap_malloc(a,b,c)

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/tprintf.h>
#include <sys/timetc.h>

#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

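/*
 * Kernel globals normally provided by machine-dependent code and the
 * full kernel; here they get fixed, minimal values suitable for a rump
 * kernel hosted in a process.
 */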
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops seltrue_filtops;

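/*
 * Console output: all kernel printf variants end up in the hosting
 * process's stdio, and panic() terminates the process with abort().
 */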
void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	printf("panic: ");
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
	abort();
}

void
log(int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
vlog(int level, const char *fmt, va_list ap)
{

	vprintf(fmt, ap);
}

void
uprintf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* relegate this to regular printf */
tpr_t
tprintf_open(struct proc *p)
{

	return (tpr_t)0x111;
}

void
tprintf(tpr_t tpr, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
tprintf_close(tpr_t tpr)
{

}

void
printf_nolog(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
aprint_normal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

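/*
 * A rump kernel shares its address space with the process hosting it,
 * so the user/kernel copy routines reduce to plain memcpy()/strlcpy().
 */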
int
copyin(const void *uaddr, void *kaddr, size_t len)
{

	memcpy(kaddr, uaddr, len);
	return 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{

	memcpy(uaddr, kaddr, len);
	return 0;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	strlcpy(kaddr, uaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}

int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}

int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}

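/*
 * Minimal uiomove(): walk the iovec array, copying in the direction
 * given by uio_rw and advancing the offset/resid bookkeeping.  Only
 * UIO_VMSPACE_SYS requests are supported here.
 */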
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}

const struct bdevsw *
bdevsw_lookup(dev_t dev)
{

	return (const struct bdevsw *)1;
}

devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

void
getmicrouptime(struct timeval *tvp)
{
	int error;

	rumpuser_gettimeofday(tvp, &error);
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

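/*
 * Kernel malloc(9) backed by the host allocator.  The second argument
 * to rumpuser_malloc() tells it whether failure is acceptable
 * (M_CANFAIL/M_NOWAIT); M_ZERO is honoured by clearing the memory here.
 */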
void *
__wrap_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

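/*
 * All timestamp interfaces are served from the host's wall clock via
 * rumpuser_gettimeofday(); the "get" variants are not distinguished
 * from the precise ones.
 */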
void
nanotime(struct timespec *ts)
{
	struct timeval tv;
	int error;

	rumpuser_gettimeofday(&tv, &error);
	TIMEVAL_TO_TIMESPEC(&tv, ts);
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
getmicrotime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
bdev_strategy(struct buf *bp)
{

	panic("%s: not supported", __func__);
}

int
bdev_type(dev_t dev)
{

	return D_DISK;
}

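/*
 * Kernel threads are backed by host threads.  kthread_create() packs
 * the entry point, its argument and a freshly set up lwp into a
 * kthdesc; threadbouncer() unpacks it on the new host thread, installs
 * the lwp as curlwp and then calls the payload.
 */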
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
	bool mpsafe;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;
	bool mpsafe;

	/* copy out everything we need before freeing the descriptor */
	f = k->f;
	thrarg = k->arg;
	mpsafe = k->mpsafe;
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	if (!mpsafe)
		KERNEL_LOCK(1, NULL);
	f(thrarg);
	panic("unreachable, should kthread_exit()");
}

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(fmt, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(fmt, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(fmt, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
	k->mpsafe = flags & KTHREAD_MPSAFE;
	rv = rumpuser_thread_create(threadbouncer, k);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	panic("FIXME: kthread_exit() does not support mpsafe locking");
	rumpuser_thread_exit();
}

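/*
 * Process, process group and signal handling are largely unimplemented;
 * the stubs below panic if anything actually reaches them.  psignal()
 * silently ignores SIGSYS and panics on anything else.
 */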
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

void
knote_fdclose(int fd)
{

	/* since we don't add knotes, we don't have to remove them */
}

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{

	panic("%s: not implemented", __func__);
}

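/*
 * kpause(): convert the tick count into a timespec against hz and sleep
 * on the host clock with rumpuser_nanosleep(), dropping and retaking
 * the interlock around the sleep.
 */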
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	struct timespec time;

	if (mtx)
		mutex_exit(mtx);

	time.tv_sec = timeo / hz;
	time.tv_nsec = (timeo % hz) * (1000000000 / hz);

	rv = rumpuser_nanosleep(&time, NULL, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched()
{

	panic("%s: not implemented", __func__);
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

int
seltrue(dev_t dev, int events, struct lwp *l)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

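/*
 * The selinfo hooks are empty: nothing ever blocks in select/poll
 * bookkeeping here, so recording and notifying are no-ops.
 */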
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}

const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
	const struct cdevsw *cdev, int *cmajor)
{

	panic("%s: not implemented", __func__);
}

int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	panic("%s: not implemented", __func__);
}

void
tc_setclock(struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter()
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}