emul.c revision 1.103 1 /* $NetBSD: emul.c,v 1.103 2009/10/16 00:14:53 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.103 2009/10/16 00:14:53 pooka Exp $");
32
33 #include <sys/param.h>
34 #include <sys/malloc.h>
35 #include <sys/null.h>
36 #include <sys/vnode.h>
37 #include <sys/stat.h>
38 #include <sys/select.h>
39 #include <sys/syslog.h>
40 #include <sys/namei.h>
41 #include <sys/kauth.h>
42 #include <sys/conf.h>
43 #include <sys/device.h>
44 #include <sys/queue.h>
45 #include <sys/file.h>
46 #include <sys/filedesc.h>
47 #include <sys/kthread.h>
48 #include <sys/cpu.h>
49 #include <sys/kmem.h>
50 #include <sys/poll.h>
51 #include <sys/timetc.h>
52 #include <sys/tprintf.h>
53 #include <sys/module.h>
54 #include <sys/tty.h>
55 #include <sys/reboot.h>
56
57 #include <dev/cons.h>
58
59 #include <machine/stdarg.h>
60
61 #include <rump/rumpuser.h>
62
63 #include <uvm/uvm_map.h>
64
65 #include "rump_private.h"
66
67 time_t time_second = 1;
68
69 kmutex_t *proc_lock;
70 struct lwp lwp0;
71 struct vnode *rootvp;
72 struct device *root_device;
73 dev_t rootdev;
74 int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
75 int doing_shutdown;
76 const int schedppq = 1;
77 int hardclock_ticks;
78 bool mp_online = false;
79 struct vm_map *mb_map;
80 struct timeval boottime;
81 struct emul emul_netbsd;
82 int cold = 1;
83 int boothowto = AB_SILENT;
84 struct tty *constty;
85
86 char hostname[MAXHOSTNAMELEN];
87 size_t hostnamelen;
88
89 const char *panicstr;
90 const char ostype[] = "NetBSD";
91 const char osrelease[] = "999"; /* paradroid 4evah */
92 const char kernel_ident[] = "RUMP-ROAST";
93 const char *domainname;
94 int domainnamelen;
95
96 const struct filterops sig_filtops;
97
98 #define DEVSW_SIZE 255
99 const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
100 const struct bdevsw **bdevsw = bdevsw0;
101 const int sys_cdevsws = DEVSW_SIZE;
102 int max_cdevsws = DEVSW_SIZE;
103
104 const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
105 const struct cdevsw **cdevsw = cdevsw0;
106 const int sys_bdevsws = DEVSW_SIZE;
107 int max_bdevsws = DEVSW_SIZE;
108
109 struct devsw_conv devsw_conv0;
110 struct devsw_conv *devsw_conv = &devsw_conv0;
111 int max_devsw_convs = 0;
112 int mem_no = 2;
113
114 struct device *booted_device;
115 struct device *booted_wedge;
116 int booted_partition;
117
118 kmutex_t tty_lock;
119
120 int
121 copyin(const void *uaddr, void *kaddr, size_t len)
122 {
123
124 if (curproc->p_vmspace == &rump_vmspace)
125 memcpy(kaddr, uaddr, len);
126 else
127 rump_sysproxy_copyin(uaddr, kaddr, len);
128 return 0;
129 }
130
131 int
132 copyout(const void *kaddr, void *uaddr, size_t len)
133 {
134
135 if (curproc->p_vmspace == &rump_vmspace)
136 memcpy(uaddr, kaddr, len);
137 else
138 rump_sysproxy_copyout(kaddr, uaddr, len);
139 return 0;
140 }
141
/*
 * Kernel-to-kernel string copy.  Both "spaces" are local here, so
 * simply reuse copyinstr().
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}
148
/*
 * Copy a NUL-terminated string in from "userspace", at most len bytes
 * including the terminator.  If done is non-NULL it receives the
 * length of the resulting string including the terminator.
 *
 * NOTE(review): strlcpy truncates silently, so an overlong source does
 * not return ENAMETOOLONG as the real kernel would -- confirm callers
 * tolerate this.
 * NOTE(review): the sysproxy branch copies a full len bytes and then
 * derives *done via strlen() -- assumes the proxied data is
 * NUL-terminated.
 */
int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	if (curproc->p_vmspace == &rump_vmspace)
		strlcpy(kaddr, uaddr, len);
	else
		rump_sysproxy_copyin(uaddr, kaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}
161
/*
 * Copy a NUL-terminated string out to "userspace"; mirror image of
 * copyinstr(), with the same silent-truncation caveat.
 */
int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{

	if (curproc->p_vmspace == &rump_vmspace)
		strlcpy(uaddr, kaddr, len);
	else
		rump_sysproxy_copyout(kaddr, uaddr, len);
	if (done)
		*done = strlen(uaddr)+1; /* includes termination */
	return 0;
}
174
/* vmspace-qualified copyin; the vm argument is ignored in rump */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}
181
/* vmspace-qualified copyout; the vm argument is ignored in rump */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}
188
/*
 * Fault-protected kernel copy.  Rump runs as an ordinary process, so
 * no fault recovery is attempted; this is memcpy with copy(9)'s
 * signature and an unconditional success return.
 */
int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}
196
197 int
198 uiomove(void *buf, size_t n, struct uio *uio)
199 {
200 struct iovec *iov;
201 uint8_t *b = buf;
202 size_t cnt;
203
204 if (uio->uio_vmspace != UIO_VMSPACE_SYS)
205 panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);
206
207 while (n && uio->uio_resid) {
208 iov = uio->uio_iov;
209 cnt = iov->iov_len;
210 if (cnt == 0) {
211 uio->uio_iov++;
212 uio->uio_iovcnt--;
213 continue;
214 }
215 if (cnt > n)
216 cnt = n;
217
218 if (uio->uio_rw == UIO_READ)
219 memcpy(iov->iov_base, b, cnt);
220 else
221 memcpy(b, iov->iov_base, cnt);
222
223 iov->iov_base = (uint8_t *)iov->iov_base + cnt;
224 iov->iov_len -= cnt;
225 b += cnt;
226 uio->uio_resid -= cnt;
227 uio->uio_offset += cnt;
228 n -= cnt;
229 }
230
231 return 0;
232 }
233
/* mark a uio as describing kernel (system) address space */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}
240
/*
 * Device class query.  Only the root device is supported here, and it
 * is always presented as a disk.
 */
devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}
250
/* uptime in nanosecond resolution, via the rump clock */
void
getnanouptime(struct timespec *ts)
{

	rump_getuptime(ts);
}
257
/* uptime in microsecond resolution; converts from getnanouptime() */
void
getmicrouptime(struct timeval *tv)
{
	struct timespec ts;

	getnanouptime(&ts);
	TIMESPEC_TO_TIMEVAL(tv, &ts);
}
266
/* malloc(9) type registration: nothing to track in rump */
void
malloc_type_attach(struct malloc_type *type)
{

	return;
}
273
/* malloc(9) type deregistration: nothing to track in rump */
void
malloc_type_detach(struct malloc_type *type)
{

	return;
}
280
281 void *
282 kern_malloc(unsigned long size, struct malloc_type *type, int flags)
283 {
284 void *rv;
285
286 rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
287 if (rv && flags & M_ZERO)
288 memset(rv, 0, size);
289
290 return rv;
291 }
292
/*
 * realloc(9) frontend; forwards to the rumpuser hypercall.
 * M_CANFAIL/M_NOWAIT permit a NULL return.  The type argument is
 * unused.
 */
void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
}
299
/* free(9) frontend; the type accounting argument is unused */
void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}
306
/*
 * Fetch host wall-clock time directly via a hypercall.
 * NOTE(review): the hypercall's error result is ignored.
 */
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
317
/*
 * Current wall-clock time in nanosecond resolution.  With threads
 * enabled the rump clock is consulted; otherwise ask the host
 * directly.
 */
void
nanotime(struct timespec *ts)
{

	if (rump_threads) {
		rump_gettime(ts);
	} else {
		gettime(ts);
	}
}
328
/* hooray for mick, so what if I do */
/* low-accuracy variant: simply delegates to nanotime() here */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}
336
337 void
338 microtime(struct timeval *tv)
339 {
340 struct timespec ts;
341
342 if (rump_threads) {
343 rump_gettime(&ts);
344 TIMESPEC_TO_TIMEVAL(tv, &ts);
345 } else {
346 gettime(&ts);
347 TIMESPEC_TO_TIMEVAL(tv, &ts);
348 }
349 }
350
/* low-accuracy variant: simply delegates to microtime() here */
void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}
357
/*
 * Thread bootstrap descriptor: carries the start routine, its argument
 * and the pre-allocated lwp from kthread_create() to threadbouncer(),
 * which frees it.
 */
struct kthdesc {
	void (*f)(void *);	/* thread entry point */
	void *arg;		/* argument handed to f */
	struct lwp *mylwp;	/* lwp to bind to the new host thread */
};
363
/*
 * First stop of every new kernel thread: bind the pre-made lwp to the
 * host thread, enter the rump scheduler, then run the real entry
 * point.  Statement order matters: the descriptor contents are read
 * out before scheduling, and the descriptor is freed only after the
 * lwp context is established.
 */
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	/* schedule ourselves first */
	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	rump_schedule();

	/* descriptor has served its purpose */
	kmem_free(k, sizeof(struct kthdesc));
	/* non-MPSAFE threads run under the big kernel lock */
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	/* threads must terminate via kthread_exit(), never by returning */
	panic("unreachable, should kthread_exit()");
}
385
386 int
387 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
388 void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
389 {
390 char thrstore[MAXCOMLEN];
391 const char *thrname = NULL;
392 va_list ap;
393 struct kthdesc *k;
394 struct lwp *l;
395 int rv;
396
397 thrstore[0] = '\0';
398 if (fmt) {
399 va_start(ap, fmt);
400 vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
401 va_end(ap);
402 thrname = thrstore;
403 }
404
405 /*
406 * We don't want a module unload thread.
407 * (XXX: yes, this is a kludge too, and the kernel should
408 * have a more flexible method for configuring which threads
409 * we want).
410 */
411 if (strcmp(thrstore, "modunload") == 0) {
412 return 0;
413 }
414
415 if (!rump_threads) {
416 /* fake them */
417 if (strcmp(thrstore, "vrele") == 0) {
418 printf("rump warning: threads not enabled, not starting"
419 " vrele thread\n");
420 return 0;
421 } else if (strcmp(thrstore, "cachegc") == 0) {
422 printf("rump warning: threads not enabled, not starting"
423 " namecache g/c thread\n");
424 return 0;
425 } else if (strcmp(thrstore, "nfssilly") == 0) {
426 printf("rump warning: threads not enabled, not enabling"
427 " nfs silly rename\n");
428 return 0;
429 } else if (strcmp(thrstore, "unpgc") == 0) {
430 printf("rump warning: threads not enabled, not enabling"
431 " UNP garbage collection\n");
432 return 0;
433 } else
434 panic("threads not available, setenv RUMP_THREADS 1");
435 }
436
437 KASSERT(fmt != NULL);
438 if (ci != NULL)
439 panic("%s: bounded threads not supported", __func__);
440
441 k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
442 k->f = func;
443 k->arg = arg;
444 k->mylwp = l = rump_lwp_alloc(0, rump_nextlid());
445 if (flags & KTHREAD_MPSAFE)
446 l->l_pflag |= LP_MPSAFE;
447 rv = rumpuser_thread_create(threadbouncer, k, thrname);
448 if (rv)
449 return rv;
450
451 if (newlp)
452 *newlp = l;
453 return 0;
454 }
455
/*
 * Terminate the calling kernel thread: drop the kernel lock for
 * non-MPSAFE threads, release the lwp, leave the rump scheduler and
 * exit the host thread.  ecode is ignored.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_lwp_release(curlwp);
	rump_unschedule();
	rumpuser_thread_exit();
}
466
/* process lookup is unsupported in this emulation; any call is fatal */
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}
473
/* process-group lookup is unsupported in this emulation; any call is fatal */
struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}
480
/*
 * Minimal signal delivery: SIGSYS is silently dropped, any other
 * signal is considered a bug and panics.
 */
void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d\n", signo);
	}
}
492
/* siginfo-style signal delivery is unsupported; any call is fatal */
void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}
499
/* process-group signal delivery is unsupported; any call is fatal */
void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}
506
/* session membership checks are unsupported; any call is fatal */
int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}
513
/* signals are never pending in rump */
int
sigispending(struct lwp *l, int signo)
{

	return 0;
}
520
/* querying the pending signal set is unsupported; any call is fatal */
void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}
527
528 int
529 kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
530 {
531 extern int hz;
532 int rv, error;
533 uint64_t sec, nsec;
534
535 if (mtx)
536 mutex_exit(mtx);
537
538 sec = timeo / hz;
539 nsec = (timeo % hz) * (1000000000 / hz);
540 rv = rumpuser_nanosleep(&sec, &nsec, &error);
541
542 if (mtx)
543 mutex_enter(mtx);
544
545 if (rv)
546 return error;
547
548 return 0;
549 }
550
/* scheduler suspension hook; intentionally a no-op */
void
suspendsched(void)
{

	/* we don't control scheduling currently, can't do anything now */
}
557
/*
 * Wake l by invoking its sync object's unsleep method.  The caller
 * must hold l->l_mutex (asserted).
 */
u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}
566
/*
 * Return the page cache size: pct percent of "physical" memory, in
 * bytes.  Panics if the result does not fit a vaddr_t.
 * NOTE(review): the map and va_pct arguments are ignored -- no virtual
 * address space limit is applied here.
 */
vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}
578
/* every device shares one fake external name in rump */
const char *
device_xname(device_t dv)
{
	return "bogus0";
}
584
/* sleepability assertion; currently never fails */
void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}
591
/* setting the timecounter clock is unsupported; any call is fatal */
void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}
598
/* credential uid comparison is unsupported; any call is fatal */
int
proc_uidmatch(kauth_cred_t cred, kauth_cred_t target)
{

	panic("%s: not implemented", __func__);
}
605
/* credential modification sections are unsupported; any call is fatal */
void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}
612
/* credential modification sections are unsupported; any call is fatal */
void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}
619
/* machine-dependent module initialization; nothing to do yet */
void
module_init_md(void)
{

	/*
	 * Nothing for now.  However, we should load the librump
	 * symbol table.
	 */
}
629
/* us and them, after all we're only ordinary seconds */
/*
 * delay(9) backend: busy-wait replacement that sleeps for us
 * microseconds via the nanosleep hypercall.  Warns when asked for a
 * delay of a second or more, since delay() is meant for short waits.
 */
static void
rump_delay(unsigned int us)
{
	uint64_t sleepsec, sleepnsec;
	int error;

	sleepsec = us / 1000000;
	sleepnsec = (us % 1000000) * 1000;

	if (__predict_false(sleepsec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sleepsec, &sleepnsec, &error);
}
void (*delay_func)(unsigned int) = rump_delay;
646
/* disable kernel preemption for the current lwp */
void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}
654
/* re-enabling preemption is not supported; reaching here is fatal */
void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}
662
/* sessions cannot exist in rump, so holding one is impossible */
void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}
669
/* sessions cannot exist in rump, so releasing one is impossible */
void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}
676
/* pretend the tty output queue always has room */
int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}
683
/*
 * Console output: emit one character via the hypervisor.
 * NOTE(review): the hypercall's error result is ignored.
 */
void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}
691
/* console flush; output is unbuffered here, so nothing to do */
void
cnflush(void)
{

	/* done */
}
698
/* tty putchar: route to the console; flags and tp are ignored */
int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}
706
/*
 * Reboot: shut down the rump kernel with the given howto flags, then
 * terminate the host process.  bootstr is ignored.
 */
void
cpu_reboot(int howto, char *bootstr)
{

	rump_reboot(howto);

	/* this function is __dead, we must exit */
	rumpuser_exit(0);
}
716
/* power management registration: accept the hooks and ignore them */
bool
pmf_device_register1(struct device *dev,
	bool (*suspend)(device_t PMF_FN_PROTO),
	bool (*resume)(device_t PMF_FN_PROTO),
	bool (*shutdown)(device_t, int))
{

	return true;
}
726
/* power management deregistration: nothing was registered, nothing to do */
void
pmf_device_deregister(struct device *dev)
{

	/* nada */
}
733