emul.c revision 1.89 1 /* $NetBSD: emul.c,v 1.89 2009/05/07 16:03:24 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.89 2009/05/07 16:03:24 pooka Exp $");
32
33 #include <sys/param.h>
34 #include <sys/malloc.h>
35 #include <sys/null.h>
36 #include <sys/vnode.h>
37 #include <sys/stat.h>
38 #include <sys/select.h>
39 #include <sys/syslog.h>
40 #include <sys/namei.h>
41 #include <sys/kauth.h>
42 #include <sys/conf.h>
43 #include <sys/device.h>
44 #include <sys/queue.h>
45 #include <sys/file.h>
46 #include <sys/filedesc.h>
47 #include <sys/kthread.h>
48 #include <sys/cpu.h>
49 #include <sys/kmem.h>
50 #include <sys/poll.h>
51 #include <sys/timetc.h>
52 #include <sys/tprintf.h>
53 #include <sys/module.h>
54 #include <sys/tty.h>
55 #include <sys/reboot.h>
56
57 #include <dev/cons.h>
58
59 #include <machine/stdarg.h>
60
61 #include <rump/rumpuser.h>
62
63 #include <uvm/uvm_map.h>
64
65 #include "rump_private.h"
66
/*
 * Kernel global variables referenced by code compiled from the
 * standard kernel sources.  Mostly bootstrap-time placeholder
 * values; some are filled in during rump kernel bootstrap.
 */
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

/* buffer cache sizing parameters (cf. calc_cache_size() below) */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

/* identification strings visible via sysctl and friends */
const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

/* NOTE(review): zero-filled filterops -- presumably never invoked; verify */
const struct filterops seltrue_filtops;
const struct filterops sig_filtops;
106 #define DEVSW_SIZE 255
107 const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
108 const struct bdevsw **bdevsw = bdevsw0;
109 const int sys_cdevsws = DEVSW_SIZE;
110 int max_cdevsws = DEVSW_SIZE;
111
112 const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
113 const struct cdevsw **cdevsw = cdevsw0;
114 const int sys_bdevsws = DEVSW_SIZE;
115 int max_bdevsws = DEVSW_SIZE;
116
117 struct devsw_conv devsw_conv0;
118 struct devsw_conv *devsw_conv = &devsw_conv0;
119 int max_devsw_convs = 0;
120 int mem_no = 2;
121
122 kmutex_t tty_lock;
123
124 int
125 copyin(const void *uaddr, void *kaddr, size_t len)
126 {
127
128 if (curproc->p_vmspace == &rump_vmspace)
129 memcpy(kaddr, uaddr, len);
130 else
131 rump_sysproxy_copyin(uaddr, kaddr, len);
132 return 0;
133 }
134
135 int
136 copyout(const void *kaddr, void *uaddr, size_t len)
137 {
138
139 if (curproc->p_vmspace == &rump_vmspace)
140 memcpy(uaddr, kaddr, len);
141 else
142 rump_sysproxy_copyout(kaddr, uaddr, len);
143 return 0;
144 }
145
/*
 * Kernel-to-kernel string copy.  In a rump kernel both addresses
 * live in the hosting process, so simply reuse copyinstr().
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}
152
153 int
154 copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
155 {
156
157 if (curproc->p_vmspace == &rump_vmspace)
158 strlcpy(kaddr, uaddr, len);
159 else
160 rump_sysproxy_copyin(uaddr, kaddr, len);
161 if (done)
162 *done = strlen(kaddr)+1; /* includes termination */
163 return 0;
164 }
165
166 int
167 copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
168 {
169
170 if (curproc->p_vmspace == &rump_vmspace)
171 strlcpy(uaddr, kaddr, len);
172 else
173 rump_sysproxy_copyout(kaddr, uaddr, len);
174 if (done)
175 *done = strlen(uaddr)+1; /* includes termination */
176 return 0;
177 }
178
/*
 * vmspace-taking variants.  The rump kernel only supports its own
 * (local) vmspace, so the vm argument is ignored and the calls are
 * forwarded to plain copyin()/copyout().
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}
192
/*
 * Fault-protected kernel-to-kernel copy.  Nothing can fault inside
 * a rump kernel, so this degenerates to memcpy plus a success
 * return.
 */
int
kcopy(const void *src, void *dst, size_t len)
{

	(void)memcpy(dst, src, len);
	return 0;
}
200
/*
 * Transfer up to n bytes between buf and the uio's iovec chain.
 * UIO_READ moves data from buf into the iovecs; any other rw value
 * moves data from the iovecs into buf.  Advances iov_base/iov_len,
 * uio_resid and uio_offset as it goes.  Only UIO_VMSPACE_SYS uios
 * are supported in the rump kernel.
 */
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* this iovec is exhausted, step to the next one */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		/* consume cnt bytes from both the iovec and the request */
		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}
237
/*
 * Mark a uio as addressing kernel (system) space.
 */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}
244
245 devclass_t
246 device_class(device_t dev)
247 {
248
249 if (dev != root_device)
250 panic("%s: dev != root_device not supported", __func__);
251
252 return DV_DISK;
253 }
254
/* uptime, as maintained by the rump kernel clock code */
void
getnanouptime(struct timespec *ts)
{

	rump_getuptime(ts);
}

/* uptime converted to a struct timeval */
void
getmicrouptime(struct timeval *tv)
{
	struct timespec ts;

	getnanouptime(&ts);
	TIMESPEC_TO_TIMEVAL(tv, &ts);
}
270
/*
 * malloc(9) type statistics are not maintained in a rump kernel
 * (the hypervisor allocator is used directly), so type attach and
 * detach are no-ops.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}
284
285 void *
286 kern_malloc(unsigned long size, struct malloc_type *type, int flags)
287 {
288 void *rv;
289
290 rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
291 if (rv && flags & M_ZERO)
292 memset(rv, 0, size);
293
294 return rv;
295 }
296
/*
 * realloc(9) stand-in.
 *
 * NOTE(review): this merely allocates a fresh block -- the old
 * contents are NOT copied over and the old block is NOT freed,
 * unlike real kern_realloc semantics.  Confirm no rump consumer
 * reallocs live data through this path before relying on it.
 */
void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
}
303
/* release memory obtained via kern_malloc()/kern_realloc() */
void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}
310
/*
 * Fetch the current wall-clock time directly from the host via a
 * hypercall.  The error indication from the hypercall is ignored.
 */
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
321
/*
 * Current wall-clock time.  With threads enabled the clock
 * maintained by the rump clock code is used; otherwise the host
 * is queried directly.
 */
void
nanotime(struct timespec *ts)
{

	if (rump_threads) {
		rump_gettime(ts);
	} else {
		gettime(ts);
	}
}

/* hooray for mick, so what if I do */
/* no cached timecounter here: "get" is as accurate as the real thing */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}
340
341 void
342 microtime(struct timeval *tv)
343 {
344 struct timespec ts;
345
346 if (rump_threads) {
347 rump_gettime(&ts);
348 TIMESPEC_TO_TIMEVAL(tv, &ts);
349 } else {
350 gettime(&ts);
351 TIMESPEC_TO_TIMEVAL(tv, &ts);
352 }
353 }
354
/* no cached time: fall through to microtime() */
void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}
361
/* handshake data passed from kthread_create() to the new thread */
struct kthdesc {
	void (*f)(void *);	/* thread entry point */
	void *arg;		/* argument for f */
	struct lwp *mylwp;	/* pre-created lwp for the new thread */
};
367
/*
 * Trampoline executed on the newly created host thread: bind the
 * pre-created lwp to this thread, free the handshake structure and
 * invoke the real thread function.  Non-MPSAFE threads take the
 * big kernel lock first.  The thread function must never return;
 * it is expected to finish via kthread_exit().
 */
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	f = k->f;
	thrarg = k->arg;
	/* associate the lwp with this host thread before using curlwp */
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
	f(thrarg);
	panic("unreachable, should kthread_exit()");
}
385
386 int
387 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
388 void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
389 {
390 char thrstore[MAXCOMLEN];
391 const char *thrname = NULL;
392 va_list ap;
393 struct kthdesc *k;
394 struct lwp *l;
395 int rv;
396
397 thrstore[0] = '\0';
398 if (fmt) {
399 va_start(ap, fmt);
400 vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
401 va_end(ap);
402 thrname = thrstore;
403 }
404
405 /*
406 * We don't want a module unload thread.
407 * (XXX: yes, this is a kludge too, and the kernel should
408 * have a more flexible method for configuring which threads
409 * we want).
410 */
411 if (strcmp(thrstore, "modunload") == 0) {
412 return 0;
413 }
414
415 if (!rump_threads) {
416 /* fake them */
417 if (strcmp(thrstore, "vrele") == 0) {
418 printf("rump warning: threads not enabled, not starting"
419 " vrele thread\n");
420 return 0;
421 } else if (strcmp(thrstore, "cachegc") == 0) {
422 printf("rump warning: threads not enabled, not starting"
423 " namecache g/c thread\n");
424 return 0;
425 } else if (strcmp(thrstore, "nfssilly") == 0) {
426 printf("rump warning: threads not enabled, not enabling"
427 " nfs silly rename\n");
428 return 0;
429 } else if (strcmp(thrstore, "unpgc") == 0) {
430 printf("rump warning: threads not enabled, not enabling"
431 " UNP garbage collection\n");
432 return 0;
433 } else
434 panic("threads not available, setenv RUMP_THREADS 1");
435 }
436
437 KASSERT(fmt != NULL);
438 if (ci != NULL)
439 panic("%s: bounded threads not supported", __func__);
440
441 k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
442 k->f = func;
443 k->arg = arg;
444 k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
445 if (flags & KTHREAD_MPSAFE)
446 l->l_pflag |= LP_MPSAFE;
447 rv = rumpuser_thread_create(threadbouncer, k, thrname);
448 if (rv)
449 return rv;
450
451 if (newlp)
452 *newlp = l;
453 return 0;
454 }
455
/*
 * Exit the calling kernel thread: drop the big lock if this thread
 * held it, detach the lwp from the host thread and terminate the
 * host thread.  The exit code is ignored.  Does not return.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_clear_curlwp();
	rumpuser_thread_exit();
}
465
/*
 * Process and signal routines.  The rump kernel has no real
 * process or signal machinery, so most of these are fatal stubs.
 */
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

/*
 * Signal delivery: SIGSYS (raised for unsupported syscalls) is
 * deliberately dropped; any other signal is unexpected and fatal.
 */
void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

/* no signal delivery: a signal is never pending */
int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}
526
/*
 * Sleep for timeo ticks via a host nanosleep, releasing and
 * re-acquiring mtx (if given) around the sleep.  Returns the
 * hypercall error if the sleep failed, otherwise 0.
 *
 * NOTE(review): the wmesg and intr arguments are ignored -- the
 * sleep is not interruptible here; confirm callers do not rely
 * on interruptibility.
 */
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	uint64_t sec, nsec;

	if (mtx)
		mutex_exit(mtx);

	/* convert ticks to seconds + nanoseconds */
	sec = timeo / hz;
	nsec = (timeo % hz) * (1000000000 / hz);
	rv = rumpuser_nanosleep(&sec, &nsec, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}
549
void
suspendsched(void)
{

	panic("%s: not implemented", __func__);
}

/*
 * Remove an lwp from wherever it is sleeping, via its sync
 * object's unsleep method.  The lwp must be locked by the caller.
 */
u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

/*
 * Size the buffer cache as pct percent of "physical" memory.
 * NOTE(review): the va_pct argument is ignored here; the regular
 * kernel also limits the cache by a percentage of kernel VA.
 */
vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	/* make sure the computed size fits a vaddr_t */
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}
577
578 int
579 seltrue(dev_t dev, int events, struct lwp *l)
580 {
581 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
582 }
583
/*
 * selinfo bookkeeping: poll/select readiness is never tracked in
 * the rump kernel (cf. seltrue() above), so these are all no-ops.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}
603
/* placeholder name for any device we are asked about */
const char *
device_xname(device_t dv)
{
	return "bogus0";
}

/* verify the caller may sleep: always true here */
void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}
616
/* unimplemented stubs: reaching any of these is a fatal error */
void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}
637
/* machine-dependent module initialization: currently nothing to do */
void
module_init_md(void)
{

	/*
	 * Nothing for now. However, we should load the librump
	 * symbol table.
	 */
}
647
/* us and them, after all we're only ordinary seconds */
/*
 * delay() backend: sleep for the given number of microseconds via
 * a host nanosleep.  Warns when asked for a second or more, since
 * kernel delay() users are expected to wait only briefly.
 */
static void
rump_delay(unsigned int us)
{
	uint64_t sec, nsec;
	int error;

	sec = us / 1000000;
	nsec = (us % 1000000) * 1000;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sec, &nsec, &error);
}
/* function pointer used by the DELAY()/delay() macros */
void (*delay_func)(unsigned int) = rump_delay;
664
void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

/*
 * Kernel preemption is not supported; enabling it again panics so
 * that any real disable/enable pairing is caught immediately.
 */
void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}
680
/*
 * Session reference counting: the rump kernel has no sessions, so
 * these can never legitimately be reached.
 */
void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}

void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}
694
/* the console output queue can always take more */
int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}

/* console output: a single character goes to the host via hypercall */
void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

void
cnflush(void)
{

	/* done */
}

/*
 * tty output for kernel messages: everything goes straight to the
 * console; the flags and tp arguments are ignored.
 */
int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}

/*
 * Reboot: for a rump kernel this means aborting the hosting
 * process.  The howto/bootstr arguments are ignored.
 */
void
cpu_reboot(int howto, char *bootstr)
{

	rumpuser_panic();
}
731
/*
 * XXX: static, but not used except to make spcopy.S link.
 * (curlwp is normally a macro; provide a real symbol for hppa asm)
 */
#ifdef __hppa__
#undef curlwp
struct lwp *curlwp = &lwp0;
#endif
737
/*
 * XXX: from sys_select.c, see that file for license.
 * (these will go away really soon in favour of the real sys_select.c)
 * ((really, the select code just needs cleanup))
 * (((seriously)))
 */

/*
 * Validate a select/poll timeout and record the current uptime in
 * sleepts for later use by gettimeleft().  Returns -1 when the
 * timeout is invalid, 0 otherwise.
 */
int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{
	if (itimespecfix(ts))
		return -1;
	getnanouptime(sleepts);
	return 0;
}

/*
 * Recompute the remaining timeout after an interrupted sleep:
 * reduce ts by the monotonic time actually slept, update sleepts
 * for the next retry and return the remainder converted to ticks.
 */
int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timespec sleptts;
	/*
	 * reduce ts by elapsed time
	 * based on monotonic time scale
	 */
	getnanouptime(&sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
	return tstohz(ts);
}
770