emul.c revision 1.38.2.4 1 /* $NetBSD: emul.c,v 1.38.2.4 2009/08/19 18:48:29 yamt Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.38.2.4 2009/08/19 18:48:29 yamt Exp $");
32
33 #include <sys/param.h>
34 #include <sys/malloc.h>
35 #include <sys/null.h>
36 #include <sys/vnode.h>
37 #include <sys/stat.h>
38 #include <sys/select.h>
39 #include <sys/syslog.h>
40 #include <sys/namei.h>
41 #include <sys/kauth.h>
42 #include <sys/conf.h>
43 #include <sys/device.h>
44 #include <sys/queue.h>
45 #include <sys/file.h>
46 #include <sys/filedesc.h>
47 #include <sys/kthread.h>
48 #include <sys/cpu.h>
49 #include <sys/kmem.h>
50 #include <sys/poll.h>
51 #include <sys/timetc.h>
52 #include <sys/tprintf.h>
53 #include <sys/module.h>
54 #include <sys/tty.h>
55 #include <sys/reboot.h>
56
57 #include <dev/cons.h>
58
59 #include <machine/stdarg.h>
60
61 #include <rump/rumpuser.h>
62
63 #include <uvm/uvm_map.h>
64
65 #include "rump_private.h"
66
/*
 * Miscellaneous kernel globals that native kernel code references and
 * which the rump kernel must therefore define.  Most are static
 * placeholder values; nothing here is updated by a running clock or
 * scheduler unless other rump code does so.
 */
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

/* zero-initialized placeholder filter ops */
const struct filterops seltrue_filtops;
const struct filterops sig_filtops;
99
100 #define DEVSW_SIZE 255
101 const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
102 const struct bdevsw **bdevsw = bdevsw0;
103 const int sys_cdevsws = DEVSW_SIZE;
104 int max_cdevsws = DEVSW_SIZE;
105
106 const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
107 const struct cdevsw **cdevsw = cdevsw0;
108 const int sys_bdevsws = DEVSW_SIZE;
109 int max_bdevsws = DEVSW_SIZE;
110
111 struct devsw_conv devsw_conv0;
112 struct devsw_conv *devsw_conv = &devsw_conv0;
113 int max_devsw_convs = 0;
114 int mem_no = 2;
115
116 kmutex_t tty_lock;
117
118 int
119 copyin(const void *uaddr, void *kaddr, size_t len)
120 {
121
122 if (curproc->p_vmspace == &rump_vmspace)
123 memcpy(kaddr, uaddr, len);
124 else
125 rump_sysproxy_copyin(uaddr, kaddr, len);
126 return 0;
127 }
128
129 int
130 copyout(const void *kaddr, void *uaddr, size_t len)
131 {
132
133 if (curproc->p_vmspace == &rump_vmspace)
134 memcpy(uaddr, kaddr, len);
135 else
136 rump_sysproxy_copyout(kaddr, uaddr, len);
137 return 0;
138 }
139
/*
 * Copy a NUL-terminated string between two kernel addresses, writing
 * at most len bytes.  On success *done (if non-NULL) is set to the
 * number of bytes copied including the terminator.  If the string does
 * not fit, len bytes are copied, *done is set to len, and
 * ENAMETOOLONG is returned, per the copystr(9) contract.
 *
 * The previous implementation delegated to copyinstr(), which consults
 * curproc->p_vmspace and could route a purely kernel-to-kernel copy
 * through the sysproxy, and which never reported truncation.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
	const char *from = kfaddr;
	char *to = kdaddr;
	size_t i;

	for (i = 0; i < len; i++) {
		if ((to[i] = from[i]) == '\0') {
			if (done)
				*done = i + 1; /* includes termination */
			return 0;
		}
	}

	/* ran out of space before reaching the terminator */
	if (done)
		*done = len;
	return ENAMETOOLONG;
}
146
147 int
148 copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
149 {
150
151 if (curproc->p_vmspace == &rump_vmspace)
152 strlcpy(kaddr, uaddr, len);
153 else
154 rump_sysproxy_copyin(uaddr, kaddr, len);
155 if (done)
156 *done = strlen(kaddr)+1; /* includes termination */
157 return 0;
158 }
159
160 int
161 copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
162 {
163
164 if (curproc->p_vmspace == &rump_vmspace)
165 strlcpy(uaddr, kaddr, len);
166 else
167 rump_sysproxy_copyout(kaddr, uaddr, len);
168 if (done)
169 *done = strlen(uaddr)+1; /* includes termination */
170 return 0;
171 }
172
/*
 * Copy in from an explicitly given vmspace.  The vm argument is
 * ignored and the work is delegated to copyin(), which selects the
 * copy method from curproc.  NOTE(review): assumes vm always matches
 * curproc's vmspace — confirm against callers.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}
179
/*
 * Copy out to an explicitly given vmspace.  The vm argument is
 * ignored and the work is delegated to copyout().  NOTE(review):
 * assumes vm always matches curproc's vmspace — confirm with callers.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}
186
/*
 * Kernel-to-kernel copy.  No faults are possible in this environment,
 * so this is a plain memcpy that always succeeds.
 */
int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}
194
/*
 * Move at most n bytes between the buffer buf and the memory described
 * by uio, in the direction given by uio->uio_rw (UIO_READ: kernel
 * buffer -> iovecs; otherwise iovecs -> kernel buffer).  Advances the
 * iovec pointers, uio_resid and uio_offset as it goes.  Only
 * UIO_VMSPACE_SYS uios are supported; anything else panics.
 */
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* exhausted iovec: step to the next one */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		/* copy no more than the caller asked for */
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		/* consume cnt bytes from both the iovec and the uio */
		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}
231
/*
 * Mark a uio as operating on kernel (system) addresses.
 */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}
238
/*
 * Return the device class of dev.  Only root_device is supported and
 * it is always reported as a disk; any other device panics.
 */
devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}
248
/*
 * Fetch the rump kernel's uptime as a timespec.
 */
void
getnanouptime(struct timespec *ts)
{

	rump_getuptime(ts);
}
255
/*
 * Fetch the uptime as a timeval, by converting getnanouptime()'s
 * timespec result.
 */
void
getmicrouptime(struct timeval *tv)
{
	struct timespec ts;

	getnanouptime(&ts);
	TIMESPEC_TO_TIMEVAL(tv, &ts);
}
264
/*
 * malloc(9) type registration is a no-op here: kern_malloc() below
 * ignores the type entirely.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

/* See malloc_type_attach(): nothing to tear down. */
void
malloc_type_detach(struct malloc_type *type)
{

	return;
}
278
279 void *
280 kern_malloc(unsigned long size, struct malloc_type *type, int flags)
281 {
282 void *rv;
283
284 rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
285 if (rv && flags & M_ZERO)
286 memset(rv, 0, size);
287
288 return rv;
289 }
290
/*
 * realloc(9) backed by the host allocator.  M_CANFAIL/M_NOWAIT map to
 * "allocation may fail"; the malloc type is ignored.
 */
void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
}
297
/*
 * free(9) backed by the host allocator; the malloc type is ignored.
 */
void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}
304
/*
 * Read the host's current time directly into ts.  The error value
 * from rumpuser_gettime() is deliberately ignored.
 */
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
315
/*
 * Current time as a timespec: via the rump clock when threads are
 * enabled, otherwise straight from the host.
 */
void
nanotime(struct timespec *ts)
{

	if (rump_threads) {
		rump_gettime(ts);
	} else {
		gettime(ts);
	}
}
326
/* hooray for mick, so what if I do */
/*
 * Low-resolution time; no cached timecounter here, so identical
 * to nanotime().
 */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}
334
335 void
336 microtime(struct timeval *tv)
337 {
338 struct timespec ts;
339
340 if (rump_threads) {
341 rump_gettime(&ts);
342 TIMESPEC_TO_TIMEVAL(tv, &ts);
343 } else {
344 gettime(&ts);
345 TIMESPEC_TO_TIMEVAL(tv, &ts);
346 }
347 }
348
/*
 * Low-resolution time as a timeval; identical to microtime() here.
 */
void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}
355
/*
 * Descriptor handed from kthread_create() to threadbouncer(): the
 * thread's entry point, its argument, and the lwp prepared for it.
 * Allocated in kthread_create(), freed in threadbouncer().
 */
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};
361
/*
 * First code run in a newly created host thread: install the lwp
 * prepared by kthread_create() as curlwp, then jump to the thread's
 * real entry point.  The entry point must terminate the thread via
 * kthread_exit(); falling off the end panics.
 */
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	/* copy out everything we need before freeing the descriptor */
	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	/* non-MPSAFE threads run holding the big kernel lock */
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
	f(thrarg);
	panic("unreachable, should kthread_exit()");
}
379
380 int
381 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
382 void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
383 {
384 char thrstore[MAXCOMLEN];
385 const char *thrname = NULL;
386 va_list ap;
387 struct kthdesc *k;
388 struct lwp *l;
389 int rv;
390
391 thrstore[0] = '\0';
392 if (fmt) {
393 va_start(ap, fmt);
394 vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
395 va_end(ap);
396 thrname = thrstore;
397 }
398
399 /*
400 * We don't want a module unload thread.
401 * (XXX: yes, this is a kludge too, and the kernel should
402 * have a more flexible method for configuring which threads
403 * we want).
404 */
405 if (strcmp(thrstore, "modunload") == 0) {
406 return 0;
407 }
408
409 if (!rump_threads) {
410 /* fake them */
411 if (strcmp(thrstore, "vrele") == 0) {
412 printf("rump warning: threads not enabled, not starting"
413 " vrele thread\n");
414 return 0;
415 } else if (strcmp(thrstore, "cachegc") == 0) {
416 printf("rump warning: threads not enabled, not starting"
417 " namecache g/c thread\n");
418 return 0;
419 } else if (strcmp(thrstore, "nfssilly") == 0) {
420 printf("rump warning: threads not enabled, not enabling"
421 " nfs silly rename\n");
422 return 0;
423 } else if (strcmp(thrstore, "unpgc") == 0) {
424 printf("rump warning: threads not enabled, not enabling"
425 " UNP garbage collection\n");
426 return 0;
427 } else
428 panic("threads not available, setenv RUMP_THREADS 1");
429 }
430
431 KASSERT(fmt != NULL);
432 if (ci != NULL)
433 panic("%s: bounded threads not supported", __func__);
434
435 k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
436 k->f = func;
437 k->arg = arg;
438 k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
439 if (flags & KTHREAD_MPSAFE)
440 l->l_pflag |= LP_MPSAFE;
441 rv = rumpuser_thread_create(threadbouncer, k, thrname);
442 if (rv)
443 return rv;
444
445 if (newlp)
446 *newlp = l;
447 return 0;
448 }
449
/*
 * Terminate the calling kthread: release the big kernel lock if held
 * (non-MPSAFE threads), tear down curlwp and exit the host thread.
 * ecode is ignored.  Does not return.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_clear_curlwp();
	rumpuser_thread_exit();
}
459
/* Process lookup is not implemented in the rump kernel. */
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

/* Process-group lookup is not implemented in the rump kernel. */
struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}
473
/*
 * Minimal signal delivery: SIGSYS is silently discarded, anything
 * else is unexpected here and panics.
 */
void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}
485
/* Signal-with-siginfo delivery is not implemented. */
void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

/* Process-group signal delivery is not implemented. */
void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

/* Session membership checks are not implemented. */
int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}
506
/* No signals are ever pending in the rump kernel. */
int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

/* Pending-signal-set queries are not implemented. */
void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}
520
521 int
522 kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
523 {
524 extern int hz;
525 int rv, error;
526 uint64_t sec, nsec;
527
528 if (mtx)
529 mutex_exit(mtx);
530
531 sec = timeo / hz;
532 nsec = (timeo % hz) * (1000000000 / hz);
533 rv = rumpuser_nanosleep(&sec, &nsec, &error);
534
535 if (mtx)
536 mutex_enter(mtx);
537
538 if (rv)
539 return error;
540
541 return 0;
542 }
543
/* Suspending the scheduler is not implemented. */
void
suspendsched(void)
{

	panic("%s: not implemented", __func__);
}

/*
 * Remove an lwp from its sleep queue via its sync object's unsleep
 * method.  The caller must hold l->l_mutex.
 */
u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}
559
/*
 * Return pct percent of physical memory, in bytes.  map and va_pct
 * are accepted for interface compatibility but unused.  Panics if the
 * result does not fit in a vaddr_t.
 */
vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}
571
572 int
573 seltrue(dev_t dev, int events, struct lwp *l)
574 {
575 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
576 }
577
/*
 * select/poll plumbing: recording and notification are no-ops here,
 * so nothing ever blocks on a selinfo.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}
597
/* All devices share a single fixed placeholder name. */
const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}
610
/* Setting the timecounter clock is not implemented. */
void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

/* Credential modification sections are not implemented. */
void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

/* See proc_crmod_enter(). */
void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}
631
/* Machine-dependent module initialization hook. */
void
module_init_md(void)
{

	/*
	 * Nothing for now.  However, we should load the librump
	 * symbol table.
	 */
}
641
/* us and them, after all we're only ordinary seconds */
/*
 * Busy-wait replacement: sleep for us microseconds via the host.
 * Warns when asked for a delay of a second or more, since DELAY()
 * callers normally expect short waits, but sleeps anyway.
 */
static void
rump_delay(unsigned int us)
{
	uint64_t sec, nsec;
	int error;

	sec = us / 1000000;
	nsec = (us % 1000000) * 1000;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sec, &nsec, &error);
}
/* kernel DELAY()/delay() indirection points at our sleeping variant */
void (*delay_func)(unsigned int) = rump_delay;
658
/* Disabling preemption is allowed (see kpreempt_enable() below). */
void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

/* Re-enabling is not: kernel preemption is unsupported here. */
void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}
674
/* Sessions do not exist in the rump kernel; holding one is a bug. */
void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}

/* See proc_sesshold(). */
void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}

/* The tty output queue is always considered flushed. */
int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}
695
/*
 * Console output: emit one character via the rumpuser hypercall.
 * Errors are ignored.
 */
void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

/* Console output is unbuffered, so there is nothing to flush. */
void
cnflush(void)
{

	/* done */
}

/* tty putchar: route to the console; flags and tp are ignored. */
int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}
718
/*
 * "Reboot" the rump kernel: howto and bootstr are ignored and the
 * hypercall terminates the hosting process.
 */
void
cpu_reboot(int howto, char *bootstr)
{

	rumpuser_panic();
}
725
/* XXX: static, but not used except to make spcopy.S link */
#ifdef __hppa__
#undef curlwp
struct lwp *curlwp = &lwp0;
#endif
731
732 /*
733 * XXX: from sys_select.c, see that file for license.
734 * (these will go away really soon in favour of the real sys_select.c)
735 * ((really, the select code just needs cleanup))
736 * (((seriously)))
737 */
/*
 * Validate a timeout and record the monotonic start time in *sleepts
 * for later use by gettimeleft().  Returns -1 if ts is invalid.
 */
int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{
	if (itimespecfix(ts))
		return -1;
	getnanouptime(sleepts);
	return 0;
}
746
/*
 * Recompute the remaining timeout after a wakeup: subtract the time
 * elapsed since *sleepts (monotonic clock) from *ts, update *sleepts
 * to "now" for the next retry, and return the remainder in ticks.
 */
int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timespec sleptts;
	/*
	 * reduce ts by elapsed time
	 * based on monotonic time scale
	 */
	getnanouptime(&sleptts);
	/* ts = ts + sleepts - sleptts, i.e. minus the elapsed interval */
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
	return tstohz(ts);
}
764