/*	$NetBSD: emul.c,v 1.86 2009/04/26 14:37:03 pgoyette Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.86 2009/04/26 14:37:03 pgoyette Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/timetc.h>
#include <sys/tprintf.h>
#include <sys/module.h>
#include <sys/tty.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops seltrue_filtops;
const struct filterops sig_filtops;

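/*
 * Statically sized block and character device switch tables.  The
 * fixed DEVSW_SIZE is simply assumed to be enough for the drivers a
 * rump kernel is expected to host.
 */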
#define DEVSW_SIZE 255
const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct bdevsw **bdevsw = bdevsw0;
const int sys_cdevsws = DEVSW_SIZE;
int max_cdevsws = DEVSW_SIZE;

const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct cdevsw **cdevsw = cdevsw0;
const int sys_bdevsws = DEVSW_SIZE;
int max_bdevsws = DEVSW_SIZE;

struct devsw_conv devsw_conv0;
struct devsw_conv *devsw_conv = &devsw_conv0;
int max_devsw_convs = 0;
int mem_no = 2;

kmutex_t tty_lock;

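/*
 * Userspace memory access: the rump kernel and its client share the
 * host process address space, so copyin/copyout and friends reduce
 * to plain memory copies.
 */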
int
copyin(const void *uaddr, void *kaddr, size_t len)
{

	memcpy(kaddr, uaddr, len);
	return 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{

	memcpy(uaddr, kaddr, len);
	return 0;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	strlcpy(kaddr, uaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}

int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{

	strlcpy(uaddr, kaddr, len);
	if (done)
		*done = strlen(uaddr)+1; /* includes termination */
	return 0;
}

int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}

int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}

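/* uiomove() handles only kernel-space uios; anything else panics. */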
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}

devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

void
getnanouptime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	/* XXX: this is wrong, does not report *uptime* */
	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

void
getmicrouptime(struct timeval *tv)
{
	uint64_t sec, nsec;
	int error;

	/* XXX: this is wrong, does not report *uptime* */
	rumpuser_gettime(&sec, &nsec, &error);
	tv->tv_sec = sec;
	tv->tv_usec = nsec / 1000;
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

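/*
 * malloc(9) is backed directly by the rumpuser hypercall layer;
 * the malloc type argument is ignored.
 */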
void *
kern_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

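/* XXX: does not preserve the old contents and never frees ptr */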
void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
}

void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}

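/* Wall clock time is read straight from the host via rumpuser_gettime(). */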
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

void
nanotime(struct timespec *ts)
{

	if (rump_threads) {
		rump_gettime(ts);
	} else {
		gettime(ts);
	}
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	struct timespec ts;

	if (rump_threads) {
		rump_gettime(&ts);
		TIMESPEC_TO_TIMEVAL(tv, &ts);
	} else {
		gettime(&ts);
		TIMESPEC_TO_TIMEVAL(tv, &ts);
	}
}

void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}

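/*
 * Kernel threads are emulated with host threads.  kthread_create()
 * packs the entry point and argument into a kthdesc and hands it to
 * rumpuser_thread_create(); threadbouncer() then establishes the lwp
 * context in the new host thread before calling the real function.
 */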
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
	f(thrarg);
	panic("unreachable, should kthread_exit()");
}

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(thrstore, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(thrstore, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else if (strcmp(thrstore, "nfssilly") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			    " nfs silly rename\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	rv = rumpuser_thread_create(threadbouncer, k, thrname);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_clear_curlwp();
	rumpuser_thread_exit();
}

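/*
 * Process and signal routines: mostly unimplemented stubs, since a
 * rump kernel does not host real processes.
 */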
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

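/* kpause() converts the tick count to a host nanosleep. */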
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	uint64_t sec, nsec;

	if (mtx)
		mutex_exit(mtx);

	sec = timeo / hz;
	nsec = (timeo % hz) * (1000000000 / hz);
	rv = rumpuser_nanosleep(&sec, &nsec, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched(void)
{

	panic("%s: not implemented", __func__);
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

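/*
 * select/poll support: report descriptors as always ready and ignore
 * the recording hooks.
 */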
int
seltrue(dev_t dev, int events, struct lwp *l)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}

const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}

void
module_init_md(void)
{

	/*
	 * Nothing for now. However, we should load the librump
	 * symbol table.
	 */
}

/* us and them, after all we're only ordinary seconds */
static void
rump_delay(unsigned int us)
{
	uint64_t sec, nsec;
	int error;

	sec = us / 1000000;
	nsec = (us % 1000000) * 1000;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sec, &nsec, &error);
}
void (*delay_func)(unsigned int) = rump_delay;

void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}

void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}

void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}

int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}

void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

void
cnflush(void)
{

	/* done */
}

int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}

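/* cpu_reboot() simply aborts via the rumpuser panic hypercall. */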
void
cpu_reboot(int howto, char *bootstr)
{

	rumpuser_panic();
}

/* XXX: static, but not used except to make spcopy.S link */
#ifdef __hppa__
#undef curlwp
struct lwp *curlwp = &lwp0;
#endif

/*
 * XXX: from sys_select.c, see that file for license.
 * (these will go away really soon in favour of the real sys_select.c)
 * ((really, the select code just needs cleanup))
 * (((seriously)))
 */
int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{
	if (itimespecfix(ts))
		return -1;
	getnanouptime(sleepts);
	return 0;
}

int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timespec sleptts;
	/*
	 * reduce ts by elapsed time
	 * based on monotonic time scale
	 */
	getnanouptime(&sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
	return tstohz(ts);
}