/*	$NetBSD: emul.c,v 1.105 2009/11/04 16:55:20 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.105 2009/11/04 16:55:20 pooka Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/timetc.h>
#include <sys/tprintf.h>
#include <sys/module.h>
#include <sys/tty.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

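/*
 * Kernel globals which the rump kernel must provide but does not compute
 * for real: most are dummies or fixed values that are good enough for the
 * subsystems hosted in a rump kernel.
 */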
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto = AB_SILENT;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops sig_filtops;

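/*
 * Block and character device switch tables.  Here they are simply
 * fixed-size arrays of DEVSW_SIZE entries and are never grown, as the
 * XXX comments below note.
 */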
#define DEVSW_SIZE 255
const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct bdevsw **bdevsw = bdevsw0;
const int sys_bdevsws = DEVSW_SIZE;
int max_bdevsws = DEVSW_SIZE;

const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct cdevsw **cdevsw = cdevsw0;
const int sys_cdevsws = DEVSW_SIZE;
int max_cdevsws = DEVSW_SIZE;

struct devsw_conv devsw_conv0;
struct devsw_conv *devsw_conv = &devsw_conv0;
int max_devsw_convs = 0;
int mem_no = 2;

struct device *booted_device;
struct device *booted_wedge;
int booted_partition;

kmutex_t tty_lock;

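/*
 * User<->kernel copy routines.  In a rump kernel the "user" address is
 * either in the local process (curproc runs on rump_vmspace), in which
 * case a plain memcpy/strlcpy does the job, or it belongs to a remote
 * client and the data is shuttled through the sysproxy copy routines.
 */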
int
copyin(const void *uaddr, void *kaddr, size_t len)
{

	if (curproc->p_vmspace == &rump_vmspace)
		memcpy(kaddr, uaddr, len);
	else
		rump_sysproxy_copyin(uaddr, kaddr, len);
	return 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{

	if (curproc->p_vmspace == &rump_vmspace)
		memcpy(uaddr, kaddr, len);
	else
		rump_sysproxy_copyout(kaddr, uaddr, len);
	return 0;
}

int
subyte(void *uaddr, int byte)
{

	if (curproc->p_vmspace == &rump_vmspace)
		*(char *)uaddr = byte;
	else
		rump_sysproxy_copyout(&byte, uaddr, 1);
	return 0;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	if (curproc->p_vmspace == &rump_vmspace)
		strlcpy(kaddr, uaddr, len);
	else
		rump_sysproxy_copyin(uaddr, kaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}

int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{

	if (curproc->p_vmspace == &rump_vmspace)
		strlcpy(uaddr, kaddr, len);
	else
		rump_sysproxy_copyout(kaddr, uaddr, len);
	if (done)
		*done = strlen(uaddr)+1; /* includes termination */
	return 0;
}

int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}

devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

void
getnanouptime(struct timespec *ts)
{

	rump_getuptime(ts);
}

void
getmicrouptime(struct timeval *tv)
{
	struct timespec ts;

	getnanouptime(&ts);
	TIMESPEC_TO_TIMEVAL(tv, &ts);
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

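/*
 * malloc(9) is backed directly by the host allocator via rumpuser.
 * The M_CANFAIL/M_NOWAIT bits are passed down so the hypercall knows
 * whether the allocation is allowed to fail; M_ZERO is handled here.
 */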
void *
kern_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
}

void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}

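/*
 * Time access.  With rump threads enabled the clock is maintained by the
 * rump kernel itself (rump_gettime/rump_getuptime); without threads the
 * host time is queried directly for every call.
 */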
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

void
nanotime(struct timespec *ts)
{

	if (rump_threads) {
		rump_gettime(ts);
	} else {
		gettime(ts);
	}
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	struct timespec ts;

	if (rump_threads) {
		rump_gettime(&ts);
		TIMESPEC_TO_TIMEVAL(tv, &ts);
	} else {
		gettime(&ts);
		TIMESPEC_TO_TIMEVAL(tv, &ts);
	}
}

void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}

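/*
 * Kernel threads are implemented on top of host threads: kthread_create()
 * packs the entry point into a kthdesc and creates a host thread via
 * rumpuser_thread_create(), and threadbouncer() then binds the new lwp,
 * schedules it and takes the kernel lock (unless the thread is MPSAFE)
 * before calling the actual thread function.  Threads that make no sense
 * in a rump kernel, or when threads are disabled, are skipped with a
 * warning instead of being created.
 */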
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	/* schedule ourselves first */
	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	rump_schedule();

	kmem_free(k, sizeof(struct kthdesc));
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(thrstore, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(thrstore, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else if (strcmp(thrstore, "nfssilly") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			    " nfs silly rename\n");
			return 0;
		} else if (strcmp(thrstore, "unpgc") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			    " UNP garbage collection\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_lwp_alloc(0, rump_nextlid());
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	rv = rumpuser_thread_create(threadbouncer, k, thrname);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_lwp_release(curlwp);
	rump_unschedule();
	rumpuser_thread_exit();
}

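/*
 * Process and signal routines are mostly unimplemented stubs: anything
 * that would require real process machinery panics, and psignal()
 * tolerates only SIGSYS, which the rump kernel can afford to ignore.
 */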
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d\n", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

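/*
 * kpause() is emulated by converting the tick count to seconds and
 * nanoseconds and sleeping in the host via rumpuser_nanosleep(), with
 * the interlock dropped for the duration of the sleep.
 */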
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	uint64_t sec, nsec;

	if (mtx)
		mutex_exit(mtx);

	sec = timeo / hz;
	nsec = (timeo % hz) * (1000000000 / hz);
	rv = rumpuser_nanosleep(&sec, &nsec, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched(void)
{

	/* we don't control scheduling currently, can't do anything now */
}

void
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

int
proc_uidmatch(kauth_cred_t cred, kauth_cred_t target)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}

void
module_init_md(void)
{

	/*
	 * Nothing for now.  However, we should load the librump
	 * symbol table.
	 */
}

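/*
 * DELAY()-style wait, implemented as a host nanosleep.  Delays of a
 * second or more are unexpected for this interface, hence the warning.
 */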
/* us and them, after all we're only ordinary seconds */
static void
rump_delay(unsigned int us)
{
	uint64_t sec, nsec;
	int error;

	sec = us / 1000000;
	nsec = (us % 1000000) * 1000;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sec, &nsec, &error);
}
void (*delay_func)(unsigned int) = rump_delay;

void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}

void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}

void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}

int
proc_vmspace_getref(struct proc *p, struct vmspace **vm)
{

	/* XXX */
	*vm = p->p_vmspace;
	return 0;
}

int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}

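/* Console output simply goes to the host via the rumpuser putchar hypercall. */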
void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

void
cnflush(void)
{

	/* done */
}

int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}

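/*
 * Rebooting a rump kernel means tearing down kernel state and exiting
 * the hosting process, since there is no machine to reset.
 */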
void
cpu_reboot(int howto, char *bootstr)
{

	rump_reboot(howto);

	/* this function is __dead, we must exit */
	rumpuser_exit(0);
}

bool
pmf_device_register1(struct device *dev,
	bool (*suspend)(device_t PMF_FN_PROTO),
	bool (*resume)(device_t PMF_FN_PROTO),
	bool (*shutdown)(device_t, int))
{

	return true;
}

void
pmf_device_deregister(struct device *dev)
{

	/* nada */
}