/*	$NetBSD: emul.c,v 1.73 2009/01/11 02:45:55 christos Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.73 2009/01/11 02:45:55 christos Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/timetc.h>
#include <sys/tprintf.h>
#include <sys/module.h>
#include <sys/tty.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

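/*
 * Kernel globals and tunables that machine-dependent bootstrap code
 * would normally set up.  A rump kernel skips that path, so give them
 * fixed values here.
 */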
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops seltrue_filtops;

#define DEVSW_SIZE 255
const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct bdevsw **bdevsw = bdevsw0;
const int sys_bdevsws = DEVSW_SIZE;
int max_bdevsws = DEVSW_SIZE;

const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct cdevsw **cdevsw = cdevsw0;
const int sys_cdevsws = DEVSW_SIZE;
int max_cdevsws = DEVSW_SIZE;

struct devsw_conv devsw_conv0;
struct devsw_conv *devsw_conv = &devsw_conv0;
int max_devsw_convs = 0;

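/*
 * Copy routines.  A rump kernel and its "userspace" live in the same
 * host process and share an address space, so these all reduce to
 * plain memory copies with no fault handling.  Note that copyinstr()
 * silently truncates instead of returning ENAMETOOLONG.
 */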
int
copyin(const void *uaddr, void *kaddr, size_t len)
{

	memcpy(kaddr, uaddr, len);
	return 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{

	memcpy(uaddr, kaddr, len);
	return 0;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	strlcpy(kaddr, uaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}

int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}

int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}

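/*
 * Minimal uiomove: shovel data between buf and the uio's iovec chain.
 * Only kernel-space uios are supported.
 */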
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}

devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

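/* XXX: this is not really uptime, just the host's wall clock */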
void
getmicrouptime(struct timeval *tvp)
{
	int error;

	rumpuser_gettimeofday(tvp, &error);
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

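/*
 * malloc(9) is backed directly by the hypervisor's allocator.
 * M_ZERO is honoured; note that kern_realloc() does not preserve
 * the old contents (XXX).
 */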
void *
kern_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
}

void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}

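/*
 * The remaining time interfaces are likewise backed by the host clock
 * via rumpuser_gettimeofday(), so nanotime() really has microsecond
 * resolution.
 */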
void
nanotime(struct timespec *ts)
{
	struct timeval tv;
	int error;

	rumpuser_gettimeofday(&tv, &error);
	TIMEVAL_TO_TIMESPEC(&tv, ts);
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
getmicrotime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

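/*
 * Kernel threads are implemented as host threads.  threadbouncer()
 * is the first thing to run on a new host thread: it installs the
 * thread's lwp, takes the big kernel lock unless the thread is
 * MPSAFE, and then calls the actual thread function, which is
 * expected to finish with kthread_exit().
 */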
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
	f(thrarg);
	panic("unreachable, should kthread_exit()");
}

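/*
 * Create a kernel thread: allocate an lwp for it and hand the actual
 * thread creation off to the hypervisor.  Some threads are skipped
 * or faked, see below.
 */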
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(fmt, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(fmt, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(fmt, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}
	rv = rumpuser_thread_create(threadbouncer, k, thrname);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_clear_curlwp();
	rumpuser_thread_exit();
}

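/*
 * Process, process group and signal glue: mostly unimplemented stubs
 * which panic if anything actually ends up calling them.
 */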
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

void
knote_fdclose(int fd)
{

	/* since we don't add knotes, we don't have to remove them */
}

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{

	panic("%s: not implemented", __func__);
}

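/*
 * kpause: convert the tick count to a timespec and sleep in the
 * hypervisor, dropping and reacquiring the interlock around the
 * sleep.
 */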
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	struct timespec time;

	if (mtx)
		mutex_exit(mtx);

	time.tv_sec = timeo / hz;
	time.tv_nsec = (timeo % hz) * (1000000000 / hz);

	rv = rumpuser_nanosleep(&time, NULL, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched(void)
{

	panic("%s: not implemented", __func__);
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

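/*
 * Size the buffer cache as a percentage of our fake physmem; the
 * virtual address limit (va_pct) is ignored here.
 */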
vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

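/*
 * select/poll glue: seltrue() always reports ready for I/O and the
 * selinfo routines are no-ops.
 */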
int
seltrue(dev_t dev, int events, struct lwp *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}

const char *
device_xname(device_t dv)
{

	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}

void
module_init_md(void)
{

	/*
	 * Nothing for now.  However, we should load the librump
	 * symbol table.
	 */
}

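/*
 * Backend for delay_func: convert microseconds to a timespec and
 * sleep in the host, i.e. we sleep instead of busy-waiting.
 */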
/* us and them, after all we're only ordinary seconds */
static void
rump_delay(unsigned int us)
{
	struct timespec ts;
	int error;

	ts.tv_sec = us / 1000000;
	ts.tv_nsec = (us % 1000000) * 1000;

	if (__predict_false(ts.tv_sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&ts, NULL, &error);
}
void (*delay_func)(unsigned int) = rump_delay;

void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}

void
sessdelete(struct session *ss)
{

	panic("sessdelete() impossible, session %p", ss);
}

int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}

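/* Console output goes to the host via rumpuser_putchar(). */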
void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

void
cnflush(void)
{

	/* done */
}

int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}

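/* Rebooting a rump kernel is not supported; punt to the hypervisor. */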
void
cpu_reboot(int howto, char *bootstr)
{

	rumpuser_panic();
}