/*	$NetBSD: emul.c,v 1.106 2009/11/04 17:01:45 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.106 2009/11/04 17:01:45 pooka Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/timetc.h>
#include <sys/tprintf.h>
#include <sys/module.h>
#include <sys/tty.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto = AB_SILENT;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops sig_filtops;

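/*
 * Statically sized device switch tables: the block and character
 * devsw arrays, their size limits and the devsw name conversion
 * table.  DEVSW_SIZE is an arbitrary cap (note the XXX comments).
 */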
#define DEVSW_SIZE 255
const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct bdevsw **bdevsw = bdevsw0;
const int sys_bdevsws = DEVSW_SIZE;
int max_bdevsws = DEVSW_SIZE;

const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
const struct cdevsw **cdevsw = cdevsw0;
const int sys_cdevsws = DEVSW_SIZE;
int max_cdevsws = DEVSW_SIZE;

struct devsw_conv devsw_conv0;
struct devsw_conv *devsw_conv = &devsw_conv0;
int max_devsw_convs = 0;
int mem_no = 2;

struct device *booted_device;
struct device *booted_wedge;
int booted_partition;

kmutex_t tty_lock;

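/*
 * Minimal autoconf glue: only the faked root device is known,
 * and it is reported as a disk.
 */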
devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

void
getnanouptime(struct timespec *ts)
{

	rump_getuptime(ts);
}

void
getmicrouptime(struct timeval *tv)
{
	struct timespec ts;

	getnanouptime(&ts);
	TIMESPEC_TO_TIMEVAL(tv, &ts);
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

void *
kern_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
}

void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}

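/*
 * Clock access.  gettime() reads the host's clock directly through
 * rumpuser_gettime(); nanotime() and friends use rump_gettime() when
 * rump threads are enabled and fall back to the host clock otherwise.
 */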
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

void
nanotime(struct timespec *ts)
{

	if (rump_threads) {
		rump_gettime(ts);
	} else {
		gettime(ts);
	}
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	struct timespec ts;

	if (rump_threads) {
		rump_gettime(&ts);
		TIMESPEC_TO_TIMEVAL(tv, &ts);
	} else {
		gettime(&ts);
		TIMESPEC_TO_TIMEVAL(tv, &ts);
	}
}

void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}

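/*
 * Kernel threads are backed by host threads.  A kthdesc carries the
 * entry point, its argument and the pre-created lwp from
 * kthread_create() to threadbouncer(), which runs on the new host
 * thread: it binds the lwp, schedules itself, frees the descriptor
 * and takes the kernel lock for non-MPSAFE threads before calling
 * the real entry point.
 */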
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	/* schedule ourselves first */
	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	rump_schedule();

	kmem_free(k, sizeof(struct kthdesc));
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

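/*
 * kthread_create: create a kernel thread on top of a host thread.
 * Bound threads (ci != NULL) are not supported, and when rump threads
 * are disabled (RUMP_THREADS unset) a few well-known threads are
 * silently skipped instead of created.
 *
 * Illustrative use, following the standard kthread(9) signature
 * (hypothetical caller and entry point):
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *	    mythread, NULL, NULL, "mythread");
 */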
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(thrstore, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(thrstore, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else if (strcmp(thrstore, "nfssilly") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			    " nfs silly rename\n");
			return 0;
		} else if (strcmp(thrstore, "unpgc") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			    " UNP garbage collection\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_lwp_alloc(0, rump_nextlid());
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	rv = rumpuser_thread_create(threadbouncer, k, thrname);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_lwp_release(curlwp);
	rump_unschedule();
	rumpuser_thread_exit();
}

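/*
 * Process, process group and signal routines are not supported and
 * panic if reached; the exceptions are psignal(), which tolerates
 * SIGSYS, and sigispending(), which always reports no pending signals.
 */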
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d\n", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

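/*
 * kpause: drop the interlock (if one was given), convert the timeout
 * from ticks to seconds and nanoseconds, and sleep in the hypervisor.
 */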
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	uint64_t sec, nsec;

	if (mtx)
		mutex_exit(mtx);

	sec = timeo / hz;
	nsec = (timeo % hz) * (1000000000 / hz);
	rv = rumpuser_nanosleep(&sec, &nsec, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched(void)
{

	/* we don't control scheduling currently, can't do anything now */
}

void
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

int
proc_uidmatch(kauth_cred_t cred, kauth_cred_t target)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}

void
module_init_md(void)
{

	/*
	 * Nothing for now. However, we should load the librump
	 * symbol table.
	 */
}

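/*
 * rump_delay() turns a microsecond delay into a hypervisor nanosleep
 * and warns about delays of a second or more; it is published through
 * the delay_func pointer below.
 */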
/* us and them, after all we're only ordinary seconds */
static void
rump_delay(unsigned int us)
{
	uint64_t sec, nsec;
	int error;

	sec = us / 1000000;
	nsec = (us % 1000000) * 1000;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sec, &nsec, &error);
}
void (*delay_func)(unsigned int) = rump_delay;

void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}

void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}

void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}

int
proc_vmspace_getref(struct proc *p, struct vmspace **vm)
{

	/* XXX */
	*vm = p->p_vmspace;
	return 0;
}

int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}

void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

void
cnflush(void)
{

	/* done */
}

int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}

void
cpu_reboot(int howto, char *bootstr)
{

	rump_reboot(howto);

	/* this function is __dead, we must exit */
	rumpuser_exit(0);
}

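/*
 * Power management hooks: registration always succeeds, but the
 * handlers are simply discarded.
 */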
bool
pmf_device_register1(struct device *dev,
    bool (*suspend)(device_t PMF_FN_PROTO),
    bool (*resume)(device_t PMF_FN_PROTO),
    bool (*shutdown)(device_t, int))
{

	return true;
}

void
pmf_device_deregister(struct device *dev)
{

	/* nada */
}