/*	$NetBSD: emul.c,v 1.62 2008/12/20 09:17:55 cegger Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.62 2008/12/20 09:17:55 cegger Exp $");

#define malloc(a,b,c) __wrap_malloc(a,b,c)

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/tprintf.h>
#include <sys/timetc.h>

#include <machine/bswap.h>
#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

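/*
 * Globals that the rest of the kernel expects to find.  They are
 * normally initialized during bootstrap; here they simply get
 * static placeholder values.
 */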
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops seltrue_filtops;

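/*
 * Console and log output.  All of the variants below are simply
 * routed to vprintf(); panic() additionally abort()s the process.
 */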
void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	printf("panic: ");
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
	abort();
}

void
log(int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
vlog(int level, const char *fmt, va_list ap)
{

	vprintf(fmt, ap);
}

void
uprintf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* relegate this to regular printf */
tpr_t
tprintf_open(struct proc *p)
{

	return (tpr_t)0x111;
}

void
tprintf(tpr_t tpr, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
tprintf_close(tpr_t tpr)
{

}

void
printf_nolog(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
aprint_normal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

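/*
 * Copyin/copyout and friends.  The rump kernel and its clients run
 * in the same address space, so the user/kernel copy routines
 * reduce to plain memcpy()/strlcpy().
 */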
int
copyin(const void *uaddr, void *kaddr, size_t len)
{

	memcpy(kaddr, uaddr, len);
	return 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{

	memcpy(uaddr, kaddr, len);
	return 0;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	strlcpy(kaddr, uaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}

int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}

int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}

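/*
 * Minimal uiomove(): walk the iovec array, moving up to n bytes
 * between buf and the iovecs in the direction given by uio_rw.
 * Only UIO_VMSPACE_SYS uios are supported.
 */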
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}

const struct bdevsw *
bdevsw_lookup(dev_t dev)
{

	return (const struct bdevsw *)1;
}

devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

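/*
 * Time.  All clock queries are serviced by the host through
 * rumpuser_gettimeofday(), so getmicrouptime() returns host
 * wall-clock time rather than a real uptime.
 */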
void
getmicrouptime(struct timeval *tvp)
{
	int error;

	rumpuser_gettimeofday(tvp, &error);
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

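/*
 * malloc(9) is redirected to this wrapper (see the #define at the
 * top of the file) and satisfied through rumpuser_malloc() on the
 * host.  M_ZERO is honoured with memset().
 */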
void *
__wrap_malloc(unsigned long size, struct malloc_type *type, malloc_flag_t flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

void
nanotime(struct timespec *ts)
{
	struct timeval tv;
	int error;

	rumpuser_gettimeofday(&tv, &error);
	TIMEVAL_TO_TIMESPEC(&tv, ts);
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
getmicrotime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
bdev_strategy(struct buf *bp)
{

	panic("%s: not supported", __func__);
}

int
bdev_type(dev_t dev)
{

	return D_DISK;
}

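/*
 * Kernel threads are backed by host threads.  kthread_create()
 * packs the entry point and argument into a kthdesc and hands it to
 * rumpuser_thread_create(); threadbouncer() then runs on the new
 * host thread, sets up curlwp and calls the actual thread function.
 */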
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
	bool mpsafe;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;
	bool mpsafe;

	/* read everything out of k before it is freed */
	f = k->f;
	thrarg = k->arg;
	mpsafe = k->mpsafe;
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	if (!mpsafe)
		KERNEL_LOCK(1, NULL);
	f(thrarg);
	panic("unreachable, should kthread_exit()");
}

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(fmt, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(fmt, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(fmt, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
	k->mpsafe = flags & KTHREAD_MPSAFE;
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}
	rv = rumpuser_thread_create(threadbouncer, k, thrname);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	panic("FIXME: kthread_exit() does not support mpsafe locking");
	rumpuser_thread_exit();
}

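/*
 * Process and signal handling is mostly unimplemented; the routines
 * below panic if called.  psignal() silently swallows SIGSYS and
 * panics on any other signal.
 */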
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

void
knote_fdclose(int fd)
{

	/* since we don't add knotes, we don't have to remove them */
}

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{

	panic("%s: not implemented", __func__);
}

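/*
 * kpause(): convert the tick count into a timespec and sleep on the
 * host via rumpuser_nanosleep(), dropping and reacquiring the
 * interlock around the sleep.
 */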
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	struct timespec time;

	if (mtx)
		mutex_exit(mtx);

	time.tv_sec = timeo / hz;
	time.tv_nsec = (timeo % hz) * (1000000000 / hz);

	rv = rumpuser_nanosleep(&time, NULL, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched()
{

	panic("%s: not implemented", __func__);
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

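/*
 * Size the buffer cache as a percentage of (fake) physmem; the
 * va_pct limit is ignored here.
 */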
vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

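/*
 * select/poll: seltrue() reports all read/write events as ready and
 * the selinfo routines are no-ops.
 */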
int
seltrue(dev_t dev, int events, struct lwp *l)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}

const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    const struct cdevsw *cdev, int *cmajor)
{

	panic("%s: not implemented", __func__);
}

int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	panic("%s: not implemented", __func__);
}

void
tc_setclock(struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter()
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}

/*
 * Byteswap is in slightly bad taste linked directly against libc.
 * In case our machine uses namespace-renamed symbols, provide
 * an escape route. We really should be including libkern, but
 * leave that to a later date.
 */
#ifdef __BSWAP_RENAME
#undef bswap16
#undef bswap32
uint16_t __bswap16(uint16_t);
uint32_t __bswap32(uint32_t);

uint16_t
bswap16(uint16_t v)
{

	return __bswap16(v);
}

uint32_t
bswap32(uint32_t v)
{

	return __bswap32(v);
}
#endif /* __BSWAP_RENAME */