/*	$NetBSD: emul.c,v 1.53.8.1 2014/02/15 17:48:09 matt Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define malloc(a,b,c) __wrap_malloc(a,b,c)

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/tprintf.h>
#include <sys/timetc.h>

#include <machine/stdarg.h>

#include <rump/rumpuser.h>

#include <uvm/uvm_map.h>

#include "rump_private.h"

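/*
 * Global variables that the rest of the kernel expects to find defined
 * somewhere.  The rump kernel supplies minimal stand-in definitions here.
 */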
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;
u_int nbuf;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

const struct filterops seltrue_filtops;

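/*
 * Console and log output: everything below funnels into the kernel
 * printf/vprintf routines; panic() and vpanic() additionally abort().
 */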
void
vpanic(const char *fmt, va_list ap)
{

	printf("panic: ");
	vprintf(fmt, ap);
	printf("\n");
	abort();
}

void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	printf("panic: ");
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
	abort();
}

void
log(int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
vlog(int level, const char *fmt, va_list ap)
{

	vprintf(fmt, ap);
}

void
uprintf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* relegate this to regular printf */
tpr_t
tprintf_open(struct proc *p)
{

	return (tpr_t)0x111;
}

void
tprintf(tpr_t tpr, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
tprintf_close(tpr_t tpr)
{

}

void
printf_nolog(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

void
aprint_normal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

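/*
 * copyin/copyout and friends: the rump kernel and its caller share one
 * address space here, so a plain memcpy/strlcpy does the job.
 */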
int
copyin(const void *uaddr, void *kaddr, size_t len)
{

	memcpy(kaddr, uaddr, len);
	return 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{

	memcpy(uaddr, kaddr, len);
	return 0;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{

	strlcpy(kaddr, uaddr, len);
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
}

int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}

int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}

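/*
 * uiomove: copy between the buffer and the iovecs described by the uio,
 * advancing iov_base, uio_resid and uio_offset as we go.  Only
 * UIO_VMSPACE_SYS transfers are handled (see the panic below).
 */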
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	uint8_t *b = buf;
	size_t cnt;
	int rv;

	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);

	/*
	 * See if rump ubc code claims the offset.  This is of course
	 * a blatant violation of abstraction levels, but let's keep
	 * it simple & stupid for now.
	 */
	if (rump_ubc_magic_uiomove(buf, n, uio, &rv, NULL))
		return rv;

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if (uio->uio_rw == UIO_READ)
			memcpy(iov->iov_base, b, cnt);
		else
			memcpy(b, iov->iov_base, cnt);

		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		b += cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}

	return 0;
}

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}

const struct bdevsw *
bdevsw_lookup(dev_t dev)
{

	return (const struct bdevsw *)1;
}

devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}

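/* Timekeeping: all clock reads are serviced by the rumpuser_gettimeofday() hypercall. */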
void
getmicrouptime(struct timeval *tvp)
{
	int error;

	rumpuser_gettimeofday(tvp, &error);
}

void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}

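/*
 * malloc(9) is redirected to __wrap_malloc() by the #define at the top of
 * this file and backed by the rumpuser_malloc() hypercall; M_ZERO is
 * handled here with an explicit memset.
 */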
void *
__wrap_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}

void
nanotime(struct timespec *ts)
{
	struct timeval tv;
	int error;

	rumpuser_gettimeofday(&tv, &error);
	TIMEVAL_TO_TIMESPEC(&tv, ts);
}

/* hooray for mick, so what if I do */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}

void
microtime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
getmicrotime(struct timeval *tv)
{
	int error;

	rumpuser_gettimeofday(tv, &error);
}

void
bdev_strategy(struct buf *bp)
{

	panic("%s: not supported", __func__);
}

int
bdev_type(dev_t dev)
{

	return D_DISK;
}

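/*
 * Kernel threads are implemented as host threads.  struct kthdesc carries
 * the entry point, its argument and the new lwp across the
 * rumpuser_thread_create() boundary; threadbouncer() unpacks it and calls
 * the real function.
 */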
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	void (*f)(void *);
	void *thrarg;

	f = k->f;
	thrarg = k->arg;
	rumpuser_set_curlwp(k->mylwp);
	kmem_free(k, sizeof(struct kthdesc));

	f(thrarg);
	panic("unreachable, should kthread_exit()");
}

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	if (!rump_threads) {
		/* fake them */
		if (strcmp(fmt, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " vrele thread\n");
			return 0;
		} else if (strcmp(fmt, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			    " namecache g/c thread\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}

	KASSERT(fmt != NULL);
	if (ci != NULL)
		panic("%s: bounded threads not supported", __func__);

	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
	rv = rumpuser_thread_create(threadbouncer, k);
	if (rv)
		return rv;

	if (newlp)
		*newlp = l;
	return 0;
}

void
kthread_exit(int ecode)
{

	rumpuser_thread_exit();
}

struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

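/* Minimal signal handling: psignal() quietly drops SIGSYS and panics on anything else. */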
void
psignal(struct proc *p, int signo)
{

	switch (signo) {
	case SIGSYS:
		break;
	default:
		panic("unhandled signal %d", signo);
	}
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}

int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}

void
knote_fdclose(int fd)
{

	/* since we don't add knotes, we don't have to remove them */
}

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{

	panic("%s: not implemented", __func__);
}

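/*
 * kpause: convert the tick count into a timespec and sleep via the
 * rumpuser_nanosleep() hypercall, dropping and retaking the interlock
 * around the sleep.
 */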
int
kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
{
	extern int hz;
	int rv, error;
	struct timespec time;

	if (mtx)
		mutex_exit(mtx);

	time.tv_sec = timeo / hz;
	time.tv_nsec = (timeo % hz) * (1000000000 / hz);

	rv = rumpuser_nanosleep(&time, NULL, &error);

	if (mtx)
		mutex_enter(mtx);

	if (rv)
		return error;

	return 0;
}

void
suspendsched(void)
{

	panic("%s: not implemented", __func__);
}

u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}

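/*
 * select/poll support: no knotes or selectors are ever registered, so the
 * selinfo routines are no-ops and seltrue() reports always-ready.
 */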
int
seltrue(dev_t dev, int events, struct lwp *l)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}

const char *
device_xname(device_t dv)
{
	return "bogus0";
}

void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    const struct cdevsw *cdev, int *cmajor)
{

	panic("%s: not implemented", __func__);
}

int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	panic("%s: not implemented", __func__);
}

void
tc_setclock(struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}