/*	$NetBSD: rump.c,v 1.361 2023/10/05 19:41:07 ad Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.361 2023/10/05 19:41:07 ad Exp $");

#include <sys/systm.h>
#define ELFSIZE ARCH_ELFSIZE

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/callout.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/device_impl.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/exec_elf.h>
#include <sys/filedesc.h>
#include <sys/iostat.h>
#include <sys/kauth.h>
#include <sys/kcpuset.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kprintf.h>
#include <sys/kthread.h>
#include <sys/ksyms.h>
#include <sys/msgbuf.h>
#include <sys/module.h>
#include <sys/module_hook.h>
#include <sys/namei.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pipe.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/sysctl.h>
#include <sys/syscall.h>
#include <sys/syscallvar.h>
#include <sys/threadpool.h>
#include <sys/timetc.h>
#include <sys/tty.h>
#include <sys/uidinfo.h>
#include <sys/vmem.h>
#include <sys/xcall.h>
#include <sys/cprng.h>
#include <sys/rnd.h>
#include <sys/ktrace.h>
#include <sys/pserialize.h>
#include <sys/psref.h>

#include <rump-sys/kern.h>
#include <rump-sys/dev.h>
#include <rump-sys/net.h>
#include <rump-sys/vfs.h>

#include <rump/rumpuser.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

char machine[] = MACHINE;
char machine_arch[] = MACHINE_ARCH;

struct proc *initproc;

struct device rump_rootdev = {
	.dv_class = DV_VIRTUAL
};

#ifdef RUMP_WITHOUT_THREADS
int rump_threads = 0;
#else
int rump_threads = 1;
#endif

static void rump_component_addlocal(void);
static struct lwp *bootlwp;

/* 16k should be enough for std rump needs */
static char rump_msgbuf[16*1024] __aligned(256);

bool rump_ttycomponent = false;

extern pool_cache_t pnbuf_cache;

static int rump_inited;

void (*rump_vfs_drainbufs)(int) = (void *)nullop;
int (*rump_vfs_makeonedevnode)(dev_t, const char *,
	devmajor_t, devminor_t) = (void *)nullop;
int (*rump_vfs_makedevnodes)(dev_t, const char *, char,
	devmajor_t, devminor_t, int) = (void *)nullop;
int (*rump_vfs_makesymlink)(const char *, const char *) = (void *)nullop;

rump_proc_vfs_init_fn rump_proc_vfs_init = (void *)nullop;
rump_proc_vfs_release_fn rump_proc_vfs_release = (void *)nullop;

static void add_linkedin_modules(const struct modinfo *const *, size_t);
static void add_static_evcnt(struct evcnt *);

static pid_t rspo_wrap_getpid(void) {
	return rump_sysproxy_hyp_getpid();
}
static int rspo_wrap_syscall(int num, void *arg, long *retval) {
	return rump_sysproxy_hyp_syscall(num, arg, retval);
}
static int rspo_wrap_rfork(void *priv, int flag, const char *comm) {
	return rump_sysproxy_hyp_rfork(priv, flag, comm);
}
static void rspo_wrap_lwpexit(void) {
	rump_sysproxy_hyp_lwpexit();
}
static void rspo_wrap_execnotify(const char *comm) {
	rump_sysproxy_hyp_execnotify(comm);
}
static const struct rumpuser_hyperup hyp = {
	.hyp_schedule = rump_schedule,
	.hyp_unschedule = rump_unschedule,
	.hyp_backend_unschedule = rump_user_unschedule,
	.hyp_backend_schedule = rump_user_schedule,
	.hyp_lwproc_switch = rump_lwproc_switch,
	.hyp_lwproc_release = rump_lwproc_releaselwp,
	.hyp_lwproc_newlwp = rump_lwproc_newlwp,
	.hyp_lwproc_curlwp = rump_lwproc_curlwp,

	.hyp_getpid = rspo_wrap_getpid,
	.hyp_syscall = rspo_wrap_syscall,
	.hyp_lwproc_rfork = rspo_wrap_rfork,
	.hyp_lwpexit = rspo_wrap_lwpexit,
	.hyp_execnotify = rspo_wrap_execnotify,
};
struct rump_sysproxy_ops rump_sysproxy_ops = {
	.rspo_copyin = (void *)enxio,
	.rspo_copyinstr = (void *)enxio,
	.rspo_copyout = (void *)enxio,
	.rspo_copyoutstr = (void *)enxio,
	.rspo_anonmmap = (void *)enxio,
	.rspo_raise = (void *)enxio,
	.rspo_fini = (void *)enxio,
	.rspo_hyp_getpid = (void *)enxio,
	.rspo_hyp_syscall = (void *)enxio,
	.rspo_hyp_rfork = (void *)enxio,
	.rspo_hyp_lwpexit = (void *)enxio,
	.rspo_hyp_execnotify = (void *)enxio,
};

int
rump_daemonize_begin(void)
{

	if (rump_inited)
		return EALREADY;

	return rumpuser_daemonize_begin();
}

int
rump_daemonize_done(int error)
{

	return rumpuser_daemonize_done(error);
}
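
/*
 * Sketch of the daemonization protocol from a hypothetical client
 * program (not part of this file): the calling process detaches only
 * after the rump kernel has reported whether bootstrap succeeded.
 *
 *	if (rump_daemonize_begin() != 0)
 *		err(1, "rump_daemonize_begin");
 *	error = rump_init();
 *	if (rump_daemonize_done(error) != 0)
 *		err(1, "rump_daemonize_done");
 */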

#ifdef RUMP_USE_CTOR

/* sysctl bootstrap handling */
struct sysctl_boot_chain sysctl_boot_chain \
    = LIST_HEAD_INITIALIZER(sysctl_boot_chain);
__link_set_add_text(sysctl_funcs,voidop); /* ensure linkset is non-empty */

#else /* RUMP_USE_CTOR */

RUMP_COMPONENT(RUMP_COMPONENT_POSTINIT)
{
	__link_set_decl(rump_components, struct rump_component);

	/*
	 * Trick compiler into generating references so that statically
	 * linked rump kernels are generated with the link set symbols.
	 */
	asm("" :: "r"(__start_link_set_rump_components));
	asm("" :: "r"(__stop_link_set_rump_components));
}

#endif /* RUMP_USE_CTOR */
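
/*
 * Components announce themselves with RUMP_COMPONENT(), as seen above.
 * A minimal sketch of a registration (the body runs once during
 * rump_init() when the matching component type is initialized;
 * mysubsys_init() is a hypothetical routine):
 *
 *	RUMP_COMPONENT(RUMP_COMPONENT_KERN)
 *	{
 *
 *		mysubsys_init();
 *	}
 */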

int
rump_init_callback(void (*cpuinit_callback)(void))
{
	char buf[256];
	struct timespec bts;
	int64_t sec;
	long nsec;
	struct lwp *l, *initlwp;
	int i, numcpu;

	/* not reentrant */
	if (rump_inited)
		return 0;
	else if (rump_inited == -1)
		panic("%s: host process restart required", __func__);
	else
		rump_inited = 1;

	/* initialize hypervisor */
	if (rumpuser_init(RUMPUSER_VERSION, &hyp) != 0) {
		rumpuser_dprintf("rumpuser init failed\n");
		return EINVAL;
	}

	/* init minimal lwp/cpu context */
	rump_lwproc_init();
	l = &lwp0;
	l->l_cpu = l->l_target_cpu = &rump_bootcpu;
	rump_lwproc_curlwp_set(l);

	/* retrieve env vars which affect the early stage of bootstrap */
	if (rumpuser_getparam("RUMP_THREADS", buf, sizeof(buf)) == 0) {
		rump_threads = *buf != '0';
	}
	if (rumpuser_getparam("RUMP_VERBOSE", buf, sizeof(buf)) == 0) {
		if (*buf != '0')
			boothowto = AB_VERBOSE;
	}

	if (rumpuser_getparam(RUMPUSER_PARAM_NCPU, buf, sizeof(buf)) != 0)
		panic("%s: mandatory hypervisor configuration (NCPU) missing",
		    __func__);
	numcpu = strtoll(buf, NULL, 10);
	if (numcpu < 1) {
		panic("%s: rump kernels are not lightweight enough for %d CPUs",
		    __func__, numcpu);
	}

	rump_thread_init();
	rump_cpus_bootstrap(&numcpu);

	rumpuser_clock_gettime(RUMPUSER_CLOCK_RELWALL, &sec, &nsec);
	bts.tv_sec = sec;
	bts.tv_nsec = nsec;

	initmsgbuf(rump_msgbuf, sizeof(rump_msgbuf));
	aprint_verbose("%s%s", copyright, version);

	rump_intr_init(numcpu);

	rump_tsleep_init();

	rumpuser_mutex_init(&rump_giantlock, RUMPUSER_MTX_SPIN);
	ksyms_init();
	uvm_init();
	evcnt_init();

	kcpuset_sysinit();
	once_init();
	kernconfig_lock_init();
	prop_kern_init();

	kmem_init();

	uvm_ra_init();
	uao_init();

	callout_startup();

	kprintf_init();
	percpu_init();
	pserialize_init();

	kauth_init();

	secmodel_init();
	sysctl_init();
	/*
	 * The above call to sysctl_init() only initializes sysctl nodes
	 * from link sets.  Initialize sysctls in case we used ctors.
	 */
#ifdef RUMP_USE_CTOR
	{
		struct sysctl_setup_chain *ssc;

		while ((ssc = LIST_FIRST(&sysctl_boot_chain)) != NULL) {
			LIST_REMOVE(ssc, ssc_entries);
			ssc->ssc_func(NULL);
		}
	}
#endif /* RUMP_USE_CTOR */

	rnd_init();
	rump_hyperentropy_init();

	procinit();
	proc0_init();
	uid_init();
	chgproccnt(0, 1);

	l->l_proc = &proc0;
	l->l_cred = kauth_cred_hold(l->l_proc->p_cred);

	lwpinit_specificdata();
	lwp_initspecific(&lwp0);

	/* Must be called after lwpinit_specificdata */
	psref_init();

	threadpools_init();

	loginit();

	rump_biglock_init();

	rump_scheduler_init(numcpu);
	/* revert temporary context and schedule a semireal context */
	rump_lwproc_curlwp_clear(l);
	initproc = &proc0; /* borrow proc0 before we get initproc started */
	rump_schedule();
	bootlwp = curlwp;

	inittimecounter();
	ntp_init();

#ifdef KTRACE
	ktrinit();
#endif

	tc_setclock(&bts);

	extern krwlock_t exec_lock;
	rw_init(&exec_lock);

	/* we are mostly go.  do per-cpu subsystem init */
	for (i = 0; i < numcpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		/* attach non-bootstrap CPUs */
		if (i > 0) {
			rump_cpu_attach(ci);
			ncpu++;
		}
		snprintf(ci->ci_cpuname, sizeof ci->ci_cpuname, "cpu%d", i);

		callout_init_cpu(ci);
		softint_init(ci);
		xc_init_cpu(ci);
		pool_cache_cpu_init(ci);
		selsysinit(ci);
		percpu_init_cpu(ci);

		TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
		__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

		aprint_verbose("cpu%d at thinair0: rump virtual cpu\n", i);
	}
	ncpuonline = ncpu;

	/* Once all CPUs are detected, initialize the per-CPU cprng_fast. */
	cprng_init();
	cprng_fast_init();

	mp_online = true;

	if (cpuinit_callback)
		(*cpuinit_callback)();

	/* CPUs are up.  allow kernel threads to run */
	rump_thread_allow(NULL);

	rnd_init_softint();

	kqueue_init();
	iostat_init();
	fd_sys_init();
	module_init();
	module_hook_init();
	devsw_init();
	pipe_init();
	resource_init();
	procinit_sysctl();
	time_init();
	config_init();

	/* start page baroness */
	if (rump_threads) {
		if (kthread_create(PRI_PGDAEMON, KTHREAD_MPSAFE, NULL,
		    uvm_pageout, NULL, &uvm.pagedaemon_lwp, "pdaemon") != 0)
			panic("%s: pagedaemon create failed", __func__);
	} else
		uvm.pagedaemon_lwp = NULL; /* doesn't match curlwp */

	/* process dso's */
	rumpuser_dl_bootstrap(add_linkedin_modules,
	    rump_kernelfsym_load, rump_component_load, add_static_evcnt);

	rump_component_addlocal();
	rump_component_init(RUMP_COMPONENT_KERN);

	/* initialize factions, if present */
	rump_component_init(RUMP__FACTION_VFS);
	/* pnbuf_cache is used even without vfs */
	if (rump_component_count(RUMP__FACTION_VFS) == 0) {
		pnbuf_cache = pool_cache_init(MAXPATHLEN, 0, 0, 0, "pnbufpl",
		    NULL, IPL_NONE, NULL, NULL, NULL);
	}
	rump_component_init(RUMP__FACTION_NET);
	rump_component_init(RUMP__FACTION_DEV);
	KASSERT(rump_component_count(RUMP__FACTION_VFS) <= 1
	    && rump_component_count(RUMP__FACTION_NET) <= 1
	    && rump_component_count(RUMP__FACTION_DEV) <= 1);

	rump_component_init(RUMP_COMPONENT_KERN_VFS);

	/*
	 * if we initialized the tty component above, tty_lock is
	 * now initialized.  otherwise, we need to initialize it.
	 */
	if (!rump_ttycomponent)
		mutex_init(&tty_lock, MUTEX_DEFAULT, IPL_VM);

	cold = 0;

	sysctl_finalize();

	module_init_class(MODULE_CLASS_ANY);

	if (rumpuser_getparam(RUMPUSER_PARAM_HOSTNAME,
	    hostname, MAXHOSTNAMELEN) != 0) {
		panic(
		    "%s: mandatory hypervisor configuration (HOSTNAME) missing",
		    __func__);
	}
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	if (rump_threads)
		vmem_rehash_start();

	/*
	 * Create init (proc 1), used to attach implicit threads in rump.
	 * (note: must be done after vfsinit to get cwdi)
	 */
	initlwp = rump__lwproc_alloclwp(NULL);
	mutex_enter(&proc_lock);
	initproc = proc_find_raw(1);
	mutex_exit(&proc_lock);
	if (initproc == NULL)
		panic("%s: where in the world is initproc?", __func__);
	strlcpy(initproc->p_comm, "rumplocal", sizeof(initproc->p_comm));

	rump_component_init(RUMP_COMPONENT_POSTINIT);

	/* load syscalls */
	rump_component_init(RUMP_COMPONENT_SYSCALL);

	/* component inits done */
	bootlwp = NULL;

	/* open 0/1/2 for init */
	KASSERT(rump_lwproc_curlwp() == NULL);
	rump_lwproc_switch(initlwp);
	rump_consdev_init();
	rump_lwproc_switch(NULL);

	/* release cpu */
	rump_unschedule();

	return 0;
}

int
rump_init(void)
{
	return rump_init_callback(NULL);
}

/* historic compat */
__strong_alias(rump__init,rump_init);
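
/*
 * Typical client bootstrap, for reference (a sketch, not part of this
 * file; rump_init() is declared in <rump/rump.h>):
 *
 *	if (rump_init() != 0)
 *		errx(1, "rump kernel bootstrap failed");
 */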

static int compcounter[RUMP_COMPONENT_MAX];
static int compinited[RUMP_COMPONENT_MAX];

/*
 * Yea, this is O(n^2), but we're only looking at a handful of components.
 * Components are always initialized from the thread that called rump_init().
 */
static LIST_HEAD(, rump_component) rchead = LIST_HEAD_INITIALIZER(rchead);

#ifdef RUMP_USE_CTOR
struct modinfo_boot_chain modinfo_boot_chain \
    = LIST_HEAD_INITIALIZER(modinfo_boot_chain);

static void
rump_component_addlocal(void)
{
	struct modinfo_chain *mc;

	while ((mc = LIST_FIRST(&modinfo_boot_chain)) != NULL) {
		LIST_REMOVE(mc, mc_entries);
		module_builtin_add(&mc->mc_info, 1, false);
	}
}

#else /* RUMP_USE_CTOR */

static void
rump_component_addlocal(void)
{
	__link_set_decl(rump_components, struct rump_component);
	struct rump_component *const *rc;

	__link_set_foreach(rc, rump_components) {
		rump_component_load(*rc);
	}
}
#endif /* RUMP_USE_CTOR */

void
rump_component_load(const struct rump_component *rc_const)
{
	struct rump_component *rc, *rc_iter;

	/* time for rump component loading and unloading has passed */
	if (!cold)
		return;

	/*
	 * XXX: this is ok since the "const" was removed from the
	 * definition of RUMP_COMPONENT().
	 *
	 * However, to preserve the hypercall interface, the const
	 * remains here.  This can be fixed in the next hypercall revision.
	 */
	rc = __UNCONST(rc_const);

	KASSERT(!rump_inited || curlwp == bootlwp);

	LIST_FOREACH(rc_iter, &rchead, rc_entries) {
		if (rc_iter == rc)
			return;
	}

	LIST_INSERT_HEAD(&rchead, rc, rc_entries);
	KASSERT(rc->rc_type < RUMP_COMPONENT_MAX);
	compcounter[rc->rc_type]++;
}

void
rump_component_unload(struct rump_component *rc)
{

	/*
	 * Checking for cold is enough because rump_init() both
	 * flips it and handles component loading.
	 */
	if (!cold)
		return;

	LIST_REMOVE(rc, rc_entries);
}

int
rump_component_count(enum rump_component_type type)
{

	KASSERT(curlwp == bootlwp);
	KASSERT(type < RUMP_COMPONENT_MAX);
	return compcounter[type];
}

void
rump_component_init(enum rump_component_type type)
{
	struct rump_component *rc, *rc_next, rc_marker;

	KASSERT(curlwp == bootlwp);
	KASSERT(!compinited[type]);

	rc_marker.rc_type = RUMP_COMPONENT_MAX;
	rc_marker.rc_init = NULL;
	for (rc = LIST_FIRST(&rchead); rc != NULL; rc = rc_next) {
		if (rc->rc_type == type) {
			LIST_INSERT_AFTER(rc, &rc_marker, rc_entries);
			rc->rc_init();
			LIST_REMOVE(rc, rc_entries);
			rc_next = LIST_NEXT(&rc_marker, rc_entries);
			LIST_REMOVE(&rc_marker, rc_entries);
		} else {
			rc_next = LIST_NEXT(rc, rc_entries);
		}
	}
	compinited[type] = 1;
}

/*
 * Initialize a module which has already been loaded and linked
 * with dlopen().  This is fundamentally the same as a builtin module.
 *
 * XXX: this interface does not really work in the RUMP_USE_CTOR case,
 * but I'm not sure it's anything to cry about.  If feeling blue,
 * things could somehow be handled via modinfo_boot_chain.
 */
int
rump_module_init(const struct modinfo * const *mip, size_t nmodinfo)
{

	return module_builtin_add(mip, nmodinfo, true);
}

/*
 * Finish module (flawless victory, fatality!).
 */
int
rump_module_fini(const struct modinfo *mi)
{

	return module_builtin_remove(mi, true);
}
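
/*
 * Expected call pattern, sketched with hypothetical variables ("mip"
 * would point to the dlopen()'d module's modinfo vector):
 *
 *	error = rump_module_init(mip, nmodinfo);
 *	...
 *	error = rump_module_fini(mip[0]);
 */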

/*
 * Add loaded and linked module to the builtin list.  It will
 * later be initialized with module_init_class().
 */

static void
add_linkedin_modules(const struct modinfo * const *mip, size_t nmodinfo)
{

	module_builtin_add(mip, nmodinfo, false);
}

/*
 * Add an evcnt.
 */
static void
add_static_evcnt(struct evcnt *ev)
{

	evcnt_attach_static(ev);
}

int
rump_kernelfsym_load(void *symtab, uint64_t symsize,
    char *strtab, uint64_t strsize)
{
	static int inited = 0;
	Elf64_Ehdr ehdr;

	if (inited)
		return EBUSY;
	inited = 1;

	/*
	 * Use 64bit header since it's bigger.  Shouldn't make a
	 * difference, since we're passing in all zeroes anyway.
	 */
	memset(&ehdr, 0, sizeof(ehdr));
	ksyms_addsyms_explicit(&ehdr, symtab, symsize, strtab, strsize);

	return 0;
}

int
rump_boot_gethowto(void)
{

	return boothowto;
}

void
rump_boot_sethowto(int howto)
{

	boothowto = howto;
}
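
/*
 * For example, a client can request a verbose boot before
 * bootstrapping the kernel (a sketch; AB_VERBOSE comes from
 * <sys/reboot.h>):
 *
 *	rump_boot_sethowto(AB_VERBOSE);
 *	rump_init();
 */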

int
rump_getversion(void)
{

	return __NetBSD_Version__;
}
/* compat */
__strong_alias(rump_pub_getversion,rump_getversion);

/*
 * Note: may be called unscheduled.  Not fully safe since no locking
 * of allevents (currently that's not even available).
 */
void
rump_printevcnts(void)
{
	struct evcnt *ev;

	TAILQ_FOREACH(ev, &allevents, ev_list)
		rumpuser_dprintf("%s / %s: %" PRIu64 "\n",
		    ev->ev_group, ev->ev_name, ev->ev_count);
}

/*
 * If you use this interface ... well ... all bets are off.
 * The original purpose is for the p2k fs server library to be
 * able to use the same pid/lid for VOPs as the host kernel.
 */
void
rump_allbetsareoff_setid(pid_t pid, int lid)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	l->l_lid = lid;
	p->p_pid = pid;
}

static void
ipiemu(void *a1, void *a2)
{

	xc__highpri_intr(NULL);
}

void
rump_xc_highpri(struct cpu_info *ci)
{

	if (ci)
		xc_unicast(0, ipiemu, NULL, NULL, ci);
	else
		xc_broadcast(0, ipiemu, NULL, NULL);
}

int
rump_syscall(int num, void *data, size_t dlen, register_t *retval)
{
	struct proc *p;
	struct emul *e;
	struct sysent *callp;
	const int *etrans = NULL;
	int rv;

	rump_schedule();
	p = curproc;
	e = p->p_emul;
#ifndef __HAVE_MINIMAL_EMUL
	num &= e->e_nsysent - 1;
#else
	num &= SYS_NSYSENT - 1;
#endif
	callp = e->e_sysent + num;

	rv = sy_invoke(callp, curlwp, data, retval, num);

	/*
	 * I hope that (!__HAVE_MINIMAL_EMUL || __HAVE_SYSCALL_INTERN) is
	 * an invariant ...
	 */
#if !defined(__HAVE_MINIMAL_EMUL)
	etrans = e->e_errno;
#elif defined(__HAVE_SYSCALL_INTERN)
	etrans = p->p_emuldata;
#endif

	if (etrans) {
		rv = etrans[rv];
		/*
		 * XXX: small hack since Linux etrans vectors on some
		 * archs contain negative errnos, but rump_syscalls
		 * uses the -1 + errno ABI.  Note that these
		 * negative values are always the result of translation,
		 * otherwise the above translation method would not
		 * work very well.
		 */
		if (rv < 0)
			rv = -rv;
	}
	rump_unschedule();

	return rv;
}
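
/*
 * Example invocation (a sketch with hypothetical values; the args
 * struct comes from <sys/syscallargs.h> and retval needs room for
 * two register values):
 *
 *	struct sys_mkdir_args args;
 *	register_t retval[2] = { 0, 0 };
 *
 *	SCARG(&args, path) = "/tmp";
 *	SCARG(&args, mode) = 0755;
 *	error = rump_syscall(SYS_mkdir, &args, sizeof(args), retval);
 */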

void
rump_syscall_boot_establish(const struct rump_onesyscall *calls, size_t ncall)
{
	struct sysent *callp;
	size_t i;

	for (i = 0; i < ncall; i++) {
		callp = rump_sysent + calls[i].ros_num;
		KASSERT(bootlwp != NULL
		    && callp->sy_call == (sy_call_t *)(void *)enosys);
		callp->sy_call = calls[i].ros_handler;
	}
}

struct rump_boot_etfs *ebstart;
void
rump_boot_etfs_register(struct rump_boot_etfs *eb)
{

	/*
	 * Could use atomics, but, since the caller would need to
	 * synchronize against calling rump_init() anyway, it's easier
	 * to just specify the interface as "caller serializes".  This
	 * solve-by-specification approach avoids the grey area of
	 * using atomics before rump_init() runs.
	 */
	eb->_eb_next = ebstart;
	eb->eb_status = -1;
	ebstart = eb;
}
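
/*
 * Usage sketch (hypothetical paths; field names per <rump/rump.h>):
 *
 *	static struct rump_boot_etfs eb = {
 *		.eb_key = "/dev/bootdisk",
 *		.eb_hostpath = "./disk.img",
 *		.eb_type = RUMP_ETFS_BLK,
 *		.eb_begin = 0,
 *		.eb_size = RUMP_ETFS_SIZE_ENDOFF,
 *	};
 *
 *	rump_boot_etfs_register(&eb);
 *	rump_init();
 */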