/*	$NetBSD: rumpuser_sp.c,v 1.36 2011/01/14 13:12:14 pooka Exp $	*/

/*
 * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Sysproxy routines.  This provides system RPC support over host sockets.
 * The most notable limitation is that the client and server must share
 * the same ABI.  This does not mean that they have to be the same
 * machine or that they need to run the same version of the host OS,
 * just that they must agree on the data structures.  This even *might*
 * work correctly from one hardware architecture to another.
 */
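
/*
 * For orientation, here is a rough sketch (illustrative only -- the real
 * client side lives in librumpclient, and the names and values below are
 * made up) of what "RPC over host sockets" amounts to from the client's
 * point of view: connect to the server socket and read the protocol
 * banner that serv_handleconn() writes out before any requests flow.
 */
#if 0
static int
example_client_connect(const char *host, uint16_t port)
{
	struct sockaddr_in sin;
	char buf[256];
	ssize_t n;
	int s;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if (inet_pton(AF_INET, host, &sin.sin_addr) != 1)
		return EINVAL;

	if ((s = socket(AF_INET, SOCK_STREAM, 0)) == -1)
		return errno;
	if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		close(s);
		return errno;
	}

	/* server banner looks like "RUMPSP-0.1-<ostype>-<osrelease>/<machine>\n" */
	if ((n = read(s, buf, sizeof(buf)-1)) > 0)
		buf[n] = '\0';

	close(s);
	return 0;
}
#endif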

#include <sys/cdefs.h>
__RCSID("$NetBSD: rumpuser_sp.c,v 1.36 2011/01/14 13:12:14 pooka Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/mman.h>
#include <sys/socket.h>

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rump/rump.h> /* XXX: for rfork flags */
#include <rump/rumpuser.h>
#include "rumpuser_int.h"

#include "sp_common.c"

#ifndef MAXCLI
#define MAXCLI 256
#endif
#ifndef MAXWORKER
#define MAXWORKER 128
#endif
#ifndef IDLEWORKER
#define IDLEWORKER 16
#endif
int rumpsp_maxworker = MAXWORKER;
int rumpsp_idleworker = IDLEWORKER;

static struct pollfd pfdlist[MAXCLI];
static struct spclient spclist[MAXCLI];
static unsigned int disco;
static volatile int spfini;

static struct rumpuser_sp_ops spops;

static char banner[MAXBANNER];

#define PROTOMAJOR 0
#define PROTOMINOR 1
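
/*
 * These numbers end up in the banner written to every new connection
 * (see rumpuser_sp_init() and serv_handleconn()).  For a hypothetical
 * NetBSD 5.99.48/amd64 host the banner would read
 * "RUMPSP-0.1-NetBSD-5.99.48/amd64\n" (example values only).
 */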

struct prefork {
	uint32_t pf_auth[AUTHLEN];
	struct lwp *pf_lwp;

	LIST_ENTRY(prefork) pf_entries;		/* global list */
	LIST_ENTRY(prefork) pf_spcentries;	/* linked from forking spc */
};
static LIST_HEAD(, prefork) preforks = LIST_HEAD_INITIALIZER(preforks);
static pthread_mutex_t pfmtx;
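
/*
 * Rough lifecycle of a prefork entry (see handlereq() below): a connected
 * client sends RUMPSP_PREFORK, the server rforks a new process context,
 * generates AUTHLEN words of random authentication data and queues them
 * here.  When the forked client reconnects, it presents the same auth
 * data in a HANDSHAKE_FORK handshake, which is matched against this list
 * to attach the new connection to the previously created process.
 */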

/*
 * This version is for the server.  It's optimized for multiple threads
 * and is *NOT* reentrant wrt signals.
 */
static int
waitresp(struct spclient *spc, struct respwait *rw)
{
	int spcstate;
	int rv = 0;

	pthread_mutex_lock(&spc->spc_mtx);
	sendunlockl(spc);
	while (!rw->rw_done && spc->spc_state != SPCSTATE_DYING) {
		pthread_cond_wait(&rw->rw_cv, &spc->spc_mtx);
	}
	TAILQ_REMOVE(&spc->spc_respwait, rw, rw_entries);
	spcstate = spc->spc_state;
	pthread_mutex_unlock(&spc->spc_mtx);

	pthread_cond_destroy(&rw->rw_cv);

	if (rv)
		return rv;
	if (spcstate == SPCSTATE_DYING)
		return ENOTCONN;
	return rw->rw_error;
}
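
/*
 * Typical caller pattern for the above (cf. copyin_req() and
 * anonmmap_req() below): register the wait before sending so the
 * response cannot race past us, and back out if the send fails.
 */
#if 0
	putwait(spc, &rw, &rhdr);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	if (rv) {
		unputwait(spc, &rw);
		return rv;
	}
	rv = waitresp(spc, &rw);
#endif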

/*
 * Manual wrappers, since librump does not have access to the
 * user namespace wrapped interfaces.
 */

static void
lwproc_switch(struct lwp *l)
{

	spops.spop_schedule();
	spops.spop_lwproc_switch(l);
	spops.spop_unschedule();
}

static void
lwproc_release(void)
{

	spops.spop_schedule();
	spops.spop_lwproc_release();
	spops.spop_unschedule();
}

static int
lwproc_rfork(struct spclient *spc, int flags)
{
	int rv;

	spops.spop_schedule();
	rv = spops.spop_lwproc_rfork(spc, flags);
	spops.spop_unschedule();

	return rv;
}

static int
lwproc_newlwp(pid_t pid)
{
	int rv;

	spops.spop_schedule();
	rv = spops.spop_lwproc_newlwp(pid);
	spops.spop_unschedule();

	return rv;
}

static struct lwp *
lwproc_curlwp(void)
{
	struct lwp *l;

	spops.spop_schedule();
	l = spops.spop_lwproc_curlwp();
	spops.spop_unschedule();

	return l;
}

static pid_t
lwproc_getpid(void)
{
	pid_t p;

	spops.spop_schedule();
	p = spops.spop_getpid();
	spops.spop_unschedule();

	return p;
}

static void
lwproc_procexit(void)
{

	spops.spop_schedule();
	spops.spop_procexit();
	spops.spop_unschedule();
}

static int
rumpsyscall(int sysnum, void *data, register_t *retval)
{
	int rv;

	spops.spop_schedule();
	rv = spops.spop_syscall(sysnum, data, retval);
	spops.spop_unschedule();

	return rv;
}

static uint64_t
nextreq(struct spclient *spc)
{
	uint64_t nw;

	pthread_mutex_lock(&spc->spc_mtx);
	nw = spc->spc_nextreq++;
	pthread_mutex_unlock(&spc->spc_mtx);

	return nw;
}

static void
send_error_resp(struct spclient *spc, uint64_t reqno, int error)
{
	struct rsp_hdr rhdr;

	rhdr.rsp_len = sizeof(rhdr);
	rhdr.rsp_reqno = reqno;
	rhdr.rsp_class = RUMPSP_ERROR;
	rhdr.rsp_type = 0;
	rhdr.rsp_error = error;

	sendlock(spc);
	(void)dosend(spc, &rhdr, sizeof(rhdr));
	sendunlock(spc);
}
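
/*
 * All of the senders below follow the same framing convention: a struct
 * rsp_hdr with rsp_len covering the header plus any payload, followed by
 * the payload itself, written while holding the send lock so that
 * concurrent workers cannot interleave frames on the socket.
 */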

static int
send_handshake_resp(struct spclient *spc, uint64_t reqno, int error)
{
	struct rsp_hdr rhdr;
	int rv;

	rhdr.rsp_len = sizeof(rhdr) + sizeof(error);
	rhdr.rsp_reqno = reqno;
	rhdr.rsp_class = RUMPSP_RESP;
	rhdr.rsp_type = RUMPSP_HANDSHAKE;
	rhdr.rsp_error = 0;

	sendlock(spc);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	rv = dosend(spc, &error, sizeof(error));
	sendunlock(spc);

	return rv;
}

static int
send_syscall_resp(struct spclient *spc, uint64_t reqno, int error,
	register_t *retval)
{
	struct rsp_hdr rhdr;
	struct rsp_sysresp sysresp;
	int rv;

	rhdr.rsp_len = sizeof(rhdr) + sizeof(sysresp);
	rhdr.rsp_reqno = reqno;
	rhdr.rsp_class = RUMPSP_RESP;
	rhdr.rsp_type = RUMPSP_SYSCALL;
	rhdr.rsp_sysnum = 0;

	sysresp.rsys_error = error;
	memcpy(sysresp.rsys_retval, retval, sizeof(sysresp.rsys_retval));

	sendlock(spc);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	rv = dosend(spc, &sysresp, sizeof(sysresp));
	sendunlock(spc);

	return rv;
}

static int
send_prefork_resp(struct spclient *spc, uint64_t reqno, uint32_t *auth)
{
	struct rsp_hdr rhdr;
	int rv;

	rhdr.rsp_len = sizeof(rhdr) + AUTHLEN*sizeof(*auth);
	rhdr.rsp_reqno = reqno;
	rhdr.rsp_class = RUMPSP_RESP;
	rhdr.rsp_type = RUMPSP_PREFORK;
	rhdr.rsp_sysnum = 0;

	sendlock(spc);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	rv = dosend(spc, auth, AUTHLEN*sizeof(*auth));
	sendunlock(spc);

	return rv;
}

static int
copyin_req(struct spclient *spc, const void *remaddr, size_t *dlen,
	int wantstr, void **resp)
{
	struct rsp_hdr rhdr;
	struct rsp_copydata copydata;
	struct respwait rw;
	int rv;

	DPRINTF(("copyin_req: %zu bytes from %p\n", *dlen, remaddr));

	rhdr.rsp_len = sizeof(rhdr) + sizeof(copydata);
	rhdr.rsp_class = RUMPSP_REQ;
	if (wantstr)
		rhdr.rsp_type = RUMPSP_COPYINSTR;
	else
		rhdr.rsp_type = RUMPSP_COPYIN;
	rhdr.rsp_sysnum = 0;

	copydata.rcp_addr = __UNCONST(remaddr);
	copydata.rcp_len = *dlen;

	putwait(spc, &rw, &rhdr);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	rv = dosend(spc, &copydata, sizeof(copydata));
	if (rv) {
		unputwait(spc, &rw);
		return rv;
	}

	rv = waitresp(spc, &rw);

	DPRINTF(("copyin: response %d\n", rv));

	*resp = rw.rw_data;
	if (wantstr)
		*dlen = rw.rw_dlen;

	return rv;
}

static int
send_copyout_req(struct spclient *spc, const void *remaddr,
	const void *data, size_t dlen)
{
	struct rsp_hdr rhdr;
	struct rsp_copydata copydata;
	int rv;

	DPRINTF(("copyout_req (async): %zu bytes to %p\n", dlen, remaddr));

	rhdr.rsp_len = sizeof(rhdr) + sizeof(copydata) + dlen;
	rhdr.rsp_reqno = nextreq(spc);
	rhdr.rsp_class = RUMPSP_REQ;
	rhdr.rsp_type = RUMPSP_COPYOUT;
	rhdr.rsp_sysnum = 0;

	copydata.rcp_addr = __UNCONST(remaddr);
	copydata.rcp_len = dlen;

	sendlock(spc);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	rv = dosend(spc, &copydata, sizeof(copydata));
	rv = dosend(spc, data, dlen);
	sendunlock(spc);

	return rv;
}

static int
anonmmap_req(struct spclient *spc, size_t howmuch, void **resp)
{
	struct rsp_hdr rhdr;
	struct respwait rw;
	int rv;

	DPRINTF(("anonmmap_req: %zu bytes\n", howmuch));

	rhdr.rsp_len = sizeof(rhdr) + sizeof(howmuch);
	rhdr.rsp_class = RUMPSP_REQ;
	rhdr.rsp_type = RUMPSP_ANONMMAP;
	rhdr.rsp_sysnum = 0;

	putwait(spc, &rw, &rhdr);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	rv = dosend(spc, &howmuch, sizeof(howmuch));
	if (rv) {
		unputwait(spc, &rw);
		return rv;
	}

	rv = waitresp(spc, &rw);

	*resp = rw.rw_data;

	DPRINTF(("anonmmap: mapped at %p\n", **(void ***)resp));

	return rv;
}

static int
send_raise_req(struct spclient *spc, int signo)
{
	struct rsp_hdr rhdr;
	int rv;

	rhdr.rsp_len = sizeof(rhdr);
	rhdr.rsp_class = RUMPSP_REQ;
	rhdr.rsp_type = RUMPSP_RAISE;
	rhdr.rsp_signo = signo;

	sendlock(spc);
	rv = dosend(spc, &rhdr, sizeof(rhdr));
	sendunlock(spc);

	return rv;
}

static void
spcref(struct spclient *spc)
{

	pthread_mutex_lock(&spc->spc_mtx);
	spc->spc_refcnt++;
	pthread_mutex_unlock(&spc->spc_mtx);
}

static void
spcrelease(struct spclient *spc)
{
	int ref;

	pthread_mutex_lock(&spc->spc_mtx);
	ref = --spc->spc_refcnt;
	pthread_mutex_unlock(&spc->spc_mtx);

	if (ref > 0)
		return;

	DPRINTF(("rump_sp: spcrelease: spc %p fd %d\n", spc, spc->spc_fd));

	_DIAGASSERT(TAILQ_EMPTY(&spc->spc_respwait));
	_DIAGASSERT(spc->spc_buf == NULL);

	if (spc->spc_mainlwp) {
		lwproc_switch(spc->spc_mainlwp);
		lwproc_release();
	}
	spc->spc_mainlwp = NULL;

	close(spc->spc_fd);
	spc->spc_fd = -1;
	spc->spc_state = SPCSTATE_NEW;

	atomic_inc_uint(&disco);
}
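
/*
 * Reference counting in a nutshell: each connection starts with one
 * reference from serv_handleconn(), and every syscall queued for a worker
 * takes another one via spcref() in handlereq().  When the last reference
 * is dropped, spcrelease() above releases the client's main lwp, closes
 * the descriptor and bumps "disco" so that the poll loop can garbage
 * collect the slot.
 */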

static void
serv_handledisco(unsigned int idx)
{
	struct spclient *spc = &spclist[idx];

	DPRINTF(("rump_sp: disconnecting [%u]\n", idx));

	pfdlist[idx].fd = -1;
	pfdlist[idx].revents = 0;
	pthread_mutex_lock(&spc->spc_mtx);
	spc->spc_state = SPCSTATE_DYING;
	kickall(spc);
	sendunlockl(spc);
	pthread_mutex_unlock(&spc->spc_mtx);

	if (spc->spc_mainlwp) {
		lwproc_switch(spc->spc_mainlwp);
		lwproc_procexit();
		lwproc_switch(NULL);
	}

	/*
	 * Nobody's going to attempt to send/receive anymore,
	 * so reinit info relevant to that.
	 */
	/*LINTED:pointer casts may be ok*/
	memset((char *)spc + SPC_ZEROFF, 0, sizeof(*spc) - SPC_ZEROFF);

	spcrelease(spc);
}

static void
serv_shutdown(void)
{
	struct spclient *spc;
	unsigned int i;

	for (i = 1; i < MAXCLI; i++) {
		spc = &spclist[i];
		if (spc->spc_fd == -1)
			continue;

		shutdown(spc->spc_fd, SHUT_RDWR);
		serv_handledisco(i);

		spcrelease(spc);
	}
}

static unsigned
serv_handleconn(int fd, connecthook_fn connhook, int busy)
{
	struct sockaddr_storage ss;
	socklen_t sl = sizeof(ss);
	int newfd, flags;
	unsigned i;

	/*LINTED: cast ok */
	newfd = accept(fd, (struct sockaddr *)&ss, &sl);
	if (newfd == -1)
		return 0;

	if (busy) {
		close(newfd); /* EBUSY */
		return 0;
	}

	flags = fcntl(newfd, F_GETFL, 0);
	if (fcntl(newfd, F_SETFL, flags | O_NONBLOCK) == -1) {
		close(newfd);
		return 0;
	}

	if (connhook(newfd) != 0) {
		close(newfd);
		return 0;
	}

	/* write out a banner for the client */
	if (send(newfd, banner, strlen(banner), MSG_NOSIGNAL)
	    != (ssize_t)strlen(banner)) {
		close(newfd);
		return 0;
	}

	/* find empty slot the simple way */
	for (i = 0; i < MAXCLI; i++) {
		if (pfdlist[i].fd == -1 && spclist[i].spc_state == SPCSTATE_NEW)
			break;
	}

	assert(i < MAXCLI);

	pfdlist[i].fd = newfd;
	spclist[i].spc_fd = newfd;
	spclist[i].spc_istatus = SPCSTATUS_BUSY; /* dedicated receiver */
	spclist[i].spc_refcnt = 1;

	TAILQ_INIT(&spclist[i].spc_respwait);

	DPRINTF(("rump_sp: added new connection fd %d at idx %u\n", newfd, i));

	return i;
}

static void
serv_handlesyscall(struct spclient *spc, struct rsp_hdr *rhdr, uint8_t *data)
{
	register_t retval[2] = {0, 0};
	int rv, sysnum;

	sysnum = (int)rhdr->rsp_sysnum;
	DPRINTF(("rump_sp: handling syscall %d from client %d\n",
	    sysnum, spc->spc_pid));

	lwproc_newlwp(spc->spc_pid);
	rv = rumpsyscall(sysnum, data, retval);
	lwproc_release();

	DPRINTF(("rump_sp: got return value %d & %d/%d\n",
	    rv, retval[0], retval[1]));

	send_syscall_resp(spc, rhdr->rsp_reqno, rv, retval);
}

struct sysbouncearg {
	struct spclient *sba_spc;
	struct rsp_hdr sba_hdr;
	uint8_t *sba_data;

	TAILQ_ENTRY(sysbouncearg) sba_entries;
};
static pthread_mutex_t sbamtx;
static pthread_cond_t sbacv;
static int nworker, idleworker;
static TAILQ_HEAD(, sysbouncearg) syslist = TAILQ_HEAD_INITIALIZER(syslist);
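
/*
 * Syscall requests are bounced to a pool of worker threads so that a
 * blocking syscall does not stall the receive loop: handlereq() queues a
 * struct sysbouncearg on syslist and either wakes an idle worker or
 * creates a new one (up to rumpsp_maxworker).  A worker exits once more
 * than rumpsp_idleworker of them would otherwise sit idle.
 */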

/*ARGSUSED*/
static void *
serv_syscallbouncer(void *arg)
{
	struct sysbouncearg *sba;

	for (;;) {
		pthread_mutex_lock(&sbamtx);
		if (idleworker >= rumpsp_idleworker) {
			nworker--;
			pthread_mutex_unlock(&sbamtx);
			break;
		}
		idleworker++;
		while (TAILQ_EMPTY(&syslist)) {
			pthread_cond_wait(&sbacv, &sbamtx);
		}

		sba = TAILQ_FIRST(&syslist);
		TAILQ_REMOVE(&syslist, sba, sba_entries);
		idleworker--;
		pthread_mutex_unlock(&sbamtx);

		serv_handlesyscall(sba->sba_spc,
		    &sba->sba_hdr, sba->sba_data);
		spcrelease(sba->sba_spc);
		free(sba->sba_data);
		free(sba);
	}

	return NULL;
}

static int
sp_copyin(void *arg, const void *raddr, void *laddr, size_t *len, int wantstr)
{
	struct spclient *spc = arg;
	void *rdata = NULL; /* XXXuninit */
	int rv, nlocks;

	rumpuser__kunlock(0, &nlocks, NULL);

	rv = copyin_req(spc, raddr, len, wantstr, &rdata);
	if (rv)
		goto out;

	memcpy(laddr, rdata, *len);
	free(rdata);

 out:
	rumpuser__klock(nlocks, NULL);
	if (rv)
		return EFAULT;
	return 0;
}

int
rumpuser_sp_copyin(void *arg, const void *raddr, void *laddr, size_t len)
{

	return sp_copyin(arg, raddr, laddr, &len, 0);
}

int
rumpuser_sp_copyinstr(void *arg, const void *raddr, void *laddr, size_t *len)
{

	return sp_copyin(arg, raddr, laddr, len, 1);
}

static int
sp_copyout(void *arg, const void *laddr, void *raddr, size_t dlen)
{
	struct spclient *spc = arg;
	int nlocks, rv;

	rumpuser__kunlock(0, &nlocks, NULL);
	rv = send_copyout_req(spc, raddr, laddr, dlen);
	rumpuser__klock(nlocks, NULL);

	if (rv)
		return EFAULT;
	return 0;
}

int
rumpuser_sp_copyout(void *arg, const void *laddr, void *raddr, size_t dlen)
{

	return sp_copyout(arg, laddr, raddr, dlen);
}

int
rumpuser_sp_copyoutstr(void *arg, const void *laddr, void *raddr, size_t *dlen)
{

	return sp_copyout(arg, laddr, raddr, *dlen);
}

int
rumpuser_sp_anonmmap(void *arg, size_t howmuch, void **addr)
{
	struct spclient *spc = arg;
	void *resp, *rdata;
	int nlocks, rv;

	rumpuser__kunlock(0, &nlocks, NULL);

	rv = anonmmap_req(spc, howmuch, &rdata);
	if (rv) {
		rv = EFAULT;
		goto out;
	}

	resp = *(void **)rdata;
	free(rdata);

	if (resp == NULL) {
		rv = ENOMEM;
	}

	*addr = resp;

 out:
	rumpuser__klock(nlocks, NULL);

	if (rv)
		return rv;
	return 0;
}

int
rumpuser_sp_raise(void *arg, int signo)
{
	struct spclient *spc = arg;
	int rv, nlocks;

	rumpuser__kunlock(0, &nlocks, NULL);
	rv = send_raise_req(spc, signo);
	rumpuser__klock(nlocks, NULL);

	return rv;
}

/*
 *
 * Startup routines and mainloop for server.
 *
 */

struct spservarg {
	int sps_sock;
	connecthook_fn sps_connhook;
};

static pthread_attr_t pattr_detached;
static void
handlereq(struct spclient *spc)
{
	struct sysbouncearg *sba;
	pthread_t pt;
	int retries, error, i;

	if (__predict_false(spc->spc_state == SPCSTATE_NEW)) {
		if (spc->spc_hdr.rsp_type != RUMPSP_HANDSHAKE) {
			send_error_resp(spc, spc->spc_hdr.rsp_reqno, EAUTH);
			shutdown(spc->spc_fd, SHUT_RDWR);
			spcfreebuf(spc);
			return;
		}

		if (spc->spc_hdr.rsp_handshake == HANDSHAKE_GUEST) {
			if ((error = lwproc_rfork(spc, RUMP_RFCFDG)) != 0) {
				shutdown(spc->spc_fd, SHUT_RDWR);
			}

			spcfreebuf(spc);
			if (error)
				return;

			spc->spc_mainlwp = lwproc_curlwp();

			send_handshake_resp(spc, spc->spc_hdr.rsp_reqno, 0);
		} else if (spc->spc_hdr.rsp_handshake == HANDSHAKE_FORK) {
			struct lwp *tmpmain;
			struct prefork *pf;
			struct handshake_fork *rfp;
			uint64_t reqno;
			int cancel;

			reqno = spc->spc_hdr.rsp_reqno;
			if (spc->spc_off-HDRSZ != sizeof(*rfp)) {
				send_error_resp(spc, reqno, EINVAL);
				shutdown(spc->spc_fd, SHUT_RDWR);
				spcfreebuf(spc);
				return;
			}

			/*LINTED*/
			rfp = (void *)spc->spc_buf;
			cancel = rfp->rf_cancel;

			pthread_mutex_lock(&pfmtx);
			LIST_FOREACH(pf, &preforks, pf_entries) {
				if (memcmp(rfp->rf_auth, pf->pf_auth,
				    sizeof(rfp->rf_auth)) == 0) {
					LIST_REMOVE(pf, pf_entries);
					LIST_REMOVE(pf, pf_spcentries);
					break;
				}
			}
			pthread_mutex_unlock(&pfmtx);
			spcfreebuf(spc);

			if (!pf) {
				send_error_resp(spc, reqno, ESRCH);
				shutdown(spc->spc_fd, SHUT_RDWR);
				return;
			}

			tmpmain = pf->pf_lwp;
			free(pf);
			lwproc_switch(tmpmain);
			if (cancel) {
				lwproc_release();
				shutdown(spc->spc_fd, SHUT_RDWR);
				return;
			}

			/*
			 * So, we forked already during "prefork" to save
			 * the file descriptors from a parent exit
			 * race condition.  But now we need to fork
			 * a second time since the initial fork has
			 * the wrong spc pointer.  (yea, optimize
			 * interfaces some day if anyone cares)
			 */
			if ((error = lwproc_rfork(spc, 0)) != 0) {
				send_error_resp(spc, reqno, error);
				shutdown(spc->spc_fd, SHUT_RDWR);
				lwproc_release();
				return;
			}
			spc->spc_mainlwp = lwproc_curlwp();
			lwproc_switch(tmpmain);
			lwproc_release();
			lwproc_switch(spc->spc_mainlwp);

			send_handshake_resp(spc, reqno, 0);
		}

		spc->spc_pid = lwproc_getpid();

		DPRINTF(("rump_sp: handshake for client %p complete, pid %d\n",
		    spc, spc->spc_pid));

		lwproc_switch(NULL);
		spc->spc_state = SPCSTATE_RUNNING;
		return;
	}

	if (__predict_false(spc->spc_hdr.rsp_type == RUMPSP_PREFORK)) {
		struct prefork *pf;
		uint64_t reqno;
		uint32_t auth[AUTHLEN];

		DPRINTF(("rump_sp: prefork handler executing for %p\n", spc));
		reqno = spc->spc_hdr.rsp_reqno;
		spcfreebuf(spc);

		pf = malloc(sizeof(*pf));
		if (pf == NULL) {
			send_error_resp(spc, reqno, ENOMEM);
			return;
		}

		/*
		 * Use the client's main lwp to fork.  This is never used by
		 * worker threads (except if the spc refcount goes to 0),
		 * so we can safely use it here.
		 */
		lwproc_switch(spc->spc_mainlwp);
		if ((error = lwproc_rfork(spc, RUMP_RFFDG)) != 0) {
			DPRINTF(("rump_sp: fork failed: %d (%p)\n", error, spc));
			send_error_resp(spc, reqno, error);
			lwproc_switch(NULL);
			free(pf);
			return;
		}

		/* Ok, we have a new process context and a new curlwp */
		for (i = 0; i < AUTHLEN; i++) {
			pf->pf_auth[i] = auth[i] = arc4random();
		}
		pf->pf_lwp = lwproc_curlwp();
		lwproc_switch(NULL);

		pthread_mutex_lock(&pfmtx);
		LIST_INSERT_HEAD(&preforks, pf, pf_entries);
		LIST_INSERT_HEAD(&spc->spc_pflist, pf, pf_spcentries);
		pthread_mutex_unlock(&pfmtx);

		DPRINTF(("rump_sp: prefork handler success %p\n", spc));

		send_prefork_resp(spc, reqno, auth);
		return;
	}

	if (__predict_false(spc->spc_hdr.rsp_type != RUMPSP_SYSCALL)) {
		send_error_resp(spc, spc->spc_hdr.rsp_reqno, EINVAL);
		spcfreebuf(spc);
		return;
	}

	retries = 0;
	while ((sba = malloc(sizeof(*sba))) == NULL) {
		if (nworker == 0 || retries > 10) {
			send_error_resp(spc, spc->spc_hdr.rsp_reqno, EAGAIN);
			spcfreebuf(spc);
			return;
		}
		/* slim chance of more memory? */
		usleep(10000);
		retries++;
	}

	sba->sba_spc = spc;
	sba->sba_hdr = spc->spc_hdr;
	sba->sba_data = spc->spc_buf;
	spcresetbuf(spc);

	spcref(spc);

	pthread_mutex_lock(&sbamtx);
	TAILQ_INSERT_TAIL(&syslist, sba, sba_entries);
	if (idleworker > 0) {
		/* do we have a daemon's tool (i.e. idle threads)? */
		pthread_cond_signal(&sbacv);
	} else if (nworker < rumpsp_maxworker) {
		/*
		 * Else, need to create one
		 * (if we can, otherwise just expect another
		 * worker to pick up the syscall)
		 */
		if (pthread_create(&pt, &pattr_detached,
		    serv_syscallbouncer, NULL) == 0)
			nworker++;
	}
	pthread_mutex_unlock(&sbamtx);
}

static void *
spserver(void *arg)
{
	struct spservarg *sarg = arg;
	struct spclient *spc;
	unsigned idx;
	int seen;
	int rv;
	unsigned int nfds, maxidx;

	for (idx = 0; idx < MAXCLI; idx++) {
		pfdlist[idx].fd = -1;
		pfdlist[idx].events = POLLIN;

		spc = &spclist[idx];
		pthread_mutex_init(&spc->spc_mtx, NULL);
		pthread_cond_init(&spc->spc_cv, NULL);
		spc->spc_fd = -1;
	}
	pfdlist[0].fd = spclist[0].spc_fd = sarg->sps_sock;
	pfdlist[0].events = POLLIN;
	nfds = 1;
	maxidx = 0;

	pthread_attr_init(&pattr_detached);
	pthread_attr_setdetachstate(&pattr_detached, PTHREAD_CREATE_DETACHED);
	/* XXX: setting the stack size doesn't currently work on NetBSD */
	pthread_attr_setstacksize(&pattr_detached, 32*1024);

	pthread_mutex_init(&sbamtx, NULL);
	pthread_cond_init(&sbacv, NULL);

	DPRINTF(("rump_sp: server mainloop\n"));

	for (;;) {
		int discoed;

		/* g/c hangarounds (eventually) */
		discoed = atomic_swap_uint(&disco, 0);
		while (discoed--) {
			nfds--;
			idx = maxidx;
			while (idx) {
				if (pfdlist[idx].fd != -1) {
					maxidx = idx;
					break;
				}
				idx--;
			}
			DPRINTF(("rump_sp: set maxidx to [%u]\n",
			    maxidx));
		}

		DPRINTF(("rump_sp: loop nfd %d\n", maxidx+1));
		seen = 0;
		rv = poll(pfdlist, maxidx+1, INFTIM);
		assert(maxidx+1 <= MAXCLI);
		assert(rv != 0);
		if (rv == -1) {
			if (errno == EINTR)
				continue;
			fprintf(stderr, "rump_spserver: poll returned %d\n",
			    errno);
			break;
		}

		for (idx = 0; seen < rv && idx < MAXCLI; idx++) {
			if ((pfdlist[idx].revents & POLLIN) == 0)
				continue;

			seen++;
			DPRINTF(("rump_sp: activity at [%u] %d/%d\n",
			    idx, seen, rv));
			if (idx > 0) {
				spc = &spclist[idx];
				DPRINTF(("rump_sp: mainloop read [%u]\n", idx));
				switch (readframe(spc)) {
				case 0:
					break;
				case -1:
					serv_handledisco(idx);
					break;
				default:
					switch (spc->spc_hdr.rsp_class) {
					case RUMPSP_RESP:
						kickwaiter(spc);
						break;
					case RUMPSP_REQ:
						handlereq(spc);
						break;
					default:
						send_error_resp(spc,
						    spc->spc_hdr.rsp_reqno,
						    ENOENT);
						spcfreebuf(spc);
						break;
					}
					break;
				}

			} else {
				DPRINTF(("rump_sp: mainloop new connection\n"));

				if (__predict_false(spfini)) {
					close(spclist[0].spc_fd);
					serv_shutdown();
					goto out;
				}

				idx = serv_handleconn(pfdlist[0].fd,
				    sarg->sps_connhook, nfds == MAXCLI);
				if (idx)
					nfds++;
				if (idx > maxidx)
					maxidx = idx;
				DPRINTF(("rump_sp: maxid now %d\n", maxidx));
			}
		}
	}

 out:
	return NULL;
}

static unsigned cleanupidx;
static struct sockaddr *cleanupsa;
int
rumpuser_sp_init(const char *url, const struct rumpuser_sp_ops *spopsp,
	const char *ostype, const char *osrelease, const char *machine)
{
	pthread_t pt;
	struct spservarg *sarg;
	struct sockaddr *sap;
	char *p;
	unsigned idx;
	int error, s;

	p = strdup(url);
	if (p == NULL)
		return ENOMEM;
	error = parseurl(p, &sap, &idx, 1);
	free(p);
	if (error)
		return error;

	snprintf(banner, sizeof(banner), "RUMPSP-%d.%d-%s-%s/%s\n",
	    PROTOMAJOR, PROTOMINOR, ostype, osrelease, machine);

	s = socket(parsetab[idx].domain, SOCK_STREAM, 0);
	if (s == -1)
		return errno;

	spops = *spopsp;
	sarg = malloc(sizeof(*sarg));
	if (sarg == NULL) {
		close(s);
		return ENOMEM;
	}

	sarg->sps_sock = s;
	sarg->sps_connhook = parsetab[idx].connhook;

	cleanupidx = idx;
	cleanupsa = sap;

	/* sloppy error recovery */

	/*LINTED*/
	if (bind(s, sap, sap->sa_len) == -1) {
		fprintf(stderr, "rump_sp: server bind failed\n");
		return errno;
	}

	if (listen(s, MAXCLI) == -1) {
		fprintf(stderr, "rump_sp: server listen failed\n");
		return errno;
	}

	if ((error = pthread_create(&pt, NULL, spserver, sarg)) != 0) {
		fprintf(stderr, "rump_sp: cannot create worker thread\n");
		return error;
	}
	pthread_detach(pt);

	return 0;
}
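
/*
 * Illustrative note: the url argument is parsed by parseurl() in
 * sp_common.c; typical values look like "tcp://0.0.0.0:12345" or
 * "unix:///path/to/socket" (example addresses, not defaults).  librump
 * invokes this routine with its own rumpuser_sp_ops vector when it is
 * configured to run as a server, so it is not normally called by hand.
 */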

void
rumpuser_sp_fini()
{

	if (spclist[0].spc_fd) {
		parsetab[cleanupidx].cleanup(cleanupsa);
		shutdown(spclist[0].spc_fd, SHUT_RDWR);
		spfini = 1;
	}
}