/*	$NetBSD: hijack.c,v 1.16.2.2 2011/02/08 16:19:04 bouyer Exp $	*/

/*-
 * Copyright (c) 2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
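
/*
 * This library is meant to be LD_PRELOADed into an unmodified, dynamically
 * linked binary, roughly like so:
 *
 *	env RUMP_SERVER=unix:///tmp/rumpsock LD_PRELOAD=librumphijack.so netstat
 *
 * Selected system calls are then routed either to the host kernel or to
 * the rump kernel server which librumpclient connects to.
 */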

#include <sys/cdefs.h>
__RCSID("$NetBSD: hijack.c,v 1.16.2.2 2011/02/08 16:19:04 bouyer Exp $");
#define __ssp_weak_name(fun) _hijack_ ## fun

#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/poll.h>

#include <rump/rumpclient.h>
#include <rump/rump_syscalls.h>

#include <assert.h>
#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

enum dualcall {
	DUALCALL_WRITE, DUALCALL_WRITEV,
	DUALCALL_IOCTL, DUALCALL_FCNTL,
	DUALCALL_SOCKET, DUALCALL_ACCEPT, DUALCALL_BIND, DUALCALL_CONNECT,
	DUALCALL_GETPEERNAME, DUALCALL_GETSOCKNAME, DUALCALL_LISTEN,
	DUALCALL_RECVFROM, DUALCALL_RECVMSG,
	DUALCALL_SENDTO, DUALCALL_SENDMSG,
	DUALCALL_GETSOCKOPT, DUALCALL_SETSOCKOPT,
	DUALCALL_SHUTDOWN,
	DUALCALL_READ, DUALCALL_READV,
	DUALCALL_DUP, DUALCALL_DUP2,
	DUALCALL_CLOSE,
	DUALCALL_POLLTS,
	DUALCALL_KEVENT,
	DUALCALL__NUM
};

#define RSYS_STRING(a) __STRING(a)
#define RSYS_NAME(a) RSYS_STRING(__CONCAT(RUMP_SYS_RENAME_,a))
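/*
 * RSYS_NAME(SOCKET) expands to the string form of RUMP_SYS_RENAME_SOCKET,
 * i.e. the name under which the rump kernel exports its socket call in
 * the rump syscall headers.  These strings are later fed to dlsym() in
 * rcinit() to fill in the call vectors.
 */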

/*
 * Would be nice to get this automatically in sync with libc.
 * Also, this does not work for compat-using binaries!
 */
#if !__NetBSD_Prereq__(5,99,7)
#define REALSELECT select
#define REALPOLLTS pollts
#define REALKEVENT kevent
#else
#define REALSELECT _sys___select50
#define REALPOLLTS _sys___pollts50
#define REALKEVENT _sys___kevent50
#endif
#define REALREAD _sys_read

int REALSELECT(int, fd_set *, fd_set *, fd_set *, struct timeval *);
int REALPOLLTS(struct pollfd *, nfds_t,
    const struct timespec *, const sigset_t *);
int REALKEVENT(int, const struct kevent *, size_t, struct kevent *, size_t,
    const struct timespec *);
ssize_t REALREAD(int, void *, size_t);

#define S(a) __STRING(a)
struct sysnames {
	enum dualcall scm_callnum;
	const char *scm_hostname;
	const char *scm_rumpname;
} syscnames[] = {
	{ DUALCALL_SOCKET,	"__socket30",	RSYS_NAME(SOCKET)	},
	{ DUALCALL_ACCEPT,	"accept",	RSYS_NAME(ACCEPT)	},
	{ DUALCALL_BIND,	"bind",		RSYS_NAME(BIND)		},
	{ DUALCALL_CONNECT,	"connect",	RSYS_NAME(CONNECT)	},
	{ DUALCALL_GETPEERNAME,	"getpeername",	RSYS_NAME(GETPEERNAME)	},
	{ DUALCALL_GETSOCKNAME,	"getsockname",	RSYS_NAME(GETSOCKNAME)	},
	{ DUALCALL_LISTEN,	"listen",	RSYS_NAME(LISTEN)	},
	{ DUALCALL_RECVFROM,	"recvfrom",	RSYS_NAME(RECVFROM)	},
	{ DUALCALL_RECVMSG,	"recvmsg",	RSYS_NAME(RECVMSG)	},
	{ DUALCALL_SENDTO,	"sendto",	RSYS_NAME(SENDTO)	},
	{ DUALCALL_SENDMSG,	"sendmsg",	RSYS_NAME(SENDMSG)	},
	{ DUALCALL_GETSOCKOPT,	"getsockopt",	RSYS_NAME(GETSOCKOPT)	},
	{ DUALCALL_SETSOCKOPT,	"setsockopt",	RSYS_NAME(SETSOCKOPT)	},
	{ DUALCALL_SHUTDOWN,	"shutdown",	RSYS_NAME(SHUTDOWN)	},
	{ DUALCALL_READ,	S(REALREAD),	RSYS_NAME(READ)		},
	{ DUALCALL_READV,	"readv",	RSYS_NAME(READV)	},
	{ DUALCALL_WRITE,	"write",	RSYS_NAME(WRITE)	},
	{ DUALCALL_WRITEV,	"writev",	RSYS_NAME(WRITEV)	},
	{ DUALCALL_IOCTL,	"ioctl",	RSYS_NAME(IOCTL)	},
	{ DUALCALL_FCNTL,	"fcntl",	RSYS_NAME(FCNTL)	},
	{ DUALCALL_DUP,		"dup",		RSYS_NAME(DUP)		},
	{ DUALCALL_DUP2,	"dup2",		RSYS_NAME(DUP2)		},
	{ DUALCALL_CLOSE,	"close",	RSYS_NAME(CLOSE)	},
	{ DUALCALL_POLLTS,	S(REALPOLLTS),	RSYS_NAME(POLLTS)	},
	{ DUALCALL_KEVENT,	S(REALKEVENT),	RSYS_NAME(KEVENT)	},
};
#undef S

struct bothsys {
	void *bs_host;
	void *bs_rump;
} syscalls[DUALCALL__NUM];
#define GETSYSCALL(which, name) syscalls[DUALCALL_##name].bs_##which
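/*
 * Example: GETSYSCALL(rump, SOCKET) picks syscalls[DUALCALL_SOCKET].bs_rump,
 * i.e. the rump kernel's socket routine, while GETSYSCALL(host, SOCKET)
 * picks the host libc version looked up with dlsym(RTLD_NEXT, ...).
 */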

pid_t	(*host_fork)(void);
int	(*host_daemon)(int, int);

static unsigned dup2mask;
#define ISDUP2D(fd) (1<<(fd) & dup2mask)
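/*
 * dup2mask records which of the low file descriptors (0-2 in practice,
 * see dup2() below) currently alias a rump kernel descriptor;
 * ISDUP2D(fd) tests the corresponding bit.
 */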

//#define DEBUGJACK
#ifdef DEBUGJACK
#define DPRINTF(x) mydprintf x
static void
mydprintf(const char *fmt, ...)
{
	va_list ap;

	if (ISDUP2D(STDERR_FILENO))
		return;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

#else
#define DPRINTF(x)
#endif

#define FDCALL(type, name, rcname, args, proto, vars)		\
type name args							\
{								\
	type (*fun) proto;					\
								\
	DPRINTF(("%s -> %d\n", __STRING(name), fd));		\
	if (fd_isrump(fd)) {					\
		fun = syscalls[rcname].bs_rump;			\
		fd = fd_host2rump(fd);				\
	} else {						\
		fun = syscalls[rcname].bs_host;			\
	}							\
								\
	return fun vars;					\
}
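/*
 * FDCALL() generates a wrapper whose only job is fd bookkeeping.  For
 * example, FDCALL(int, listen, DUALCALL_LISTEN, ...) below expands to a
 * listen() that checks fd_isrump(fd), translates the descriptor with
 * fd_host2rump() if needed, and then tail-calls either the rump or the
 * host listen through the syscalls[] vector.
 */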

/*
 * This is called from librumpclient in case of LD_PRELOAD.
 * It ensures correct RTLD_NEXT.
 *
 * ... except, it's apparently extremely difficult to force
 * at least gcc to generate an actual stack frame here.  So
 * sprinkle some volatile foobar and baz to throw the optimizer
 * off the scent and generate a variable assignment with the
 * return value.  The posterboy for this meltdown is amd64
 * with -O2.  At least with gcc 4.1.3 i386 works regardless of
 * optimization.
 */
volatile int rumphijack_unrope; /* there, unhang yourself */
static void *
hijackdlsym(void *handle, const char *symbol)
{
	void *rv;

	rv = dlsym(handle, symbol);
	rumphijack_unrope = *(volatile int *)rv;

	return (void *)rv;
}

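/*
 * By default AF_LOCAL sockets are created in the host kernel (see
 * __socket30() below); all other protocol families go to the rump kernel.
 */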
/* low calorie sockets? */
static bool hostlocalsockets = true;

static void __attribute__((constructor))
rcinit(void)
{
	char buf[64];
	extern void *(*rumpclient_dlsym)(void *, const char *);
	unsigned i, j;

	rumpclient_dlsym = hijackdlsym;
	host_fork = dlsym(RTLD_NEXT, "fork");
	host_daemon = dlsym(RTLD_NEXT, "daemon");

	/*
	 * In theory we cannot print anything during the lookups because
	 * we might not have the call vector set up.  So the errx()
	 * below is a bit of a stretch, but it might work.
	 */

	for (i = 0; i < DUALCALL__NUM; i++) {
		/* build runtime O(1) access */
		for (j = 0; j < __arraycount(syscnames); j++) {
			if (syscnames[j].scm_callnum == i)
				break;
		}

		if (j == __arraycount(syscnames))
			errx(1, "rumphijack error: syscall pos %u missing", i);

		syscalls[i].bs_host = dlsym(RTLD_NEXT,
		    syscnames[j].scm_hostname);
		if (syscalls[i].bs_host == NULL)
			errx(1, "hostcall %s not found",
			    syscnames[j].scm_hostname);

		syscalls[i].bs_rump = dlsym(RTLD_NEXT,
		    syscnames[j].scm_rumpname);
		if (syscalls[i].bs_rump == NULL)
			errx(1, "rumpcall %s not found",
			    syscnames[j].scm_rumpname);
	}

	if (rumpclient_init() == -1)
		err(1, "rumpclient init");

	/* set client persistence level */
	if (getenv_r("RUMPHIJACK_RETRY", buf, sizeof(buf)) == -1) {
		if (errno == ERANGE)
			err(1, "invalid RUMPHIJACK_RETRY");
		rumpclient_setconnretry(RUMPCLIENT_RETRYCONN_INFTIME);
	} else {
		if (strcmp(buf, "die") == 0)
			rumpclient_setconnretry(RUMPCLIENT_RETRYCONN_DIE);
		else if (strcmp(buf, "inftime") == 0)
			rumpclient_setconnretry(RUMPCLIENT_RETRYCONN_INFTIME);
		else if (strcmp(buf, "once") == 0)
			rumpclient_setconnretry(RUMPCLIENT_RETRYCONN_ONCE);
		else {
			time_t timeout;

			timeout = (time_t)strtoll(buf, NULL, 10);
			if (timeout <= 0)
				errx(1, "RUMPHIJACK_RETRY must be a keyword "
				    "or a positive integer, got: %s", buf);

			rumpclient_setconnretry(timeout);
		}
	}
}

/* XXX: need runtime selection.  low for now due to FD_SETSIZE */
#define HIJACK_FDOFF 128
#define HIJACK_ASSERT 128 /* XXX */
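/*
 * File descriptor translation: descriptors handed out for rump kernel
 * files are offset by HIJACK_FDOFF so that they do not collide with host
 * descriptors, which stay below the offset.  The dup2'd low descriptors
 * tracked in dup2mask are the exception and pass through unchanged.
 */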
static int
fd_rump2host(int fd)
{

	if (fd == -1)
		return fd;

	if (!ISDUP2D(fd))
		fd += HIJACK_FDOFF;

	return fd;
}

static int
fd_host2rump(int fd)
{

	if (!ISDUP2D(fd))
		fd -= HIJACK_FDOFF;
	return fd;
}

static bool
fd_isrump(int fd)
{

	return ISDUP2D(fd) || fd >= HIJACK_FDOFF;
}

#define assertfd(_fd_) assert(ISDUP2D(_fd_) || (_fd_) >= HIJACK_ASSERT)
#undef HIJACK_FDOFF

int __socket30(int, int, int);
int
__socket30(int domain, int type, int protocol)
{
	int (*op_socket)(int, int, int);
	int fd;
	bool dohost;

	dohost = hostlocalsockets && (domain == AF_LOCAL);

	if (dohost)
		op_socket = GETSYSCALL(host, SOCKET);
	else
		op_socket = GETSYSCALL(rump, SOCKET);
	fd = op_socket(domain, type, protocol);

	if (!dohost)
		fd = fd_rump2host(fd);
	DPRINTF(("socket <- %d\n", fd));

	return fd;
}

int
accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
	int (*op_accept)(int, struct sockaddr *, socklen_t *);
	int fd;
	bool isrump;

	isrump = fd_isrump(s);

	DPRINTF(("accept -> %d", s));
	if (isrump) {
		op_accept = GETSYSCALL(rump, ACCEPT);
		s = fd_host2rump(s);
	} else {
		op_accept = GETSYSCALL(host, ACCEPT);
	}
	fd = op_accept(s, addr, addrlen);
	if (fd != -1 && isrump)
		fd = fd_rump2host(fd);

	DPRINTF((" <- %d\n", fd));

	return fd;
}

/*
 * ioctl and fcntl are varargs calls and need special treatment
 */
int
ioctl(int fd, unsigned long cmd, ...)
{
	int (*op_ioctl)(int, unsigned long cmd, ...);
	va_list ap;
	int rv;

	DPRINTF(("ioctl -> %d\n", fd));
	if (fd_isrump(fd)) {
		fd = fd_host2rump(fd);
		op_ioctl = GETSYSCALL(rump, IOCTL);
	} else {
		op_ioctl = GETSYSCALL(host, IOCTL);
	}

	va_start(ap, cmd);
	rv = op_ioctl(fd, cmd, va_arg(ap, void *));
	va_end(ap);
	return rv;
}


/* TODO: support F_DUPFD, F_CLOSEM, F_MAXFD */
int
fcntl(int fd, int cmd, ...)
{
	int (*op_fcntl)(int, int, ...);
	va_list ap;
	int rv;

	DPRINTF(("fcntl -> %d\n", fd));
	if (fd_isrump(fd)) {
		fd = fd_host2rump(fd);
		op_fcntl = GETSYSCALL(rump, FCNTL);
	} else {
		op_fcntl = GETSYSCALL(host, FCNTL);
	}

	va_start(ap, cmd);
	rv = op_fcntl(fd, cmd, va_arg(ap, void *));
	va_end(ap);
	return rv;
}

/*
 * write cannot issue a standard debug printf due to recursion
 */
ssize_t
write(int fd, const void *buf, size_t blen)
{
	ssize_t (*op_write)(int, const void *, size_t);

	if (fd_isrump(fd)) {
		fd = fd_host2rump(fd);
		op_write = GETSYSCALL(rump, WRITE);
	} else {
		op_write = GETSYSCALL(host, WRITE);
	}

	return op_write(fd, buf, blen);
}

/*
 * dup2 is special.  We allow dup2 of a rump kernel fd to 0-2 since
 * many programs do that.  dup2 of a rump kernel fd to another value
 * not >= fdoff is an error.
 *
 * Note: cannot rump2host newd, because it is often hardcoded.
 */
int
dup2(int oldd, int newd)
{
	int (*host_dup2)(int, int);
	int rv;

	DPRINTF(("dup2 -> %d (o) -> %d (n)\n", oldd, newd));

	if (fd_isrump(oldd)) {
		if (!(newd >= 0 && newd <= 2)) {
			errno = EBADF;
			return -1;
		}
		oldd = fd_host2rump(oldd);
		rv = rump_sys_dup2(oldd, newd);
		if (rv != -1)
			dup2mask |= 1<<newd;
	} else {
		host_dup2 = syscalls[DUALCALL_DUP2].bs_host;
		rv = host_dup2(oldd, newd);
	}

	return rv;
}

int
dup(int oldd)
{
	int (*op_dup)(int);
	int newd;

	DPRINTF(("dup -> %d\n", oldd));
	if (fd_isrump(oldd)) {
		op_dup = GETSYSCALL(rump, DUP);
	} else {
		op_dup = GETSYSCALL(host, DUP);
	}

	newd = op_dup(oldd);

	if (fd_isrump(oldd))
		newd = fd_rump2host(newd);
	DPRINTF(("dup <- %d\n", newd));

	return newd;
}

/*
 * We just wrap fork with the appropriate rump client calls to preserve
 * the file descriptors of the forking parent in the child, while
 * preventing double use of the connection fd.
 */
pid_t
fork()
{
	struct rumpclient_fork *rf;
	pid_t rv;

	DPRINTF(("fork\n"));

	if ((rf = rumpclient_prefork()) == NULL)
		return -1;

	switch ((rv = host_fork())) {
	case -1:
		/* XXX: cancel rf */
		break;
	case 0:
		if (rumpclient_fork_init(rf) == -1)
			rv = -1;
		break;
	default:
		break;
	}

	DPRINTF(("fork returns %d\n", rv));
	return rv;
}

int
daemon(int nochdir, int noclose)
{
	struct rumpclient_fork *rf;

	if ((rf = rumpclient_prefork()) == NULL)
		return -1;

	if (host_daemon(nochdir, noclose) == -1)
		return -1;

	if (rumpclient_fork_init(rf) == -1)
		return -1;

	return 0;
}

/*
 * select is done by calling poll.
 */
int
REALSELECT(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	struct timeval *timeout)
{
	struct pollfd *pfds;
	struct timespec ts, *tsp = NULL;
	nfds_t realnfds;
	int i, j;
	int rv, incr;

	DPRINTF(("select\n"));

	/*
	 * Well, first we must scan the fds to figure out how many
	 * fds there really are.  This is because up to and including
	 * nb5 poll() silently refuses nfds > process_maxopen_fds.
	 * Seems to be fixed in current, thank the maker.
	 * god damn cluster...bomb.
	 */

	for (i = 0, realnfds = 0; i < nfds; i++) {
		if (readfds && FD_ISSET(i, readfds)) {
			realnfds++;
			continue;
		}
		if (writefds && FD_ISSET(i, writefds)) {
			realnfds++;
			continue;
		}
		if (exceptfds && FD_ISSET(i, exceptfds)) {
			realnfds++;
			continue;
		}
	}

	if (realnfds) {
		pfds = malloc(sizeof(*pfds) * realnfds);
		if (!pfds)
			return -1;
	} else {
		pfds = NULL;
	}

	for (i = 0, j = 0; i < nfds; i++) {
		incr = 0;
		pfds[j].events = pfds[j].revents = 0;
		if (readfds && FD_ISSET(i, readfds)) {
			pfds[j].fd = i;
			pfds[j].events |= POLLIN;
			incr = 1;
		}
		if (writefds && FD_ISSET(i, writefds)) {
			pfds[j].fd = i;
			pfds[j].events |= POLLOUT;
			incr = 1;
		}
		if (exceptfds && FD_ISSET(i, exceptfds)) {
			pfds[j].fd = i;
			pfds[j].events |= POLLHUP|POLLERR;
			incr = 1;
		}
		if (incr)
			j++;
	}

	if (timeout) {
		TIMEVAL_TO_TIMESPEC(timeout, &ts);
		tsp = &ts;
	}
	rv = REALPOLLTS(pfds, realnfds, tsp, NULL);
	if (rv <= 0)
		goto out;

	/*
	 * ok, harvest results.  first zero out entries (can't use
	 * FD_ZERO for the obvious select-me-not reason).  whee.
	 */
	for (i = 0; i < nfds; i++) {
		if (readfds)
			FD_CLR(i, readfds);
		if (writefds)
			FD_CLR(i, writefds);
		if (exceptfds)
			FD_CLR(i, exceptfds);
	}

	/* and then plug in the results */
	for (i = 0; i < (int)realnfds; i++) {
		if (readfds) {
			if (pfds[i].revents & POLLIN) {
				FD_SET(pfds[i].fd, readfds);
			}
		}
		if (writefds) {
			if (pfds[i].revents & POLLOUT) {
				FD_SET(pfds[i].fd, writefds);
			}
		}
		if (exceptfds) {
			if (pfds[i].revents & (POLLHUP|POLLERR)) {
				FD_SET(pfds[i].fd, exceptfds);
			}
		}
	}

 out:
	free(pfds);
	return rv;
}

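/*
 * Helpers for poll: checkpoll() counts how many of the caller's fds live
 * in the host kernel and how many in the rump kernel; adjustpoll()
 * rewrites the fd numbers in place with the given translation function.
 */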
static void
checkpoll(struct pollfd *fds, nfds_t nfds, int *hostcall, int *rumpcall)
{
	nfds_t i;

	for (i = 0; i < nfds; i++) {
		if (fds[i].fd == -1)
			continue;

		if (fd_isrump(fds[i].fd))
			(*rumpcall)++;
		else
			(*hostcall)++;
	}
}

static void
adjustpoll(struct pollfd *fds, nfds_t nfds, int (*fdadj)(int))
{
	nfds_t i;

	for (i = 0; i < nfds; i++) {
		fds[i].fd = fdadj(fds[i].fd);
	}
}

/*
 * poll is easy as long as all the fds in the call live in one
 * kernel.  Otherwise it's quite tricky...
 */
struct pollarg {
	struct pollfd *pfds;
	nfds_t nfds;
	const struct timespec *ts;
	const sigset_t *sigmask;
	int pipefd;
	int errnum;
};

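/*
 * hostpoll() runs in a separate thread and issues the host kernel's
 * pollts.  When it returns it writes its return value into the rump-side
 * pipe (parg->pipefd) so that the rump pollts running in the original
 * thread wakes up too.
 */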
static void *
hostpoll(void *arg)
{
	int (*op_pollts)(struct pollfd *, nfds_t, const struct timespec *,
	    const sigset_t *);
	struct pollarg *parg = arg;
	intptr_t rv;

	op_pollts = syscalls[DUALCALL_POLLTS].bs_host;
	rv = op_pollts(parg->pfds, parg->nfds, parg->ts, parg->sigmask);
	if (rv == -1)
		parg->errnum = errno;
	rump_sys_write(parg->pipefd, &rv, sizeof(rv));

	return (void *)(intptr_t)rv;
}

int
REALPOLLTS(struct pollfd *fds, nfds_t nfds, const struct timespec *ts,
	const sigset_t *sigmask)
{
	int (*op_pollts)(struct pollfd *, nfds_t, const struct timespec *,
	    const sigset_t *);
	int (*host_close)(int);
	int hostcall = 0, rumpcall = 0;
	pthread_t pt;
	nfds_t i;
	int rv;

	DPRINTF(("poll\n"));
	checkpoll(fds, nfds, &hostcall, &rumpcall);

	if (hostcall && rumpcall) {
		struct pollfd *pfd_host = NULL, *pfd_rump = NULL;
		int rpipe[2] = {-1,-1}, hpipe[2] = {-1,-1};
		struct pollarg parg;
		uintptr_t lrv;
		int sverrno = 0, trv;
		void *trv_ret;	/* pthread_join() stores a pointer-sized value */

		/*
		 * ok, this is where it gets tricky.  We must support
		 * this since it's a very common operation in certain
		 * types of software (telnet, netcat, etc).  We allocate
		 * two vectors and run two poll commands in separate
		 * threads.  Whichever returns first "wins" and the
		 * other kernel's fds won't show activity.
		 */
		rv = -1;

		/* allocate full vector for O(n) joining after call */
		pfd_host = malloc(sizeof(*pfd_host)*(nfds+1));
		if (!pfd_host)
			goto out;
		pfd_rump = malloc(sizeof(*pfd_rump)*(nfds+1));
		if (!pfd_rump) {
			goto out;
		}

		/* split vectors */
		for (i = 0; i < nfds; i++) {
			if (fds[i].fd == -1) {
				pfd_host[i].fd = -1;
				pfd_rump[i].fd = -1;
			} else if (fd_isrump(fds[i].fd)) {
				pfd_host[i].fd = -1;
				pfd_rump[i].fd = fd_host2rump(fds[i].fd);
				pfd_rump[i].events = fds[i].events;
			} else {
				pfd_rump[i].fd = -1;
				pfd_host[i].fd = fds[i].fd;
				pfd_host[i].events = fds[i].events;
			}
			fds[i].revents = 0;
		}

		/*
		 * then, open two pipes, one for notifications
		 * to each kernel.
		 */
		if (rump_sys_pipe(rpipe) == -1)
			goto out;
		if (pipe(hpipe) == -1)
			goto out;

		pfd_host[nfds].fd = hpipe[0];
		pfd_host[nfds].events = POLLIN;
		pfd_rump[nfds].fd = rpipe[0];
		pfd_rump[nfds].events = POLLIN;

		/*
		 * then, create a thread to do host part and meanwhile
		 * do rump kernel part right here
		 */

		parg.pfds = pfd_host;
		parg.nfds = nfds+1;
		parg.ts = ts;
		parg.sigmask = sigmask;
		parg.pipefd = rpipe[1];
		pthread_create(&pt, NULL, hostpoll, &parg);

		op_pollts = syscalls[DUALCALL_POLLTS].bs_rump;
		lrv = op_pollts(pfd_rump, nfds+1, ts, NULL);
		sverrno = errno;
		write(hpipe[1], &rv, sizeof(rv));
		pthread_join(pt, &trv_ret);
		trv = (int)(intptr_t)trv_ret;

		/* check who "won" and merge results */
		if (lrv != 0 && pfd_host[nfds].revents & POLLIN) {
			rv = trv;

			for (i = 0; i < nfds; i++) {
				if (pfd_rump[i].fd != -1)
					fds[i].revents = pfd_rump[i].revents;
			}
			sverrno = parg.errnum;
		} else if (trv != 0 && pfd_rump[nfds].revents & POLLIN) {
			rv = trv;

			for (i = 0; i < nfds; i++) {
				if (pfd_host[i].fd != -1)
					fds[i].revents = pfd_host[i].revents;
			}
		} else {
			rv = 0;
		}

 out:
		host_close = syscalls[DUALCALL_CLOSE].bs_host;
		if (rpipe[0] != -1)
			rump_sys_close(rpipe[0]);
		if (rpipe[1] != -1)
			rump_sys_close(rpipe[1]);
		if (hpipe[0] != -1)
			host_close(hpipe[0]);
		if (hpipe[1] != -1)
			host_close(hpipe[1]);
		free(pfd_host);
		free(pfd_rump);
		errno = sverrno;
	} else {
		if (hostcall) {
			op_pollts = syscalls[DUALCALL_POLLTS].bs_host;
		} else {
			op_pollts = syscalls[DUALCALL_POLLTS].bs_rump;
			adjustpoll(fds, nfds, fd_host2rump);
		}

		rv = op_pollts(fds, nfds, ts, sigmask);
		if (rumpcall)
			adjustpoll(fds, nfds, fd_rump2host);
	}

	return rv;
}

int
poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
	struct timespec ts;
	struct timespec *tsp = NULL;

	if (timeout != INFTIM) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000*1000;

		tsp = &ts;
	}

	return REALPOLLTS(fds, nfds, tsp, NULL);
}

int
REALKEVENT(int kq, const struct kevent *changelist, size_t nchanges,
	struct kevent *eventlist, size_t nevents,
	const struct timespec *timeout)
{
	int (*op_kevent)(int, const struct kevent *, size_t,
	    struct kevent *, size_t, const struct timespec *);
	const struct kevent *ev;
	size_t i;

	/*
	 * Check that we don't attempt to kevent rump kernel fd's.
	 * That needs similar treatment to select/poll, but is slightly
	 * trickier since we need to manage two different kq descriptors.
	 * (TODO, in case you're wondering).
	 */
	for (i = 0; i < nchanges; i++) {
		ev = &changelist[i];
		if (ev->filter == EVFILT_READ || ev->filter == EVFILT_WRITE ||
		    ev->filter == EVFILT_VNODE) {
			if (fd_isrump(ev->ident)) {
				errno = ENOTSUP;
				return -1;
			}
		}
	}

	op_kevent = GETSYSCALL(host, KEVENT);
	return op_kevent(kq, changelist, nchanges, eventlist, nevents, timeout);
}

/*
 * Rest are std type calls.
 */

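/*
 * These all go through the FDCALL() wrapper generator defined above:
 * only the file descriptor argument needs translation.
 */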
FDCALL(int, bind, DUALCALL_BIND, \
	(int fd, const struct sockaddr *name, socklen_t namelen), \
	(int, const struct sockaddr *, socklen_t), \
	(fd, name, namelen))

FDCALL(int, connect, DUALCALL_CONNECT, \
	(int fd, const struct sockaddr *name, socklen_t namelen), \
	(int, const struct sockaddr *, socklen_t), \
	(fd, name, namelen))

FDCALL(int, getpeername, DUALCALL_GETPEERNAME, \
	(int fd, struct sockaddr *name, socklen_t *namelen), \
	(int, struct sockaddr *, socklen_t *), \
	(fd, name, namelen))

FDCALL(int, getsockname, DUALCALL_GETSOCKNAME, \
	(int fd, struct sockaddr *name, socklen_t *namelen), \
	(int, struct sockaddr *, socklen_t *), \
	(fd, name, namelen))

FDCALL(int, listen, DUALCALL_LISTEN, \
	(int fd, int backlog), \
	(int, int), \
	(fd, backlog))

FDCALL(ssize_t, recvfrom, DUALCALL_RECVFROM, \
	(int fd, void *buf, size_t len, int flags, \
	    struct sockaddr *from, socklen_t *fromlen), \
	(int, void *, size_t, int, struct sockaddr *, socklen_t *), \
	(fd, buf, len, flags, from, fromlen))

FDCALL(ssize_t, sendto, DUALCALL_SENDTO, \
	(int fd, const void *buf, size_t len, int flags, \
	    const struct sockaddr *to, socklen_t tolen), \
	(int, const void *, size_t, int, \
	    const struct sockaddr *, socklen_t), \
	(fd, buf, len, flags, to, tolen))

FDCALL(ssize_t, recvmsg, DUALCALL_RECVMSG, \
	(int fd, struct msghdr *msg, int flags), \
	(int, struct msghdr *, int), \
	(fd, msg, flags))

FDCALL(ssize_t, sendmsg, DUALCALL_SENDMSG, \
	(int fd, const struct msghdr *msg, int flags), \
	(int, const struct msghdr *, int), \
	(fd, msg, flags))

FDCALL(int, getsockopt, DUALCALL_GETSOCKOPT, \
	(int fd, int level, int optn, void *optval, socklen_t *optlen), \
	(int, int, int, void *, socklen_t *), \
	(fd, level, optn, optval, optlen))

FDCALL(int, setsockopt, DUALCALL_SETSOCKOPT, \
	(int fd, int level, int optn, \
	    const void *optval, socklen_t optlen), \
	(int, int, int, const void *, socklen_t), \
	(fd, level, optn, optval, optlen))

FDCALL(int, shutdown, DUALCALL_SHUTDOWN, \
	(int fd, int how), \
	(int, int), \
	(fd, how))

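/*
 * ssp stubs: with _FORTIFY_SOURCE the bounds-checking wrappers from the
 * system headers call __ssp_weak_name(fun), which was redefined at the top
 * of this file to _hijack_<fun>; provide those symbols here by forwarding
 * directly to the host system calls.
 */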
#if _FORTIFY_SOURCE > 0
#define STUB(fun) __ssp_weak_name(fun)
ssize_t _sys_readlink(const char * __restrict, char * __restrict, size_t);
ssize_t
STUB(readlink)(const char * __restrict path, char * __restrict buf,
	size_t bufsiz)
{
	return _sys_readlink(path, buf, bufsiz);
}

char *_sys_getcwd(char *, size_t);
char *
STUB(getcwd)(char *buf, size_t size)
{
	return _sys_getcwd(buf, size);
}
#else
#define STUB(fun) fun
#endif

FDCALL(ssize_t, REALREAD, DUALCALL_READ, \
	(int fd, void *buf, size_t buflen), \
	(int, void *, size_t), \
	(fd, buf, buflen))

FDCALL(ssize_t, readv, DUALCALL_READV, \
	(int fd, const struct iovec *iov, int iovcnt), \
	(int, const struct iovec *, int), \
	(fd, iov, iovcnt))

FDCALL(ssize_t, writev, DUALCALL_WRITEV, \
	(int fd, const struct iovec *iov, int iovcnt), \
	(int, const struct iovec *, int), \
	(fd, iov, iovcnt))

FDCALL(int, close, DUALCALL_CLOSE, \
	(int fd), \
	(int), \
	(fd))