/*	$NetBSD: linux_machdep.c,v 1.9 2005/06/24 22:57:05 manu Exp $	*/

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: linux_machdep.c,v 1.9 2005/06/24 22:57:05 manu Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/exec.h>
#include <sys/proc.h>
#include <sys/ptrace.h>		/* for process_read_fpregs() */
#include <sys/user.h>
#include <sys/ucontext.h>

#include <machine/reg.h>
#include <machine/pcb.h>
#include <machine/fpu.h>
#include <machine/mcontext.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <compat/linux/common/linux_signal.h>
#include <compat/linux/common/linux_errno.h>
#include <compat/linux/common/linux_exec.h>
#include <compat/linux/common/linux_ioctl.h>
#include <compat/linux/common/linux_prctl.h>
#include <compat/linux/common/linux_machdep.h>
#include <compat/linux/linux_syscall.h>
#include <compat/linux/linux_syscallargs.h>

static void linux_buildcontext(struct lwp *, void *, void *);

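/*
 * Set up the initial register state for a freshly exec'd Linux binary:
 * reset the FPU and segment state inherited from the previous image,
 * then build a clean trapframe with %rip at the ELF entry point and
 * %rsp at the top of the new stack.
 */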
void
linux_setregs(l, epp, stack)
	struct lwp *l;
	struct exec_package *epp;
	u_long stack;
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf;

	/* If we were using the FPU, forget about it. */
	if (l->l_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_lwp(l, 0);

	l->l_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;
	pcb->pcb_savefpu.fp_fxsave.fx_fcw = __NetBSD_NPXCW__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr_mask = __INITIAL_MXCSR_MASK__;
	pcb->pcb_fs = 0;
	pcb->pcb_gs = 0;

	l->l_proc->p_flag &= ~P_32;

	tf = l->l_md.md_regs;
	tf->tf_rax = 0;
	tf->tf_rbx = 0;
	tf->tf_rcx = epp->ep_entry;
	tf->tf_rdx = 0;
	tf->tf_rsi = 0;
	tf->tf_rdi = 0;
	tf->tf_rbp = 0;
	tf->tf_rsp = stack;
	tf->tf_r8 = 0;
	tf->tf_r9 = 0;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_r15 = 0;
	tf->tf_rip = epp->ep_entry;
	tf->tf_rflags = PSL_USERSET;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = 0;
	tf->tf_es = 0;
	tf->tf_fs = 0;
	tf->tf_gs = 0;

	return;
}

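/*
 * Deliver a signal the Linux way: build a Linux rt_sigframe (saved FPU
 * state, siginfo and ucontext) on the user or alternate signal stack and
 * redirect the process to the handler.
 */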
void
linux_sendsig(ksi, mask)
	const ksiginfo_t *ksi;
	const sigset_t *mask;
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack;
	int sig = ksi->ksi_signo;
	struct linux_rt_sigframe *sfp, sigframe;
	struct linux__fpstate *fpsp, fpstate;
	struct fpreg fpregs;
	struct trapframe *tf = l->l_md.md_regs;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	linux_sigset_t lmask;
	char *sp;
	int error;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		sp = ((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
		    p->p_sigctx.ps_sigstk.ss_size);
	else
		sp = (caddr_t)tf->tf_rsp - 128;
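	/*
	 * The 128 bytes skipped just above are the amd64 ABI red zone,
	 * an area below %rsp that user code may use without moving the
	 * stack pointer and which we therefore must not overwrite.
	 */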

	/*
	 * Save FPU state, if any
	 */
	if (l->l_md.md_flags & MDP_USEDFPU) {
		sp = (char *)
		    (((long)sp - sizeof(struct linux__fpstate)) & ~0xfUL);
		fpsp = (struct linux__fpstate *)sp;

		(void)process_read_fpregs(l, &fpregs);
		bzero(&fpstate, sizeof(fpstate));

		fpstate.cwd = fpregs.fp_fcw;
		fpstate.swd = fpregs.fp_fsw;
		fpstate.twd = fpregs.fp_ftw;
		fpstate.fop = fpregs.fp_fop;
		fpstate.rip = fpregs.fp_rip;
		fpstate.rdp = fpregs.fp_rdp;
		fpstate.mxcsr = fpregs.fp_mxcsr;
		fpstate.mxcsr_mask = fpregs.fp_mxcsr_mask;
		memcpy(&fpstate.st_space, &fpregs.fp_st,
		    sizeof(fpstate.st_space));
		memcpy(&fpstate.xmm_space, &fpregs.fp_xmm,
		    sizeof(fpstate.xmm_space));

		if ((error = copyout(&fpstate, fpsp, sizeof(fpstate))) != 0) {
			sigexit(l, SIGILL);
			return;
		}
	} else {
		fpsp = NULL;
	}

	/*
	 * Populate the rt_sigframe
	 */
	sp = (char *)
	    ((((long)sp - sizeof(struct linux_rt_sigframe)) & ~0xfUL) - 8);
	sfp = (struct linux_rt_sigframe *)sp;
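	/*
	 * The extra 8-byte bias above leaves %rsp 8 bytes off a 16-byte
	 * boundary when the handler is entered, just as after a call
	 * instruction, which matches what the amd64 ABI expects.
	 */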

	bzero(&sigframe, sizeof(sigframe));
	if (ps->sa_sigdesc[sig].sd_vers != 0)
		sigframe.pretcode =
		    (char *)(u_long)ps->sa_sigdesc[sig].sd_tramp;
	else
		sigframe.pretcode = NULL;

	/*
	 * The user context
	 */
	sigframe.uc.luc_flags = 0;
	sigframe.uc.luc_link = NULL;

	/* This is used regardless of SA_ONSTACK in Linux */
	sigframe.uc.luc_stack.ss_sp = p->p_sigctx.ps_sigstk.ss_sp;
	sigframe.uc.luc_stack.ss_size = p->p_sigctx.ps_sigstk.ss_size;
	sigframe.uc.luc_stack.ss_flags = 0;
	if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
		sigframe.uc.luc_stack.ss_flags |= LINUX_SS_ONSTACK;
	if (p->p_sigctx.ps_sigstk.ss_flags & SS_DISABLE)
		sigframe.uc.luc_stack.ss_flags |= LINUX_SS_DISABLE;

	sigframe.uc.luc_mcontext.r8 = tf->tf_r8;
	sigframe.uc.luc_mcontext.r9 = tf->tf_r9;
	sigframe.uc.luc_mcontext.r10 = tf->tf_r10;
	sigframe.uc.luc_mcontext.r11 = tf->tf_r11;
	sigframe.uc.luc_mcontext.r12 = tf->tf_r12;
	sigframe.uc.luc_mcontext.r13 = tf->tf_r13;
	sigframe.uc.luc_mcontext.r14 = tf->tf_r14;
	sigframe.uc.luc_mcontext.r15 = tf->tf_r15;
	sigframe.uc.luc_mcontext.rdi = tf->tf_rdi;
	sigframe.uc.luc_mcontext.rsi = tf->tf_rsi;
	sigframe.uc.luc_mcontext.rbp = tf->tf_rbp;
	sigframe.uc.luc_mcontext.rbx = tf->tf_rbx;
	sigframe.uc.luc_mcontext.rdx = tf->tf_rdx;
	sigframe.uc.luc_mcontext.rax = tf->tf_rax;
	sigframe.uc.luc_mcontext.rcx = tf->tf_rcx;
	sigframe.uc.luc_mcontext.rsp = tf->tf_rsp;
	sigframe.uc.luc_mcontext.rip = tf->tf_rip;
	sigframe.uc.luc_mcontext.eflags = tf->tf_rflags;
	sigframe.uc.luc_mcontext.cs = tf->tf_cs;
	sigframe.uc.luc_mcontext.gs = tf->tf_gs;
	sigframe.uc.luc_mcontext.fs = tf->tf_fs;
	sigframe.uc.luc_mcontext.err = tf->tf_err;
	sigframe.uc.luc_mcontext.trapno = tf->tf_trapno;
	native_to_linux_sigset(&lmask, mask);
	sigframe.uc.luc_mcontext.oldmask = lmask.sig[0];
	sigframe.uc.luc_mcontext.cr2 = (long)l->l_addr->u_pcb.pcb_onfault;
	sigframe.uc.luc_mcontext.fpstate = fpsp;
	native_to_linux_sigset(&sigframe.uc.luc_sigmask, mask);

	/*
	 * the siginfo structure
	 */
	sigframe.info.lsi_signo = native_to_linux_signo[sig];
	sigframe.info.lsi_errno = native_to_linux_errno[ksi->ksi_errno];
	sigframe.info.lsi_code = ksi->ksi_code;

	/* XXX This is a rough conversion, taken from the i386 code */
	switch (sigframe.info.lsi_signo) {
	case LINUX_SIGILL:
	case LINUX_SIGFPE:
	case LINUX_SIGSEGV:
	case LINUX_SIGBUS:
	case LINUX_SIGTRAP:
		sigframe.info._sifields._sigfault._addr = ksi->ksi_addr;
		break;
	case LINUX_SIGCHLD:
		sigframe.info._sifields._sigchld._pid = ksi->ksi_pid;
		sigframe.info._sifields._sigchld._uid = ksi->ksi_uid;
		sigframe.info._sifields._sigchld._utime = ksi->ksi_utime;
		sigframe.info._sifields._sigchld._stime = ksi->ksi_stime;

		if (WCOREDUMP(ksi->ksi_status)) {
			sigframe.info.lsi_code = LINUX_CLD_DUMPED;
			sigframe.info._sifields._sigchld._status =
			    _WSTATUS(ksi->ksi_status);
		} else if (_WSTATUS(ksi->ksi_status)) {
			sigframe.info.lsi_code = LINUX_CLD_KILLED;
			sigframe.info._sifields._sigchld._status =
			    _WSTATUS(ksi->ksi_status);
		} else {
			sigframe.info.lsi_code = LINUX_CLD_EXITED;
			sigframe.info._sifields._sigchld._status =
			    ((ksi->ksi_status & 0xff00U) >> 8);
		}
		break;
	case LINUX_SIGIO:
		sigframe.info._sifields._sigpoll._band = ksi->ksi_band;
		sigframe.info._sifields._sigpoll._fd = ksi->ksi_fd;
		break;
	default:
		sigframe.info._sifields._sigchld._pid = ksi->ksi_pid;
		sigframe.info._sifields._sigchld._uid = ksi->ksi_uid;
		if ((sigframe.info.lsi_signo == LINUX_SIGALRM) ||
		    (sigframe.info.lsi_signo >= LINUX_SIGRTMIN))
			sigframe.info._sifields._timer._sigval.sival_ptr =
			    ksi->ksi_sigval.sival_ptr;
		break;
	}

	if ((error = copyout(&sigframe, sp, sizeof(sigframe))) != 0) {
		sigexit(l, SIGILL);
		return;
	}

	linux_buildcontext(l, catcher, sp);
	tf->tf_rdi = sigframe.info.lsi_signo;
	tf->tf_rax = 0;
	tf->tf_rsi = (long)&sfp->info;
	tf->tf_rdx = (long)&sfp->uc;
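	/*
	 * %rdi, %rsi and %rdx carry the three arguments of a Linux
	 * SA_SIGINFO handler: the signal number and pointers to the
	 * siginfo and ucontext inside the frame we just copied out.
	 */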

	/*
	 * Remember that we are now running on the signal stack
	 */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	return;
}

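/*
 * The following entry points are not implemented yet; they are stubs
 * that simply return 0.
 */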
int
linux_sys_modify_ldt(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	return 0;
}

int
linux_sys_iopl(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	return 0;
}

int
linux_sys_ioperm(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	return 0;
}

dev_t
linux_fakedev(dev, raw)
	dev_t dev;
	int raw;
{
	return 0;
}

int
linux_machdepioctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	return 0;
}

int
linux_sys_rt_sigreturn(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct linux_ucontext *luctx;
	struct trapframe *tf = l->l_md.md_regs;
	struct linux_sigcontext *lsigctx;
	struct linux__fpstate fpstate;
	struct linux_rt_sigframe frame, *fp;
	ucontext_t uctx;
	mcontext_t *mctx;
	struct fxsave64 *fxsave;
	int error;

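	/*
	 * The handler returned through the signal trampoline, popping the
	 * pretcode word off the stack, so the rt_sigframe starts 8 bytes
	 * below the current user %rsp.
	 */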
	fp = (struct linux_rt_sigframe *)(tf->tf_rsp - 8);
	if ((error = copyin(fp, &frame, sizeof(frame))) != 0) {
		sigexit(l, SIGILL);
		return error;
	}
	luctx = &frame.uc;
	lsigctx = &luctx->luc_mcontext;

	bzero(&uctx, sizeof(uctx));
	mctx = (mcontext_t *)&uctx.uc_mcontext;
	fxsave = (struct fxsave64 *)&mctx->__fpregs;

	/*
	 * Set the flags. Linux always has CPU, stack and signal state;
	 * FPU state is optional. uc_flags is not used to tell what we have.
	 */
	uctx.uc_flags = (_UC_SIGMASK|_UC_CPU|_UC_STACK|_UC_CLRSTACK);
	if (lsigctx->fpstate != NULL)
		uctx.uc_flags |= _UC_FPU;
	uctx.uc_link = NULL;

	/*
	 * Signal set
	 */
	linux_to_native_sigset(&uctx.uc_sigmask, &luctx->luc_sigmask);

	/*
	 * CPU state
	 */
	mctx->__gregs[_REG_R8] = lsigctx->r8;
	mctx->__gregs[_REG_R9] = lsigctx->r9;
	mctx->__gregs[_REG_R10] = lsigctx->r10;
	mctx->__gregs[_REG_R11] = lsigctx->r11;
	mctx->__gregs[_REG_R12] = lsigctx->r12;
	mctx->__gregs[_REG_R13] = lsigctx->r13;
	mctx->__gregs[_REG_R14] = lsigctx->r14;
	mctx->__gregs[_REG_R15] = lsigctx->r15;
	mctx->__gregs[_REG_RDI] = lsigctx->rdi;
	mctx->__gregs[_REG_RSI] = lsigctx->rsi;
	mctx->__gregs[_REG_RBP] = lsigctx->rbp;
	mctx->__gregs[_REG_RBX] = lsigctx->rbx;
	mctx->__gregs[_REG_RAX] = lsigctx->rax;
	mctx->__gregs[_REG_RDX] = lsigctx->rdx;
	mctx->__gregs[_REG_RCX] = lsigctx->rcx;
	mctx->__gregs[_REG_RIP] = lsigctx->rip;
	mctx->__gregs[_REG_RFL] = lsigctx->eflags;
	mctx->__gregs[_REG_CS] = lsigctx->cs;
	mctx->__gregs[_REG_GS] = lsigctx->gs;
	mctx->__gregs[_REG_FS] = lsigctx->fs;
	mctx->__gregs[_REG_ERR] = lsigctx->err;
	mctx->__gregs[_REG_TRAPNO] = lsigctx->trapno;
	mctx->__gregs[_REG_ES] = tf->tf_es;
	mctx->__gregs[_REG_DS] = tf->tf_ds;
	mctx->__gregs[_REG_URSP] = lsigctx->rsp; /* XXX */
	mctx->__gregs[_REG_SS] = tf->tf_ss;

	/*
	 * FPU state
	 */
	if (lsigctx->fpstate != NULL) {
		error = copyin(lsigctx->fpstate, &fpstate, sizeof(fpstate));
		if (error != 0) {
			sigexit(l, SIGILL);
			return error;
		}

		fxsave->fx_fcw = fpstate.cwd;
		fxsave->fx_fsw = fpstate.swd;
		fxsave->fx_ftw = fpstate.twd;
		fxsave->fx_fop = fpstate.fop;
		fxsave->fx_rip = fpstate.rip;
		fxsave->fx_rdp = fpstate.rdp;
		fxsave->fx_mxcsr = fpstate.mxcsr;
		fxsave->fx_mxcsr_mask = fpstate.mxcsr_mask;
		memcpy(&fxsave->fx_st, &fpstate.st_space,
		    sizeof(fxsave->fx_st));
		memcpy(&fxsave->fx_xmm, &fpstate.xmm_space,
		    sizeof(fxsave->fx_xmm));
	}

	/*
	 * And the stack
	 */
	uctx.uc_stack.ss_flags = 0;
	if (luctx->luc_stack.ss_flags & LINUX_SS_ONSTACK)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;

	if (luctx->luc_stack.ss_flags & LINUX_SS_DISABLE)
		uctx.uc_stack.ss_flags |= SS_DISABLE;

	uctx.uc_stack.ss_sp = luctx->luc_stack.ss_sp;
	uctx.uc_stack.ss_size = luctx->luc_stack.ss_size;

	/*
	 * And let setucontext deal with that.
	 */
	return setucontext(l, &uctx);
}

int
linux_sys_arch_prctl(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct linux_sys_arch_prctl_args /* {
		syscallarg(int) code;
		syscallarg(unsigned long) addr;
	} */ *uap = v;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf = l->l_md.md_regs;
	int error;
	uint64_t taddr;

	switch (SCARG(uap, code)) {
	case LINUX_ARCH_SET_GS:
		taddr = SCARG(uap, addr);
		if (taddr >= VM_MAXUSER_ADDRESS)
			return EINVAL;
		pcb->pcb_gs = taddr;
		pcb->pcb_flags |= PCB_GS64;
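		/*
		 * For the running lwp, load the new base right away.  The
		 * user %gs base is kept in MSR_KERNELGSBASE while in the
		 * kernel and is swapped in by swapgs on return to userland.
		 */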
		if (l == curlwp)
			wrmsr(MSR_KERNELGSBASE, taddr);
		break;

	case LINUX_ARCH_GET_GS:
		if (pcb->pcb_flags & PCB_GS64)
			taddr = pcb->pcb_gs;
		else {
			error = memseg_baseaddr(l, tf->tf_gs, NULL, 0, &taddr);
			if (error != 0)
				return error;
		}
		error = copyout(&taddr, (char *)SCARG(uap, addr), 8);
		if (error != 0)
			return error;
		break;

	case LINUX_ARCH_SET_FS:
		taddr = SCARG(uap, addr);
		if (taddr >= VM_MAXUSER_ADDRESS)
			return EINVAL;
		pcb->pcb_fs = taddr;
		pcb->pcb_flags |= PCB_FS64;
		if (l == curlwp)
			wrmsr(MSR_FSBASE, taddr);
		break;

	case LINUX_ARCH_GET_FS:
		if (pcb->pcb_flags & PCB_FS64)
			taddr = pcb->pcb_fs;
		else {
			error = memseg_baseaddr(l, tf->tf_fs, NULL, 0, &taddr);
			if (error != 0)
				return error;
		}
		error = copyout(&taddr, (char *)SCARG(uap, addr), 8);
		if (error != 0)
			return error;
		break;

	default:
#ifdef DEBUG_LINUX
		printf("linux_sys_arch_prctl: unexpected code %d\n",
		    SCARG(uap, code));
#endif
		return EINVAL;
	}

	return 0;
}

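/*
 * Map Linux vsyscall slots to ordinary Linux syscall numbers: slot 0 is
 * gettimeofday and slot 1 is time; the remaining slots (getcpu and the
 * unused fourth entry) are not handled and fall through to nosys.
 */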
const int linux_vsyscall_to_syscall[] = {
	LINUX_SYS_gettimeofday,
	LINUX_SYS_time,
	LINUX_SYS_nosys,
	LINUX_SYS_nosys,
};

int
linux_usertrap(struct lwp *l, vaddr_t trapaddr, void *arg)
{
	struct trapframe *tf = arg;
	uint64_t retaddr;
	int vsyscallnr;

	/*
	 * Check for a vsyscall. %rip must be the fault address,
	 * and the address must be in the Linux vsyscall area.
	 * Also, vsyscalls are only done at 1024-byte boundaries.
	 */

	if (__predict_true(trapaddr < LINUX_VSYSCALL_START))
		return 0;

	if (trapaddr != tf->tf_rip)
		return 0;

	if ((tf->tf_rip & (LINUX_VSYSCALL_SIZE - 1)) != 0)
		return 0;

	vsyscallnr = (tf->tf_rip - LINUX_VSYSCALL_START) / LINUX_VSYSCALL_SIZE;

	if (vsyscallnr > LINUX_VSYSCALL_MAXNR)
		return 0;

	/*
	 * Get the return address from the top of the stack,
	 * and fix up the return address.
	 * This assumes the faulting instruction was callq *reg,
	 * which is the only way that vsyscalls are ever entered.
	 */
	if (copyin((void *)tf->tf_rsp, &retaddr, sizeof retaddr) != 0)
		return 0;
	tf->tf_rip = retaddr;
	tf->tf_rax = linux_vsyscall_to_syscall[vsyscallnr];
	tf->tf_rsp += 8;	/* "pop" the return address */

#if 0
	printf("usertrap: rip %p rsp %p retaddr %p vsys %d sys %d\n",
	    (void *)tf->tf_rip, (void *)tf->tf_rsp, (void *)retaddr,
	    vsyscallnr, (int)tf->tf_rax);
#endif

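	/*
	 * The trapframe now looks as if the process had issued the
	 * corresponding syscall directly, so hand it to the normal
	 * Linux syscall path.
	 */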
	(*l->l_proc->p_md.md_syscall)(tf);

	return 1;
}

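/*
 * Point the trapframe at the signal handler: user code and data segments,
 * %rip at the handler and %rsp at the frame built by linux_sendsig().
 */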
static void
linux_buildcontext(struct lwp *l, void *catcher, void *f)
{
	struct trapframe *tf = l->l_md.md_regs;

	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_rip = (u_int64_t)catcher;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
	tf->tf_rflags &= ~(PSL_T|PSL_VM|PSL_AC);
	tf->tf_rsp = (u_int64_t)f;
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
}