/*	$NetBSD: frameasm.h,v 1.20 2012/07/15 15:17:56 dsl Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; the hypervisor handles it */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
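
/*
 * Under Xen, the iretq above becomes a jump into the hypercall page:
 * the __HYPERVISOR_iret entry is jumped to rather than called, since
 * it never returns. As we read the Xen iret hypercall ABI, the pushed
 * zero supplies the hypercall's "flags" word (VGCF_in_syscall clear).
 */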
#define XEN_ONLY2(x,y) x,y
#define NOT_XEN(x)

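/*
 * Under Xen there is no hardware interrupt flag to toggle: CLI/STI
 * instead set and clear the event-channel upcall mask of the current
 * vCPU. temp_reg names a scratch register without its "%r" prefix;
 * for example CLI(11) expands to
 *
 *	movq	CPUVAR(VCPU),%r11
 *	movb	$1,EVTCHN_UPCALL_MASK(%r11)
 */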
#define CLI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#else /* XEN */
#define XEN_ONLY2(x,y)
#define NOT_XEN(x) x
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif /* XEN */

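/*
 * SWAPGS is the real swapgs instruction (swap the user and kernel
 * %gs bases) on bare metal; under Xen it expands to nothing, since
 * the hypervisor maintains the kernel's %gs and "swapgs" is defined
 * away above.
 */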
#define SWAPGS	NOT_XEN(swapgs)

/*
 * These are used on interrupt or trap entry or exit.
 */
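/*
 * INTR_SAVE_GPRS and INTR_RESTORE_GPRS spill and reload the
 * general-purpose registers to and from the struct trapframe at
 * %rsp; the TF_* offsets are generated into assym.h. The trailing
 * cld restores the forward string direction the ABI requires, since
 * interrupted user code may have left DF set.
 */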
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld

#define INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

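/*
 * INTRENTRY_L allocates a trapframe and saves the GPRs, then tests
 * the saved %cs: a trap from kernel mode (SEL_UPL clear) branches to
 * kernel_trap, while a trap from user mode runs usertrap, switches
 * to the kernel %gs and saves the user segment registers. INTRENTRY
 * is the common case with no extra user-mode code.
 */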
#define INTRENTRY_L(kernel_trap, usertrap) \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	kernel_trap		; \
	usertrap			; \
	SWAPGS				; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)

#define INTRENTRY \
	INTRENTRY_L(98f,)		; \
98:

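/*
 * INTRFASTEXIT undoes INTRENTRY. A return to kernel mode skips
 * straight to popping the frame; a return to user mode first reloads
 * %es/%ds and swaps %gs back, with interrupts disabled so nothing
 * fires while the user's segment registers are live. The final addq
 * also discards the trap number and error code pushed before
 * INTRENTRY.
 */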
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS		; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* Interrupted %cs */ ; \
	je	99f			; \
/* Disable interrupts until the iret: user segment registers are loaded next. */ \
	NOT_XEN(cli;)			  \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
	SWAPGS				; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq
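
/*
 * A minimal sketch of a trap stub built from the two macros above
 * (hypothetical vector name; the real stubs live elsewhere in the
 * amd64 locore sources):
 *
 *	IDTVEC(trap_example)
 *		pushq	$0		(dummy error code)
 *		pushq	$T_BPTFLT	(trap type)
 *		INTRENTRY
 *		... build up the call to trap() ...
 *		INTRFASTEXIT
 */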
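/*
 * INTR_RECURSE_HWFRAME pushes a synthetic hardware interrupt frame
 * (%ss, %rsp, %rflags, %cs, %rip) so deferred interrupt processing
 * can re-enter the normal entry path as if the CPU had just taken
 * the interrupt; the caller is expected to have placed the resume
 * address in %r13, which lands in the frame's %rip slot.
 */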
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
/* XEN: we must fix up %cs, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	\
	pushq	%r13			;
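/*
 * Deferred pmap switching: a context switch leaves the new lwp's
 * user pmap unloaded and sets ci_want_pmapload instead.
 * DO_DEFERRED_SWITCH performs the load on the way back to user
 * space; CHECK_DEFERRED_SWITCH only sets the flags so the caller
 * can branch on the result itself.
 */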
#define DO_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
1:

#define CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)
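/*
 * AST handling: md_astpending is set in the lwp's machine-dependent
 * area when a signal or preemption must be serviced before returning
 * to user mode; reg must point at the lwp (normally curlwp).
 */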
#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
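
/*
 * A sketch of the usual return-to-user loop (hypothetical labels;
 * the real code lives in the trap return paths):
 *
 *	movq	CPUVAR(CURLWP),%r14
 * 9:	CHECK_ASTPENDING(%r14)
 *	je	8f
 *	CLEAR_ASTPENDING(%r14)
 *	... mark the frame T_ASTFLT and call trap() ...
 *	jmp	9b
 * 8:	INTRFASTEXIT
 */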

#endif /* _AMD64_MACHINE_FRAMEASM_H */