/*	$NetBSD: frameasm.h,v 1.40 2018/07/13 14:11:02 martin Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#include "opt_svs.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; the hypervisor takes care of it */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#define XEN_ONLY2(x,y)	x,y
#define NOT_XEN(x)

#define CLI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ; \
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ; \
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

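/*
 * temp_reg is pasted onto "%r", so CLI(cx) for instance uses %rcx as its
 * scratch register.  Masking the vCPU's event-channel upcalls this way is
 * roughly the PV equivalent of toggling the hardware IF flag with cli/sti.
 */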
#else /* XEN */
#define XEN_ONLY2(x,y)
#define NOT_XEN(x)	x
#define CLI(temp_reg)	cli
#define STI(temp_reg)	sti
#endif	/* XEN */

#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SVS_ENTER	5
#define HP_NAME_SVS_LEAVE	6
#define HP_NAME_SVS_ENTER_ALT	7
#define HP_NAME_SVS_LEAVE_ALT	8
#define HP_NAME_IBRS_ENTER	9
#define HP_NAME_IBRS_LEAVE	10
#define HP_NAME_SVS_ENTER_NMI	11
#define HP_NAME_SVS_LEAVE_NMI	12

#define HOTPATCH(name, size) \
123: ; \
	.pushsection .rodata.hotpatch, "a" ; \
	.byte name ; \
	.byte size ; \
	.quad 123b ; \
	.popsection
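/*
 * Each use of HOTPATCH() drops the local label 123: at the patch site and
 * records a { name, size, address } triple in .rodata.hotpatch.  At boot
 * the patch code (see x86/patch.c) walks these records and, depending on
 * CPU features and kernel options, overwrites the "size" bytes at
 * "address" with the real instruction sequence for "name".
 */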

#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3) ; \
	.byte 0x0F, 0x1F, 0x00 ; \

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3) ; \
	.byte 0x0F, 0x1F, 0x00 ; \

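/*
 * The three bytes above are a 3-byte NOP (nopl (%rax)).  On CPUs with SMAP
 * the hotpatch code replaces them with clac (SMAP_ENABLE) or stac
 * (SMAP_DISABLE), which are also three bytes long; without SMAP they stay
 * NOPs.
 */
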
/*
 * IBRS
 */

#define IBRS_ENTER_BYTES	17
#define IBRS_ENTER \
	HOTPATCH(HP_NAME_IBRS_ENTER, IBRS_ENTER_BYTES) ; \
	NOIBRS_ENTER
#define NOIBRS_ENTER \
	.byte 0xEB, (IBRS_ENTER_BYTES-2) /* jmp */ ; \
	.fill (IBRS_ENTER_BYTES-2),1,0xCC

#define IBRS_LEAVE_BYTES	21
#define IBRS_LEAVE \
	HOTPATCH(HP_NAME_IBRS_LEAVE, IBRS_LEAVE_BYTES) ; \
	NOIBRS_LEAVE
#define NOIBRS_LEAVE \
	.byte 0xEB, (IBRS_LEAVE_BYTES-2) /* jmp */ ; \
	.fill (IBRS_LEAVE_BYTES-2),1,0xCC
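/*
 * The NOIBRS_* default is a short jmp (0xEB) over a pad of int3 (0xCC)
 * bytes.  When the CPU supports IBRS, the pad is hotpatched with code that
 * toggles the IBRS bit in the SPEC_CTRL MSR on kernel entry and exit; the
 * *_BYTES constants must match the size of the patched sequence.
 */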

#define SWAPGS	NOT_XEN(swapgs)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq %rdi,TF_RDI(%rsp) ; \
	movq %rsi,TF_RSI(%rsp) ; \
	movq %rdx,TF_RDX(%rsp) ; \
	movq %rcx,TF_RCX(%rsp) ; \
	movq %r8,TF_R8(%rsp) ; \
	movq %r9,TF_R9(%rsp) ; \
	movq %r10,TF_R10(%rsp) ; \
	movq %r11,TF_R11(%rsp) ; \
	movq %r12,TF_R12(%rsp) ; \
	movq %r13,TF_R13(%rsp) ; \
	movq %r14,TF_R14(%rsp) ; \
	movq %r15,TF_R15(%rsp) ; \
	movq %rbp,TF_RBP(%rsp) ; \
	movq %rbx,TF_RBX(%rsp) ; \
	movq %rax,TF_RAX(%rsp)

#define INTR_RESTORE_GPRS \
	movq TF_RDI(%rsp),%rdi ; \
	movq TF_RSI(%rsp),%rsi ; \
	movq TF_RDX(%rsp),%rdx ; \
	movq TF_RCX(%rsp),%rcx ; \
	movq TF_R8(%rsp),%r8 ; \
	movq TF_R9(%rsp),%r9 ; \
	movq TF_R10(%rsp),%r10 ; \
	movq TF_R11(%rsp),%r11 ; \
	movq TF_R12(%rsp),%r12 ; \
	movq TF_R13(%rsp),%r13 ; \
	movq TF_R14(%rsp),%r14 ; \
	movq TF_R15(%rsp),%r15 ; \
	movq TF_RBP(%rsp),%rbp ; \
	movq TF_RBX(%rsp),%rbx ; \
	movq TF_RAX(%rsp),%rax
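/*
 * The TF_* names are assym.h offsets generated from struct trapframe, so
 * each store/load above addresses its slot directly and the order of the
 * moves does not matter.
 */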

#define TEXT_USER_BEGIN	.pushsection .text.user, "ax"
#define TEXT_USER_END	.popsection
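/*
 * Under SVS, .text.user collects the small pieces of entry/exit code that
 * must stay mapped in the user page tables, because they run before
 * SVS_ENTER has switched %cr3 or after SVS_LEAVE has switched it back.
 */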

#ifdef SVS

/* XXX: put this somewhere else */
#define SVS_UTLS	0xffffc00000000000 /* PMAP_PCPU_BASE */
#define UTLS_KPDIRPA	0
#define UTLS_SCRATCH	8
#define UTLS_RSP0	16
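/*
 * Rough layout of the per-CPU UTLS area, which is mapped in both the user
 * and the kernel page tables: the physical address of the kernel's top
 * level page directory (loaded into %cr3 by SVS_ENTER), a scratch slot,
 * and the kernel rsp0.
 */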

#define SVS_ENTER_BYTES	22
#define NOSVS_ENTER \
	.byte 0xEB, (SVS_ENTER_BYTES-2) /* jmp */ ; \
	.fill (SVS_ENTER_BYTES-2),1,0xCC
#define SVS_ENTER \
	HOTPATCH(HP_NAME_SVS_ENTER, SVS_ENTER_BYTES) ; \
	NOSVS_ENTER

#define SVS_LEAVE_BYTES	31
#define NOSVS_LEAVE \
	.byte 0xEB, (SVS_LEAVE_BYTES-2) /* jmp */ ; \
	.fill (SVS_LEAVE_BYTES-2),1,0xCC
#define SVS_LEAVE \
	HOTPATCH(HP_NAME_SVS_LEAVE, SVS_LEAVE_BYTES) ; \
	NOSVS_LEAVE

#define SVS_ENTER_ALT_BYTES	23
#define NOSVS_ENTER_ALTSTACK \
	.byte 0xEB, (SVS_ENTER_ALT_BYTES-2) /* jmp */ ; \
	.fill (SVS_ENTER_ALT_BYTES-2),1,0xCC
#define SVS_ENTER_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_ENTER_ALT, SVS_ENTER_ALT_BYTES) ; \
	NOSVS_ENTER_ALTSTACK

#define SVS_LEAVE_ALT_BYTES	22
#define NOSVS_LEAVE_ALTSTACK \
	.byte 0xEB, (SVS_LEAVE_ALT_BYTES-2) /* jmp */ ; \
	.fill (SVS_LEAVE_ALT_BYTES-2),1,0xCC
#define SVS_LEAVE_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_LEAVE_ALT, SVS_LEAVE_ALT_BYTES) ; \
	NOSVS_LEAVE_ALTSTACK

#define SVS_ENTER_NMI_BYTES	22
#define NOSVS_ENTER_NMI \
	.byte 0xEB, (SVS_ENTER_NMI_BYTES-2) /* jmp */ ; \
	.fill (SVS_ENTER_NMI_BYTES-2),1,0xCC
#define SVS_ENTER_NMI \
	HOTPATCH(HP_NAME_SVS_ENTER_NMI, SVS_ENTER_NMI_BYTES) ; \
	NOSVS_ENTER_NMI

#define SVS_LEAVE_NMI_BYTES	11
#define NOSVS_LEAVE_NMI \
	.byte 0xEB, (SVS_LEAVE_NMI_BYTES-2) /* jmp */ ; \
	.fill (SVS_LEAVE_NMI_BYTES-2),1,0xCC
#define SVS_LEAVE_NMI \
	HOTPATCH(HP_NAME_SVS_LEAVE_NMI, SVS_LEAVE_NMI_BYTES) ; \
	NOSVS_LEAVE_NMI
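/*
 * Like NOIBRS_*, each SVS_* site starts out as a jmp over int3 padding.
 * When SVS is enabled at boot, the sites are hotpatched with code that
 * switches %cr3 between the user and kernel page tables; as the names
 * suggest, the _ALT variants cover entries arriving on an alternate stack
 * and the _NMI variants cover NMI entry, hence the differing sizes.
 */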

#else
#define SVS_ENTER	/* nothing */
#define SVS_ENTER_NMI	/* nothing */
#define SVS_LEAVE	/* nothing */
#define SVS_LEAVE_NMI	/* nothing */
#define SVS_ENTER_ALTSTACK	/* nothing */
#define SVS_LEAVE_ALTSTACK	/* nothing */
#endif

#define INTRENTRY \
	subq $TF_REGSIZE,%rsp ; \
	INTR_SAVE_GPRS ; \
	cld ; \
	SMAP_ENABLE ; \
	testb $SEL_UPL,TF_CS(%rsp) ; \
	je 98f ; \
	SWAPGS ; \
	IBRS_ENTER ; \
	SVS_ENTER ; \
	movw %gs,TF_GS(%rsp) ; \
	movw %fs,TF_FS(%rsp) ; \
	movw %es,TF_ES(%rsp) ; \
	movw %ds,TF_DS(%rsp) ; \
98:

#define INTRFASTEXIT \
	jmp intrfastexit
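/*
 * Illustrative sketch only (the real stubs live in amd64_trap.S and
 * vector.S, and differ in detail): a trap entry pushes an error code and a
 * trap number, builds the frame with INTRENTRY, calls the C handler with
 * the trapframe pointer, and leaves through INTRFASTEXIT.
 *
 *	IDTVEC(trap_example)
 *		pushq $0
 *		pushq $T_EXAMPLE
 *		INTRENTRY
 *		movq %rsp,%rdi
 *		call _C_LABEL(trap)
 *		INTRFASTEXIT
 *	IDTVEC_END(trap_example)
 */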

#define INTR_RECURSE_HWFRAME \
	movq %rsp,%r10 ; \
	movl %ss,%r11d ; \
	pushq %r11 ; \
	pushq %r10 ; \
	pushfq ; \
	pushq $GSEL(GCODE_SEL,SEL_KPL); \
/* XEN: We must fix up CS, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb $0xfc,(%rsp);) \
	pushq %r13 ;
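/*
 * This fakes the five-quadword frame the CPU would have pushed for an
 * interrupt taken in kernel mode (%ss, %rsp, %rflags, %cs, %rip); the
 * caller is expected to have put the desired return address in %r13.
 */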

#define INTR_RECURSE_ENTRY \
	subq $TF_REGSIZE,%rsp ; \
	INTR_SAVE_GPRS ; \
	cld

#define CHECK_DEFERRED_SWITCH \
	cmpl $0, CPUVAR(WANT_PMAPLOAD)

#define CHECK_ASTPENDING(reg)	cmpl $0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl $0, L_MD_ASTPENDING(reg)
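/*
 * These only set the flags for a following conditional branch; the caller
 * typically jnz's to the pmap-reload or AST-handling path.  For the
 * *_ASTPENDING macros, "reg" must hold a struct lwp pointer.
 */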

#endif /* _AMD64_MACHINE_FRAMEASM_H */