/*	$NetBSD: frameasm.h,v 1.38.2.2 2020/04/13 08:03:30 martin Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#include "opt_svs.h"
#include "opt_kcov.h"
#include "opt_kmsan.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XENPV
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; it is done by the hypervisor. */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#define	XEN_ONLY2(x,y)	x,y
#define	NOT_XEN(x)

#define CLI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;	\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;	\
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#else /* XENPV */
#define	XEN_ONLY2(x,y)
#define	NOT_XEN(x)	x
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif	/* XENPV */

#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SVS_ENTER	5
#define HP_NAME_SVS_LEAVE	6
#define HP_NAME_SVS_ENTER_ALT	7
#define HP_NAME_SVS_LEAVE_ALT	8
#define HP_NAME_IBRS_ENTER	9
#define HP_NAME_IBRS_LEAVE	10
#define HP_NAME_SVS_ENTER_NMI	11
#define HP_NAME_SVS_LEAVE_NMI	12
#define HP_NAME_MDS_LEAVE	13

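/*
 * Hot-patching. HOTPATCH(name, size) drops a (name, size, address)
 * record into the .rodata.hotpatch section; the address is that of the
 * 'size' placeholder bytes that immediately follow the macro. At boot,
 * the patch code (see x86/patch.c) walks these records and, depending
 * on the CPU features detected, overwrites each placeholder with the
 * real instruction sequence for 'name'.
 */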
#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.quad		123b			; \
	.popsection

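/*
 * The 3-byte placeholder below is a NOP (0F 1F 00). On CPUs with SMAP,
 * it is hot-patched to CLAC (SMAP_ENABLE: kernel accesses to user pages
 * fault again) or STAC (SMAP_DISABLE: temporarily permit such accesses).
 */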
#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

/*
 * IBRS
 */

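/*
 * The placeholders below are a short jump (EB xx) over INT3 filler. On
 * CPUs providing the SPEC_CTRL MSR, they are hot-patched with code that
 * sets the IBRS bit on kernel entry and clears it on exit, restricting
 * indirect branch speculation while in the kernel.
 */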
#define IBRS_ENTER_BYTES	12
#define IBRS_ENTER \
	HOTPATCH(HP_NAME_IBRS_ENTER, IBRS_ENTER_BYTES)	; \
	NOIBRS_ENTER
#define NOIBRS_ENTER \
	.byte 0xEB, (IBRS_ENTER_BYTES-2)	/* jmp */	; \
	.fill	(IBRS_ENTER_BYTES-2),1,0xCC

#define IBRS_LEAVE_BYTES	12
#define IBRS_LEAVE \
	HOTPATCH(HP_NAME_IBRS_LEAVE, IBRS_LEAVE_BYTES)	; \
	NOIBRS_LEAVE
#define NOIBRS_LEAVE \
	.byte 0xEB, (IBRS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(IBRS_LEAVE_BYTES-2),1,0xCC

/*
 * MDS
 */

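/*
 * Same placeholder scheme as IBRS above. On CPUs vulnerable to MDS, it
 * is hot-patched with a VERW-based sequence that flushes the CPU's
 * internal buffers on the way back to userland.
 */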
#define MDS_LEAVE_BYTES	10
#define MDS_LEAVE \
	HOTPATCH(HP_NAME_MDS_LEAVE, MDS_LEAVE_BYTES)	; \
	NOMDS_LEAVE
#define NOMDS_LEAVE \
	.byte 0xEB, (MDS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(MDS_LEAVE_BYTES-2),1,0xCC

#define	SWAPGS	NOT_XEN(swapgs)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)

#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

#define TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
#define TEXT_USER_END	.popsection

#ifdef SVS

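/*
 * SVS (Separate Virtual Space) unmaps the kernel from the user page
 * tables. When SVS is enabled at boot, the ENTER/LEAVE placeholders
 * below are hot-patched with code that switches %cr3 between the user
 * and kernel page directories on each kernel entry and exit. The UTLS
 * area is a small per-CPU region that remains mapped in user space and
 * holds the values needed to perform that switch.
 */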
/* XXX: put this somewhere else */
#define SVS_UTLS		0xffffff0000000000 /* PMAP_PCPU_BASE */
#define UTLS_KPDIRPA		0
#define UTLS_SCRATCH		8
#define UTLS_RSP0		16

#define SVS_ENTER_BYTES	22
#define NOSVS_ENTER \
	.byte 0xEB, (SVS_ENTER_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_BYTES-2),1,0xCC
#define SVS_ENTER \
	HOTPATCH(HP_NAME_SVS_ENTER, SVS_ENTER_BYTES)	; \
	NOSVS_ENTER

#define SVS_LEAVE_BYTES	21
#define NOSVS_LEAVE \
	.byte 0xEB, (SVS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_BYTES-2),1,0xCC
#define SVS_LEAVE \
	HOTPATCH(HP_NAME_SVS_LEAVE, SVS_LEAVE_BYTES)	; \
	NOSVS_LEAVE

#define SVS_ENTER_ALT_BYTES	23
#define NOSVS_ENTER_ALTSTACK \
	.byte 0xEB, (SVS_ENTER_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_ALT_BYTES-2),1,0xCC
#define SVS_ENTER_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_ENTER_ALT, SVS_ENTER_ALT_BYTES)	; \
	NOSVS_ENTER_ALTSTACK

#define SVS_LEAVE_ALT_BYTES	22
#define NOSVS_LEAVE_ALTSTACK \
	.byte 0xEB, (SVS_LEAVE_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_ALT_BYTES-2),1,0xCC
#define SVS_LEAVE_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_LEAVE_ALT, SVS_LEAVE_ALT_BYTES)	; \
	NOSVS_LEAVE_ALTSTACK

#define SVS_ENTER_NMI_BYTES	22
#define NOSVS_ENTER_NMI \
	.byte 0xEB, (SVS_ENTER_NMI_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_NMI_BYTES-2),1,0xCC
#define SVS_ENTER_NMI \
	HOTPATCH(HP_NAME_SVS_ENTER_NMI, SVS_ENTER_NMI_BYTES)	; \
	NOSVS_ENTER_NMI

#define SVS_LEAVE_NMI_BYTES	11
#define NOSVS_LEAVE_NMI \
	.byte 0xEB, (SVS_LEAVE_NMI_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_NMI_BYTES-2),1,0xCC
#define SVS_LEAVE_NMI \
	HOTPATCH(HP_NAME_SVS_LEAVE_NMI, SVS_LEAVE_NMI_BYTES)	; \
	NOSVS_LEAVE_NMI

#else
#define SVS_ENTER	/* nothing */
#define SVS_ENTER_NMI	/* nothing */
#define SVS_LEAVE	/* nothing */
#define SVS_LEAVE_NMI	/* nothing */
#define SVS_ENTER_ALTSTACK	/* nothing */
#define SVS_LEAVE_ALTSTACK	/* nothing */
#endif

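/*
 * KMSAN instrumentation. On entry, mark the trapframe just built (plus
 * the error code and the hardware-pushed frame, hence the +16+40) as
 * initialized, and switch to the interrupt-context shadow state; LEAVE
 * undoes this. KMSAN_INIT_ARG/KMSAN_INIT_RET mark 'sz' bytes of an
 * argument or return value as initialized, preserving all caller-saved
 * registers around the call.
 */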
#ifdef KMSAN
#define KMSAN_ENTER \
	movq	%rsp,%rdi		; \
	movq	$TF_REGSIZE+16+40,%rsi	; \
	xorq	%rdx,%rdx		; \
	callq	kmsan_mark		; \
	callq	kmsan_intr_enter
#define KMSAN_LEAVE \
	pushq	%rbp			; \
	movq	%rsp,%rbp		; \
	callq	kmsan_intr_leave	; \
	popq	%rbp
#define KMSAN_INIT_ARG(sz) \
	pushq	%rax			; \
	pushq	%rcx			; \
	pushq	%rdx			; \
	pushq	%rsi			; \
	pushq	%rdi			; \
	pushq	%r8			; \
	pushq	%r9			; \
	pushq	%r10			; \
	pushq	%r11			; \
	movq	$sz,%rdi		; \
	callq	_C_LABEL(kmsan_init_arg); \
	popq	%r11			; \
	popq	%r10			; \
	popq	%r9			; \
	popq	%r8			; \
	popq	%rdi			; \
	popq	%rsi			; \
	popq	%rdx			; \
	popq	%rcx			; \
	popq	%rax
#define KMSAN_INIT_RET(sz) \
	pushq	%rax			; \
	pushq	%rcx			; \
	pushq	%rdx			; \
	pushq	%rsi			; \
	pushq	%rdi			; \
	pushq	%r8			; \
	pushq	%r9			; \
	pushq	%r10			; \
	pushq	%r11			; \
	movq	$sz,%rdi		; \
	callq	_C_LABEL(kmsan_init_ret); \
	popq	%r11			; \
	popq	%r10			; \
	popq	%r9			; \
	popq	%r8			; \
	popq	%rdi			; \
	popq	%rsi			; \
	popq	%rdx			; \
	popq	%rcx			; \
	popq	%rax
#else
#define KMSAN_ENTER		/* nothing */
#define KMSAN_LEAVE		/* nothing */
#define KMSAN_INIT_ARG(sz)	/* nothing */
#define KMSAN_INIT_RET(sz)	/* nothing */
#endif

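/*
 * KCOV does not trace interrupt context; bumping IDEPTH makes the
 * surrounded code appear to run in an interrupt, so coverage recording
 * is suppressed there.
 */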
#ifdef KCOV
#define KCOV_DISABLE \
	incl	CPUVAR(IDEPTH)
#define KCOV_ENABLE \
	decl	CPUVAR(IDEPTH)
#else
#define KCOV_DISABLE		/* nothing */
#define KCOV_ENABLE		/* nothing */
#endif

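/*
 * INTRENTRY builds the trapframe on entry to the kernel. Entries from
 * user mode (SEL_UPL set in the saved %cs) additionally switch GS to
 * the kernel base, raise the IBRS barrier, switch to the kernel page
 * tables (SVS) and save the user segment registers; entries from kernel
 * mode jump straight to the KMSAN hook at 98:.
 */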
#define	INTRENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	SMAP_ENABLE			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	98f			; \
	SWAPGS				; \
	IBRS_ENTER			; \
	SVS_ENTER			; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)		; \
98:	KMSAN_ENTER

#define INTRFASTEXIT \
	jmp	intrfastexit

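/*
 * Push a hardware-style interrupt frame (%ss, %rsp, %rflags, %cs, %rip)
 * by hand, so that a software recursion into an interrupt handler looks
 * like a real interrupt; the return address is expected in %r13.
 */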
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	pushq	$GSEL(GCODE_SEL,SEL_KPL); \
/* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	\
	pushq	%r13			;

#define INTR_RECURSE_ENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	KMSAN_ENTER

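/*
 * CHECK_DEFERRED_SWITCH tests whether a pmap switch was deferred
 * (WANT_PMAPLOAD) and must be done before returning to user mode; the
 * AST macros test and clear the per-LWP "AST pending" flag.
 */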
#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 *	%r14 is curlwp, must not be modified
 *	%rbx must not be modified
 */
#define HANDLE_DEFERRED_FPU	\
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
	jnz	1f					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
1:

#endif /* _AMD64_MACHINE_FRAMEASM_H */