/*	$NetBSD: frameasm.h,v 1.31 2018/01/21 11:21:40 maxv Exp $	*/
2
3 #ifndef _AMD64_MACHINE_FRAMEASM_H
4 #define _AMD64_MACHINE_FRAMEASM_H
5
6 #ifdef _KERNEL_OPT
7 #include "opt_xen.h"
8 #include "opt_svs.h"
9 #endif
10
11 /*
12 * Macros to define pushing/popping frames for interrupts, traps
13 * and system calls. Currently all the same; will diverge later.
14 */
15
16 #ifdef XEN
17 #define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
18 /* Xen do not need swapgs, done by hypervisor */
19 #define swapgs
20 #define iretq pushq $0 ; jmp HYPERVISOR_iret
21 #define XEN_ONLY2(x,y) x,y
22 #define NOT_XEN(x)
23
24 #define CLI(temp_reg) \
25 movq CPUVAR(VCPU),%r ## temp_reg ; \
26 movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);
27
28 #define STI(temp_reg) \
29 movq CPUVAR(VCPU),%r ## temp_reg ; \
30 movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);
31
32 #else /* XEN */
33 #define XEN_ONLY2(x,y)
34 #define NOT_XEN(x) x
35 #define CLI(temp_reg) cli
36 #define STI(temp_reg) sti
37 #endif /* XEN */
38
/* Identifiers naming each hotpatchable instruction sequence. */
#define HP_NAME_CLAC		1	/* SMAP: patched to clac */
#define HP_NAME_STAC		2	/* SMAP: patched to stac */
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4

/*
 * Mark the next 'size' bytes of code as hotpatchable: record a
 * (name, size, address) descriptor in the .rodata.hotpatch section,
 * which the kernel walks at boot to rewrite the instruction bytes
 * in place.  Label 123 marks the patch site itself.
 */
#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.quad		123b			; \
	.popsection
51
/*
 * SMAP control: assembled as a 3-byte NOP (0F 1F 00) and hotpatched to
 * clac/stac at boot when the CPU supports SMAP.  Note the naming follows
 * the protection, not the instruction: SMAP_ENABLE -> clac (forbid kernel
 * access to user pages), SMAP_DISABLE -> stac (permit it).
 */
#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

/* swapgs on bare metal; nothing on Xen (the hypervisor does it). */
#define SWAPGS	NOT_XEN(swapgs)
61
/*
 * These are used on interrupt or trap entry or exit.
 * Save all general-purpose registers into the trap frame at the top of
 * the stack (TF_* are offsets into struct trapframe).
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)
81
/*
 * Reload all general-purpose registers from the trap frame, in the same
 * order INTR_SAVE_GPRS stored them.  Used on the way back out of a trap.
 */
#define INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax
98
/*
 * Bracket code that goes in the .text.user section.
 * NOTE(review): presumably this is the code that must stay mapped in the
 * user page tables under SVS (entry/exit trampolines) — confirm against
 * the linker script and svs.c.
 */
#define TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
#define TEXT_USER_END	.popsection
101
#ifdef SVS

/*
 * Per-CPU user TLS page used by SVS (Separate Virtual Space, the
 * Meltdown mitigation).  It lives at a fixed VA mapped in both the user
 * and kernel page tables, so the entry code can find the kernel %cr3
 * before switching.
 * XXX: put this somewhere else
 */
#define SVS_UTLS		0xffffc00000000000 /* PMAP_PCPU_BASE */
#define UTLS_KPDIRPA		0	/* PA of the kernel page directory */
#define UTLS_SCRATCH		8	/* scratch slot for entry code */
#define UTLS_RSP0		16	/* kernel stack pointer */

/*
 * Switch from the user to the kernel page tables and move onto the
 * kernel stack.  Clobbers %rax and %rsp; run at trap entry from user.
 */
#define SVS_ENTER \
	movq	SVS_UTLS+UTLS_KPDIRPA,%rax	; \
	movq	%rax,%cr3			; \
	movq	CPUVAR(KRSP0),%rsp

/*
 * If we are returning to userland (SEL_UPL set in the saved %cs),
 * switch back to the user stack and user page tables.  Clobbers %rax.
 */
#define SVS_LEAVE \
	testb	$SEL_UPL,TF_CS(%rsp)		; \
	jz	1234f				; \
	movq	CPUVAR(URSP0),%rsp		; \
	movq	CPUVAR(UPDIRPA),%rax		; \
	movq	%rax,%cr3			; \
1234:

/*
 * Variant for traps taken on an alternate (IST) stack: switch page
 * tables only if we came from userland, and keep the current stack.
 */
#define SVS_ENTER_ALTSTACK \
	testb	$SEL_UPL,TF_CS(%rsp)		; \
	jz	1234f				; \
	movq	SVS_UTLS+UTLS_KPDIRPA,%rax	; \
	movq	%rax,%cr3			; \
1234:

/* Counterpart of SVS_ENTER_ALTSTACK: back to user page tables only. */
#define SVS_LEAVE_ALTSTACK \
	testb	$SEL_UPL,TF_CS(%rsp)		; \
	jz	1234f				; \
	movq	CPUVAR(UPDIRPA),%rax		; \
	movq	%rax,%cr3			; \
1234:
#else
/* Without SVS, user and kernel share page tables: nothing to switch. */
#define SVS_ENTER	/* nothing */
#define SVS_LEAVE	/* nothing */
#define SVS_ENTER_ALTSTACK	/* nothing */
#define SVS_LEAVE_ALTSTACK	/* nothing */
#endif
142
/*
 * Common trap/interrupt entry: allocate the trap frame, save the GPRs,
 * then branch on the saved %cs privilege level.
 *   kernel_trap: label jumped to when the trap came from kernel mode
 *                (no swapgs/SVS switch needed then).
 *   usertrap:    code inserted on the from-user path, before swapgs.
 * cld: the kernel ABI requires the direction flag clear; userland may
 * have set it.  SMAP_ENABLE (clac) closes the user-access window.
 */
#define INTRENTRY_L(kernel_trap, usertrap) \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	SMAP_ENABLE			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	kernel_trap		; \
	usertrap			; \
	SWAPGS				; \
	SVS_ENTER			; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)

/* Usual form: from-kernel and from-user paths rejoin immediately. */
#define INTRENTRY \
	INTRENTRY_L(98f,)		; \
98:

/* Common exit path; intrfastexit undoes INTRENTRY and iretq's. */
#define INTRFASTEXIT \
	jmp	intrfastexit
164
/*
 * Synthesize a hardware interrupt frame (SS, RSP, RFLAGS, CS, RIP) on
 * the current stack, so deferred interrupts can be replayed as if they
 * had just arrived.  NOTE(review): the final pushq %r13 fills the RIP
 * slot — the caller is expected to have loaded the resume address into
 * %r13 beforehand; confirm against the call sites in vector.S.
 * Clobbers %r10 and %r11.
 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	/* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	\
	pushq	%r13			;
176
/*
 * If a pmap (address-space) switch was deferred (ci_want_pmapload set),
 * perform it now by calling do_pmap_load.  Clobbers what the C call
 * clobbers (caller-saved registers).
 */
#define DO_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
1:

/* Only test: leaves ZF set iff no pmap load is pending. */
#define CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

/* Test/clear the per-lwp AST-pending flag; reg holds the lwp pointer. */
#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
188
189 #endif /* _AMD64_MACHINE_FRAMEASM_H */
190