/*	$NetBSD: nvmm_x86_svm.c,v 1.46 2019/05/11 07:31:56 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.46 2019/05/11 07:31:56 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int svm_vmrun(paddr_t, uint64_t *);

#define MSR_VM_HSAVE_PA	0xC0010117
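/*
 * VM_HSAVE_PA tells the CPU where to save the host state across VMRUN.
 * Each host CPU must program it with the physical address of a private,
 * page-aligned host save area before it can execute VMRUN; see the
 * hsave[] array further below.
 */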

/* -------------------------------------------------------------------------- */

#define VMCB_EXITCODE_CR0_READ		0x0000
#define VMCB_EXITCODE_CR1_READ		0x0001
#define VMCB_EXITCODE_CR2_READ		0x0002
#define VMCB_EXITCODE_CR3_READ		0x0003
#define VMCB_EXITCODE_CR4_READ		0x0004
#define VMCB_EXITCODE_CR5_READ		0x0005
#define VMCB_EXITCODE_CR6_READ		0x0006
#define VMCB_EXITCODE_CR7_READ		0x0007
#define VMCB_EXITCODE_CR8_READ		0x0008
#define VMCB_EXITCODE_CR9_READ		0x0009
#define VMCB_EXITCODE_CR10_READ		0x000A
#define VMCB_EXITCODE_CR11_READ		0x000B
#define VMCB_EXITCODE_CR12_READ		0x000C
#define VMCB_EXITCODE_CR13_READ		0x000D
#define VMCB_EXITCODE_CR14_READ		0x000E
#define VMCB_EXITCODE_CR15_READ		0x000F
#define VMCB_EXITCODE_CR0_WRITE		0x0010
#define VMCB_EXITCODE_CR1_WRITE		0x0011
#define VMCB_EXITCODE_CR2_WRITE		0x0012
#define VMCB_EXITCODE_CR3_WRITE		0x0013
#define VMCB_EXITCODE_CR4_WRITE		0x0014
#define VMCB_EXITCODE_CR5_WRITE		0x0015
#define VMCB_EXITCODE_CR6_WRITE		0x0016
#define VMCB_EXITCODE_CR7_WRITE		0x0017
#define VMCB_EXITCODE_CR8_WRITE		0x0018
#define VMCB_EXITCODE_CR9_WRITE		0x0019
#define VMCB_EXITCODE_CR10_WRITE	0x001A
#define VMCB_EXITCODE_CR11_WRITE	0x001B
#define VMCB_EXITCODE_CR12_WRITE	0x001C
#define VMCB_EXITCODE_CR13_WRITE	0x001D
#define VMCB_EXITCODE_CR14_WRITE	0x001E
#define VMCB_EXITCODE_CR15_WRITE	0x001F
#define VMCB_EXITCODE_DR0_READ		0x0020
#define VMCB_EXITCODE_DR1_READ		0x0021
#define VMCB_EXITCODE_DR2_READ		0x0022
#define VMCB_EXITCODE_DR3_READ		0x0023
#define VMCB_EXITCODE_DR4_READ		0x0024
#define VMCB_EXITCODE_DR5_READ		0x0025
#define VMCB_EXITCODE_DR6_READ		0x0026
#define VMCB_EXITCODE_DR7_READ		0x0027
#define VMCB_EXITCODE_DR8_READ		0x0028
#define VMCB_EXITCODE_DR9_READ		0x0029
#define VMCB_EXITCODE_DR10_READ		0x002A
#define VMCB_EXITCODE_DR11_READ		0x002B
#define VMCB_EXITCODE_DR12_READ		0x002C
#define VMCB_EXITCODE_DR13_READ		0x002D
#define VMCB_EXITCODE_DR14_READ		0x002E
#define VMCB_EXITCODE_DR15_READ		0x002F
#define VMCB_EXITCODE_DR0_WRITE		0x0030
#define VMCB_EXITCODE_DR1_WRITE		0x0031
#define VMCB_EXITCODE_DR2_WRITE		0x0032
#define VMCB_EXITCODE_DR3_WRITE		0x0033
#define VMCB_EXITCODE_DR4_WRITE		0x0034
#define VMCB_EXITCODE_DR5_WRITE		0x0035
#define VMCB_EXITCODE_DR6_WRITE		0x0036
#define VMCB_EXITCODE_DR7_WRITE		0x0037
#define VMCB_EXITCODE_DR8_WRITE		0x0038
#define VMCB_EXITCODE_DR9_WRITE		0x0039
#define VMCB_EXITCODE_DR10_WRITE	0x003A
#define VMCB_EXITCODE_DR11_WRITE	0x003B
#define VMCB_EXITCODE_DR12_WRITE	0x003C
#define VMCB_EXITCODE_DR13_WRITE	0x003D
#define VMCB_EXITCODE_DR14_WRITE	0x003E
#define VMCB_EXITCODE_DR15_WRITE	0x003F
#define VMCB_EXITCODE_EXCP0		0x0040
#define VMCB_EXITCODE_EXCP1		0x0041
#define VMCB_EXITCODE_EXCP2		0x0042
#define VMCB_EXITCODE_EXCP3		0x0043
#define VMCB_EXITCODE_EXCP4		0x0044
#define VMCB_EXITCODE_EXCP5		0x0045
#define VMCB_EXITCODE_EXCP6		0x0046
#define VMCB_EXITCODE_EXCP7		0x0047
#define VMCB_EXITCODE_EXCP8		0x0048
#define VMCB_EXITCODE_EXCP9		0x0049
#define VMCB_EXITCODE_EXCP10		0x004A
#define VMCB_EXITCODE_EXCP11		0x004B
#define VMCB_EXITCODE_EXCP12		0x004C
#define VMCB_EXITCODE_EXCP13		0x004D
#define VMCB_EXITCODE_EXCP14		0x004E
#define VMCB_EXITCODE_EXCP15		0x004F
#define VMCB_EXITCODE_EXCP16		0x0050
#define VMCB_EXITCODE_EXCP17		0x0051
#define VMCB_EXITCODE_EXCP18		0x0052
#define VMCB_EXITCODE_EXCP19		0x0053
#define VMCB_EXITCODE_EXCP20		0x0054
#define VMCB_EXITCODE_EXCP21		0x0055
#define VMCB_EXITCODE_EXCP22		0x0056
#define VMCB_EXITCODE_EXCP23		0x0057
#define VMCB_EXITCODE_EXCP24		0x0058
#define VMCB_EXITCODE_EXCP25		0x0059
#define VMCB_EXITCODE_EXCP26		0x005A
#define VMCB_EXITCODE_EXCP27		0x005B
#define VMCB_EXITCODE_EXCP28		0x005C
#define VMCB_EXITCODE_EXCP29		0x005D
#define VMCB_EXITCODE_EXCP30		0x005E
#define VMCB_EXITCODE_EXCP31		0x005F
#define VMCB_EXITCODE_INTR		0x0060
#define VMCB_EXITCODE_NMI		0x0061
#define VMCB_EXITCODE_SMI		0x0062
#define VMCB_EXITCODE_INIT		0x0063
#define VMCB_EXITCODE_VINTR		0x0064
#define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
#define VMCB_EXITCODE_IDTR_READ		0x0066
#define VMCB_EXITCODE_GDTR_READ		0x0067
#define VMCB_EXITCODE_LDTR_READ		0x0068
#define VMCB_EXITCODE_TR_READ		0x0069
#define VMCB_EXITCODE_IDTR_WRITE	0x006A
#define VMCB_EXITCODE_GDTR_WRITE	0x006B
#define VMCB_EXITCODE_LDTR_WRITE	0x006C
#define VMCB_EXITCODE_TR_WRITE		0x006D
#define VMCB_EXITCODE_RDTSC		0x006E
#define VMCB_EXITCODE_RDPMC		0x006F
#define VMCB_EXITCODE_PUSHF		0x0070
#define VMCB_EXITCODE_POPF		0x0071
#define VMCB_EXITCODE_CPUID		0x0072
#define VMCB_EXITCODE_RSM		0x0073
#define VMCB_EXITCODE_IRET		0x0074
#define VMCB_EXITCODE_SWINT		0x0075
#define VMCB_EXITCODE_INVD		0x0076
#define VMCB_EXITCODE_PAUSE		0x0077
#define VMCB_EXITCODE_HLT		0x0078
#define VMCB_EXITCODE_INVLPG		0x0079
#define VMCB_EXITCODE_INVLPGA		0x007A
#define VMCB_EXITCODE_IOIO		0x007B
#define VMCB_EXITCODE_MSR		0x007C
#define VMCB_EXITCODE_TASK_SWITCH	0x007D
#define VMCB_EXITCODE_FERR_FREEZE	0x007E
#define VMCB_EXITCODE_SHUTDOWN		0x007F
#define VMCB_EXITCODE_VMRUN		0x0080
#define VMCB_EXITCODE_VMMCALL		0x0081
#define VMCB_EXITCODE_VMLOAD		0x0082
#define VMCB_EXITCODE_VMSAVE		0x0083
#define VMCB_EXITCODE_STGI		0x0084
#define VMCB_EXITCODE_CLGI		0x0085
#define VMCB_EXITCODE_SKINIT		0x0086
#define VMCB_EXITCODE_RDTSCP		0x0087
#define VMCB_EXITCODE_ICEBP		0x0088
#define VMCB_EXITCODE_WBINVD		0x0089
#define VMCB_EXITCODE_MONITOR		0x008A
#define VMCB_EXITCODE_MWAIT		0x008B
#define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
#define VMCB_EXITCODE_XSETBV		0x008D
#define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
#define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
#define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
#define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
#define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
#define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
#define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
#define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
#define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
#define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
#define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
#define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
#define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
#define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
#define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
#define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
#define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
#define VMCB_EXITCODE_NPF		0x0400
#define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
#define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
#define VMCB_EXITCODE_VMGEXIT		0x0403
#define VMCB_EXITCODE_INVALID		-1

/* -------------------------------------------------------------------------- */

struct vmcb_ctrl {
	uint32_t intercept_cr;
#define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)

	uint32_t intercept_dr;
#define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)

	uint32_t intercept_vec;
#define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)

	uint32_t intercept_misc1;
#define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
#define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
#define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
#define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
#define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
#define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
#define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
#define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
#define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
#define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
#define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
#define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
#define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
#define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
#define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
#define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
#define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
#define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
#define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
#define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
#define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
#define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
#define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
#define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
#define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
#define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)

	uint32_t intercept_misc2;
#define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
#define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
#define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
#define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
#define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
#define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
#define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
#define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(12)
#define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
#define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)

	uint8_t rsvd1[40];
	uint16_t pause_filt_thresh;
	uint16_t pause_filt_cnt;
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t guest_asid;

	uint32_t tlb_ctrl;
#define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07
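/*
 * TLB control encodings, per the AMD APM: 0x01 flushes the entire TLB
 * (all ASIDs), 0x03 flushes only this guest's ASID, and 0x07 flushes this
 * guest's non-global entries. The value the driver actually uses is kept
 * in svm_ctrl_tlb_flush, chosen at initialization (outside this section),
 * presumably depending on the FlushByASID capability of the CPU.
 */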

	uint64_t v;
#define VMCB_CTRL_V_TPR			__BITS(3,0)
#define VMCB_CTRL_V_IRQ			__BIT(8)
#define VMCB_CTRL_V_VGIF		__BIT(9)
#define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
#define VMCB_CTRL_V_IGN_TPR		__BIT(20)
#define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
#define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
#define VMCB_CTRL_V_AVIC_EN		__BIT(31)
#define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)

	uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW		__BIT(0)

	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;

	uint64_t exitintinfo;
#define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
#define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
#define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
#define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)

	uint64_t enable1;
#define VMCB_CTRL_ENABLE_NP		__BIT(0)
#define VMCB_CTRL_ENABLE_SEV		__BIT(1)
#define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)

	uint64_t avic;
#define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)

	uint64_t ghcb;

	uint64_t eventinj;
#define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
#define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
#define VMCB_CTRL_EVENTINJ_V		__BIT(31)
#define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)

	uint64_t n_cr3;

	uint64_t enable2;
#define VMCB_CTRL_ENABLE_LBR		__BIT(0)
#define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)

	uint32_t vmcb_clean;
#define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
#define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
#define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
#define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
#define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
#define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
#define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
#define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
#define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
#define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
#define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
#define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)

	uint32_t rsvd2;
	uint64_t nrip;
	uint8_t	inst_len;
	uint8_t	inst_bytes[15];
	uint64_t avic_abpp;
	uint64_t rsvd3;
	uint64_t avic_ltp;

	uint64_t avic_phys;
#define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
#define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)

	uint64_t rsvd4;
	uint64_t vmcb_ptr;

	uint8_t	pad[752];
} __packed;

CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_segment {
	uint16_t selector;
	uint16_t attrib;	/* hidden */
	uint32_t limit;		/* hidden */
	uint64_t base;		/* hidden */
} __packed;

CTASSERT(sizeof(struct vmcb_segment) == 16);

struct vmcb_state {
	struct vmcb_segment es;
	struct vmcb_segment cs;
	struct vmcb_segment ss;
	struct vmcb_segment ds;
	struct vmcb_segment fs;
	struct vmcb_segment gs;
	struct vmcb_segment gdt;
	struct vmcb_segment ldt;
	struct vmcb_segment idt;
	struct vmcb_segment tr;
	uint8_t	rsvd1[43];
	uint8_t	cpl;
	uint8_t	rsvd2[4];
	uint64_t efer;
	uint8_t	rsvd3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t	rsvd4[88];
	uint64_t rsp;
	uint8_t	rsvd5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t	rsvd6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t	pad[2408];
} __packed;

CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __packed;

CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
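/*
 * The VMCB is a single 4K page: the control area occupies the first
 * 0x400 bytes and the state save area fills the remainder, which is what
 * the two CTASSERTs above enforce.
 */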
452
453 /* -------------------------------------------------------------------------- */
454
455 static void svm_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
456 static void svm_vcpu_state_commit(struct nvmm_cpu *);
457
458 struct svm_hsave {
459 paddr_t pa;
460 };
461
462 static struct svm_hsave hsave[MAXCPUS];
463
464 static uint8_t *svm_asidmap __read_mostly;
465 static uint32_t svm_maxasid __read_mostly;
466 static kmutex_t svm_asidlock __cacheline_aligned;
467
468 static bool svm_decode_assist __read_mostly;
469 static uint32_t svm_ctrl_tlb_flush __read_mostly;
470
471 #define SVM_XCR0_MASK_DEFAULT (XCR0_X87|XCR0_SSE)
472 static uint64_t svm_xcr0_mask __read_mostly;
473
474 #define SVM_NCPUIDS 32
475
476 #define VMCB_NPAGES 1
477
478 #define MSRBM_NPAGES 2
479 #define MSRBM_SIZE (MSRBM_NPAGES * PAGE_SIZE)
480
481 #define IOBM_NPAGES 3
482 #define IOBM_SIZE (IOBM_NPAGES * PAGE_SIZE)
483
484 /* Does not include EFER_LMSLE. */
485 #define EFER_VALID \
486 (EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)
487
488 #define EFER_TLB_FLUSH \
489 (EFER_NXE|EFER_LMA|EFER_LME)
490 #define CR0_TLB_FLUSH \
491 (CR0_PG|CR0_WP|CR0_CD|CR0_NW)
492 #define CR4_TLB_FLUSH \
493 (CR4_PGE|CR4_PAE|CR4_PSE)
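/*
 * Toggling any of the bits above changes how the guest's addresses are
 * translated, so such a change requires the guest TLB to be flushed.
 * These masks are used by svm_state_tlb_flush() and by the in-kernel
 * EFER write handler.
 */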

/* -------------------------------------------------------------------------- */

struct svm_machdata {
	bool cpuidpresent[SVM_NCPUIDS];
	struct nvmm_mach_conf_x86_cpuid cpuid[SVM_NCPUIDS];
	volatile uint64_t mach_htlb_gen;
};

static const size_t svm_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID)] =
	    sizeof(struct nvmm_mach_conf_x86_cpuid)
};

struct svm_cpudata {
	/* General */
	bool shared_asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;

	/* VMCB */
	struct vmcb *vmcb;
	paddr_t vmcb_pa;

	/* I/O bitmap */
	uint8_t *iobm;
	paddr_t iobm_pa;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t fsbase;
	uint64_t kernelgsbase;
	bool ts_set;
	struct xsave_header hfpu __aligned(64);

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);
};

static void
svm_vmcb_cache_default(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean =
	    VMCB_CTRL_VMCB_CLEAN_I |
	    VMCB_CTRL_VMCB_CLEAN_IOPM |
	    VMCB_CTRL_VMCB_CLEAN_ASID |
	    VMCB_CTRL_VMCB_CLEAN_TPR |
	    VMCB_CTRL_VMCB_CLEAN_NP |
	    VMCB_CTRL_VMCB_CLEAN_CR |
	    VMCB_CTRL_VMCB_CLEAN_DR |
	    VMCB_CTRL_VMCB_CLEAN_DT |
	    VMCB_CTRL_VMCB_CLEAN_SEG |
	    VMCB_CTRL_VMCB_CLEAN_CR2 |
	    VMCB_CTRL_VMCB_CLEAN_LBR |
	    VMCB_CTRL_VMCB_CLEAN_AVIC;
}

static void
svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
{
	if (flags & NVMM_X64_STATE_SEGS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
	}
	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
		      VMCB_CTRL_VMCB_CLEAN_TPR);
	}
	if (flags & NVMM_X64_STATE_DRS) {
		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
	}
	if (flags & NVMM_X64_STATE_MSRS) {
		/* CR for EFER, NP for PAT. */
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
	}
}

static inline void
svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
{
	vmcb->ctrl.vmcb_clean &= ~flags;
}

static inline void
svm_vmcb_cache_flush_all(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean = 0;
}
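/*
 * The vmcb_clean field implements the VMCB clean-bits protocol: a set bit
 * tells the CPU that the corresponding VMCB area is unchanged since the
 * last VMRUN on this CPU, so its cached copy may be reused. We set all
 * bits after each run (svm_vmcb_cache_default) and clear individual bits
 * whenever we modify the matching area.
 */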

#define SVM_EVENT_TYPE_HW_INT	0
#define SVM_EVENT_TYPE_NMI	2
#define SVM_EVENT_TYPE_EXC	3
#define SVM_EVENT_TYPE_SW_INT	4

static void
svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = true;
	} else {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = true;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static void
svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = false;
	} else {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = false;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static inline int
svm_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}
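/*
 * The vectors above are the x86 exceptions that push an error code, so
 * for them EVENTINJ.EV must be set and an error code supplied.
 */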

static int
svm_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	enum nvmm_event_type evtype;
	uint64_t vector, error;
	int type = 0, err = 0;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.error;
	__insn_barrier();

	if (__predict_false(vector >= 256)) {
		return EINVAL;
	}

	switch (evtype) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = SVM_EVENT_TYPE_HW_INT;
		if (vector == 2) {
			type = SVM_EVENT_TYPE_NMI;
			svm_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	case NVMM_EVENT_EXCEPTION:
		type = SVM_EVENT_TYPE_EXC;
		if (vector == 2 || vector >= 32)
			return EINVAL;
		if (vector == 3 || vector == 0)
			return EINVAL;
		err = svm_event_has_error(vector);
		break;
	default:
		return EINVAL;
	}

	vmcb->ctrl.eventinj =
	    __SHIFTIN(vector, VMCB_CTRL_EVENTINJ_VECTOR) |
	    __SHIFTIN(type, VMCB_CTRL_EVENTINJ_TYPE) |
	    __SHIFTIN(err, VMCB_CTRL_EVENTINJ_EV) |
	    __SHIFTIN(1, VMCB_CTRL_EVENTINJ_V) |
	    __SHIFTIN(error, VMCB_CTRL_EVENTINJ_ERRORCODE);

	cpudata->evt_pending = true;

	return 0;
}

static void
svm_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_EVENT_EXCEPTION;
	comm->event.vector = 6;
	comm->event.u.error = 0;

	ret = svm_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
svm_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_EVENT_EXCEPTION;
	comm->event.vector = 13;
	comm->event.u.error = 0;

	ret = svm_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
svm_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return svm_vcpu_inject(vcpu);
}

static inline void
svm_inkernel_advance(struct vmcb *vmcb)
{
	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	vmcb->state.rip = vmcb->ctrl.nrip;
	vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
}

static void
svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t cr4;

	switch (eax) {
	case 0x00000001:
		cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = cpudata->vmcb->state.cr4;
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000005:
	case 0x00000006:
		cpudata->vmcb->state.rax = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007:
		cpudata->vmcb->state.rax &= nvmm_cpuid_00000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
		break;
	case 0x0000000D:
		if (svm_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->vmcb->state.rax &= ~CPUID_PES1_XSAVES;
			break;
		}
		break;
	case 0x40000000:
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x80000001:
		cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	default:
		break;
	}
}

static void
svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_mach_conf_x86_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->vmcb->state.rax;
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->vmcb->state.rax = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

	svm_inkernel_handle_cpuid(vcpu, eax, ecx);

	for (i = 0; i < SVM_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->vmcb->state.rax &= ~cpuid->del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->vmcb->state.rax |= cpuid->set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	svm_inkernel_advance(cpudata->vmcb);
	exit->reason = NVMM_EXIT_NONE;
}

static void
svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (cpudata->int_window_exit && (vmcb->state.rflags & PSL_I)) {
		svm_event_waitexit_disable(vcpu, false);
	}

	svm_inkernel_advance(cpudata->vmcb);
	exit->reason = NVMM_EXIT_HALTED;
}

#define SVM_EXIT_IO_PORT	__BITS(31,16)
#define SVM_EXIT_IO_SEG		__BITS(12,10)
#define SVM_EXIT_IO_A64		__BIT(9)
#define SVM_EXIT_IO_A32		__BIT(8)
#define SVM_EXIT_IO_A16		__BIT(7)
#define SVM_EXIT_IO_SZ32	__BIT(6)
#define SVM_EXIT_IO_SZ16	__BIT(5)
#define SVM_EXIT_IO_SZ8		__BIT(4)
#define SVM_EXIT_IO_REP		__BIT(3)
#define SVM_EXIT_IO_STR		__BIT(2)
#define SVM_EXIT_IO_IN		__BIT(0)
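/*
 * Layout of EXITINFO1 for IOIO intercepts, per the AMD APM. As an
 * illustrative example, a guest executing `outb %al,$0x71' would yield
 * PORT=0x71 and SZ8, with IN, STR and REP all clear.
 */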

static void
svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_EXIT_IO;

	if (info & SVM_EXIT_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);

	if (svm_decode_assist) {
		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
		exit->u.io.seg = __SHIFTOUT(info, SVM_EXIT_IO_SEG);
	} else {
		exit->u.io.seg = -1;
	}

	if (info & SVM_EXIT_IO_A64) {
		exit->u.io.address_size = 8;
	} else if (info & SVM_EXIT_IO_A32) {
		exit->u.io.address_size = 4;
	} else if (info & SVM_EXIT_IO_A16) {
		exit->u.io.address_size = 2;
	}

	if (info & SVM_EXIT_IO_SZ32) {
		exit->u.io.operand_size = 4;
	} else if (info & SVM_EXIT_IO_SZ16) {
		exit->u.io.operand_size = 2;
	} else if (info & SVM_EXIT_IO_SZ8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
	exit->u.io.npc = nextpc;

	svm_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static const uint64_t msr_ignore_list[] = {
	0xc0010055, /* MSR_CMPHALT */
	MSR_DE_CFG,
	MSR_IC_CFG,
	MSR_UCODE_AMD_PATCHLEVEL
};
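/*
 * MSRs we silently tolerate: reads of the MSRs above return 0 and writes
 * are dropped, rather than forwarding the access to userland or injecting
 * a #GP (see svm_inkernel_handle_msr below).
 */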

static bool
svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;
	size_t i;

	switch (exit->u.msr.type) {
	case NVMM_EXIT_MSR_RDMSR:
		if (exit->u.msr.msr == MSR_NB_CFG) {
			val = NB_CFG_INITAPICCPUIDLO;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			val = 0;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		break;
	case NVMM_EXIT_MSR_WRMSR:
		if (exit->u.msr.msr == MSR_EFER) {
			if (__predict_false(exit->u.msr.val & ~EFER_VALID)) {
				goto error;
			}
			if ((vmcb->state.efer ^ exit->u.msr.val) &
			     EFER_TLB_FLUSH) {
				cpudata->gtlb_want_flush = true;
			}
			vmcb->state.efer = exit->u.msr.val | EFER_SVME;
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_TSC) {
			cpudata->gtsc = exit->u.msr.val;
			cpudata->gtsc_want_update = true;
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			goto handled;
		}
		break;
	}

	return false;

handled:
	svm_inkernel_advance(cpudata->vmcb);
	return true;

error:
	svm_inject_gp(vcpu);
	return true;
}

static void
svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;

	if (info == 0) {
		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
	} else {
		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
	}

	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (info == 1) {
		uint64_t rdx, rax;
		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
		rax = cpudata->vmcb->state.rax;
		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	} else {
		exit->u.msr.val = 0;
	}

	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_EXIT_NONE;
		return;
	}

	exit->reason = NVMM_EXIT_MSR;
	exit->u.msr.npc = cpudata->vmcb->ctrl.nrip;

	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static void
svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_EXIT_MEMORY;
	if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
		exit->u.mem.prot = PROT_WRITE;
	else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
		exit->u.mem.prot = PROT_EXEC;
	else
		exit->u.mem.prot = PROT_READ;
	exit->u.mem.gpa = gpa;
	exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
	memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
	    sizeof(exit->u.mem.inst_bytes));

	svm_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static void
svm_exit_insn(struct vmcb *vmcb, struct nvmm_exit *exit, uint64_t reason)
{
	exit->u.insn.npc = vmcb->ctrl.nrip;
	exit->reason = reason;
}

static void
svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;

	exit->reason = NVMM_EXIT_NONE;

	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (vmcb->state.rax & 0xFFFFFFFF);

	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false(vmcb->state.cpl != 0)) {
		goto error;
	} else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	cpudata->gxcr0 = val;

	svm_inkernel_advance(cpudata->vmcb);
	return;

error:
	svm_inject_gp(vcpu);
}

static void
svm_exit_invalid(struct nvmm_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_EXIT_INVALID;
}

/* -------------------------------------------------------------------------- */

static void
svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->ts_set = (rcr0() & CR0_TS) != 0;

	fpu_area_save(&cpudata->hfpu, svm_xcr0_mask);
	fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);

	if (svm_xcr0_mask != 0) {
		cpudata->hxcr0 = rdxcr(0);
		wrxcr(0, cpudata->gxcr0);
	}
}

static void
svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (svm_xcr0_mask != 0) {
		cpudata->gxcr0 = rdxcr(0);
		wrxcr(0, cpudata->hxcr0);
	}

	fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
	fpu_area_restore(&cpudata->hfpu, svm_xcr0_mask);

	if (cpudata->ts_set) {
		stts();
	}
}

static void
svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	x86_dbregs_save(curlwp);

	ldr7(0);

	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
}

static void
svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();

	x86_dbregs_restore(curlwp);
}

static void
svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->fsbase = rdmsr(MSR_FSBASE);
	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
}

static void
svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	wrmsr(MSR_STAR, cpudata->star);
	wrmsr(MSR_LSTAR, cpudata->lstar);
	wrmsr(MSR_CSTAR, cpudata->cstar);
	wrmsr(MSR_SFMASK, cpudata->sfmask);
	wrmsr(MSR_FSBASE, cpudata->fsbase);
	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
}
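/*
 * The guest copies of STAR/LSTAR/CSTAR/SFMASK and the GS bases live in
 * VMCB state and get loaded around VMRUN (presumably by a VMLOAD in the
 * svm_vmrun() stub), clobbering the host values, so the host copies kept
 * in cpudata are restored here. FSBASE and KERNELGSBASE can change
 * between runs, hence they are re-read on each entry in
 * svm_vcpu_guest_misc_enter().
 */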

/* -------------------------------------------------------------------------- */

static inline void
svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
		cpudata->gtlb_want_flush = true;
	}
}

static inline void
svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	/*
	 * Nothing to do. If an hTLB flush was needed, either the VCPU was
	 * executing on this hCPU and the hTLB already got flushed, or it
	 * was executing on another hCPU in which case the catchup is done
	 * in svm_gtlb_catchup().
	 */
}

static inline uint64_t
svm_htlb_flush(struct svm_machdata *machdata, struct svm_cpudata *cpudata)
{
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;

	machgen = machdata->mach_htlb_gen;
	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
		return machgen;
	}

	vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
	return machgen;
}
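/*
 * Host TLB flushes use a generation scheme: mach_htlb_gen is bumped
 * (outside this section's run path) whenever the host-side NPT mappings
 * change, and each VCPU remembers the generation it last ran with. A
 * mismatch here forces a TLB flush on the next VMRUN; the VCPU only
 * acknowledges the new generation once the run actually happened, in
 * svm_htlb_flush_ack() below.
 */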

static inline void
svm_htlb_flush_ack(struct svm_cpudata *cpudata, uint64_t machgen)
{
	struct vmcb *vmcb = cpudata->vmcb;

	if (__predict_true(vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID)) {
		cpudata->vcpu_htlb_gen = machgen;
	}
}

static inline void
svm_exit_evt(struct svm_cpudata *cpudata, struct vmcb *vmcb)
{
	cpudata->evt_pending = false;

	if (__predict_false(vmcb->ctrl.exitintinfo & VMCB_CTRL_EXITINTINFO_V)) {
		vmcb->ctrl.eventinj = vmcb->ctrl.exitintinfo;
		cpudata->evt_pending = true;
	}
}
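/*
 * If the #VMEXIT occurred while an event was being delivered, EXITINTINFO
 * holds that event with its valid bit set; svm_exit_evt() re-queues it
 * through EVENTINJ so that delivery is retried on the next VMRUN.
 */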

static int
svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;
	int hcpu, s;

	if (__predict_false(svm_vcpu_event_commit(vcpu) != 0)) {
		return EINVAL;
	}
	svm_vcpu_state_commit(vcpu);
	comm->state_cached = 0;

	kpreempt_disable();
	hcpu = cpu_number();

	svm_gtlb_catchup(vcpu, hcpu);
	svm_htlb_catchup(vcpu, hcpu);

	if (vcpu->hcpu_last != hcpu) {
		svm_vmcb_cache_flush_all(vmcb);
		cpudata->gtsc_want_update = true;
	}

	svm_vcpu_guest_dbregs_enter(vcpu);
	svm_vcpu_guest_misc_enter(vcpu);

	while (1) {
		if (cpudata->gtlb_want_flush) {
			vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
		} else {
			vmcb->ctrl.tlb_ctrl = 0;
		}

		if (__predict_false(cpudata->gtsc_want_update)) {
			vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc();
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
		}

		s = splhigh();
		machgen = svm_htlb_flush(machdata, cpudata);
		svm_vcpu_guest_fpu_enter(vcpu);
		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
		svm_vcpu_guest_fpu_leave(vcpu);
		svm_htlb_flush_ack(cpudata, machgen);
		splx(s);

		svm_vmcb_cache_default(vmcb);

		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
			cpudata->gtlb_want_flush = false;
			cpudata->gtsc_want_update = false;
			vcpu->hcpu_last = hcpu;
		}
		svm_exit_evt(cpudata, vmcb);

		switch (vmcb->ctrl.exitcode) {
		case VMCB_EXITCODE_INTR:
		case VMCB_EXITCODE_NMI:
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCB_EXITCODE_VINTR:
			svm_event_waitexit_disable(vcpu, false);
			exit->reason = NVMM_EXIT_INT_READY;
			break;
		case VMCB_EXITCODE_IRET:
			svm_event_waitexit_disable(vcpu, true);
			exit->reason = NVMM_EXIT_NMI_READY;
			break;
		case VMCB_EXITCODE_CPUID:
			svm_exit_cpuid(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_HLT:
			svm_exit_hlt(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_IOIO:
			svm_exit_io(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_MSR:
			svm_exit_msr(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_SHUTDOWN:
			exit->reason = NVMM_EXIT_SHUTDOWN;
			break;
		case VMCB_EXITCODE_RDPMC:
		case VMCB_EXITCODE_RSM:
		case VMCB_EXITCODE_INVLPGA:
		case VMCB_EXITCODE_VMRUN:
		case VMCB_EXITCODE_VMMCALL:
		case VMCB_EXITCODE_VMLOAD:
		case VMCB_EXITCODE_VMSAVE:
		case VMCB_EXITCODE_STGI:
		case VMCB_EXITCODE_CLGI:
		case VMCB_EXITCODE_SKINIT:
		case VMCB_EXITCODE_RDTSCP:
			svm_inject_ud(vcpu);
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCB_EXITCODE_MONITOR:
			svm_exit_insn(vmcb, exit, NVMM_EXIT_MONITOR);
			break;
		case VMCB_EXITCODE_MWAIT:
			svm_exit_insn(vmcb, exit, NVMM_EXIT_MWAIT);
			break;
		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
			svm_exit_insn(vmcb, exit, NVMM_EXIT_MWAIT_COND);
			break;
		case VMCB_EXITCODE_XSETBV:
			svm_exit_xsetbv(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_NPF:
			svm_exit_npf(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
		default:
			svm_exit_invalid(exit, vmcb->ctrl.exitcode);
			break;
		}

		/* If no reason to return to userland, keep rolling. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			break;
		}
		if (curcpu()->ci_data.cpu_softints != 0) {
			break;
		}
		if (curlwp->l_flag & LW_USERRET) {
			break;
		}
		if (exit->reason != NVMM_EXIT_NONE) {
			break;
		}
	}

	cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset;

	svm_vcpu_guest_misc_leave(vcpu);
	svm_vcpu_guest_dbregs_leave(vcpu);

	kpreempt_enable();

	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v,
	    VMCB_CTRL_V_TPR);
	exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags;

	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
	    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
	    cpudata->int_window_exit;
	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
	    cpudata->nmi_window_exit;
	exit->exitstate[NVMM_X64_EXITSTATE_EVT_PENDING] =
	    cpudata->evt_pending;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
{
	struct pglist pglist;
	paddr_t _pa;
	vaddr_t _va;
	size_t i;
	int ret;

	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return ENOMEM;
	_pa = TAILQ_FIRST(&pglist)->phys_addr;
	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (_va == 0)
		goto error;

	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
	}
	pmap_update(pmap_kernel());

	memset((void *)_va, 0, npages * PAGE_SIZE);

	*pa = _pa;
	*va = _va;
	return 0;

error:
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
	}
	return ENOMEM;
}

static void
svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
{
	size_t i;

	pmap_kremove(va, npages * PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
	}
}

/* -------------------------------------------------------------------------- */

#define SVM_MSRBM_READ	__BIT(0)
#define SVM_MSRBM_WRITE	__BIT(1)

static void
svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t byte;
	uint8_t bitoff;

	if (msr < 0x00002000) {
		/* Range 1 */
		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		/* Range 2 */
		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
		/* Range 3 */
		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
	} else {
		panic("%s: wrong range", __func__);
	}

	bitoff = (msr & 0x3) << 1;

	if (read) {
		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
	}
	if (write) {
		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
	}
}
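/*
 * The MSR permission map encodes two bits per MSR (read and write), over
 * three 2K ranges covering MSRs 0x0-0x1FFF, 0xC0000000-0xC0001FFF and
 * 0xC0010000-0xC0011FFF. A set bit means "intercept". For example, a
 * hypothetical caller wishing to let the guest read the TSC directly
 * would do:
 *
 *	svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
 */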
1538
1539 #define SVM_SEG_ATTRIB_TYPE __BITS(3,0)
1540 #define SVM_SEG_ATTRIB_S __BIT(4)
1541 #define SVM_SEG_ATTRIB_DPL __BITS(6,5)
1542 #define SVM_SEG_ATTRIB_P __BIT(7)
1543 #define SVM_SEG_ATTRIB_AVL __BIT(8)
1544 #define SVM_SEG_ATTRIB_L __BIT(9)
1545 #define SVM_SEG_ATTRIB_DEF __BIT(10)
1546 #define SVM_SEG_ATTRIB_G __BIT(11)
1547
1548 static void
1549 svm_vcpu_setstate_seg(const struct nvmm_x64_state_seg *seg,
1550 struct vmcb_segment *vseg)
1551 {
1552 vseg->selector = seg->selector;
1553 vseg->attrib =
1554 __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
1555 __SHIFTIN(seg->attrib.s, SVM_SEG_ATTRIB_S) |
1556 __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
1557 __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
1558 __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
1559 __SHIFTIN(seg->attrib.l, SVM_SEG_ATTRIB_L) |
1560 __SHIFTIN(seg->attrib.def, SVM_SEG_ATTRIB_DEF) |
1561 __SHIFTIN(seg->attrib.g, SVM_SEG_ATTRIB_G);
1562 vseg->limit = seg->limit;
1563 vseg->base = seg->base;
1564 }
1565
1566 static void
1567 svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
1568 {
1569 seg->selector = vseg->selector;
1570 seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
1571 seg->attrib.s = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_S);
1572 seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
1573 seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
1574 seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
1575 seg->attrib.l = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_L);
1576 seg->attrib.def = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF);
1577 seg->attrib.g = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_G);
1578 seg->limit = vseg->limit;
1579 seg->base = vseg->base;
1580 }
1581
1582 static inline bool
1583 svm_state_tlb_flush(const struct vmcb *vmcb, const struct nvmm_x64_state *state,
1584 uint64_t flags)
1585 {
1586 if (flags & NVMM_X64_STATE_CRS) {
1587 if ((vmcb->state.cr0 ^
1588 state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
1589 return true;
1590 }
1591 if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
1592 return true;
1593 }
1594 if ((vmcb->state.cr4 ^
1595 state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
1596 return true;
1597 }
1598 }
1599
1600 if (flags & NVMM_X64_STATE_MSRS) {
1601 if ((vmcb->state.efer ^
1602 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
1603 return true;
1604 }
1605 }
1606
1607 return false;
1608 }
1609
1610 static void
1611 svm_vcpu_setstate(struct nvmm_cpu *vcpu)
1612 {
1613 struct nvmm_comm_page *comm = vcpu->comm;
1614 const struct nvmm_x64_state *state = &comm->state;
1615 struct svm_cpudata *cpudata = vcpu->cpudata;
1616 struct vmcb *vmcb = cpudata->vmcb;
1617 struct fxsave *fpustate;
1618 uint64_t flags;
1619
1620 flags = comm->state_wanted;
1621
1622 if (svm_state_tlb_flush(vmcb, state, flags)) {
1623 cpudata->gtlb_want_flush = true;
1624 }
1625
1626 if (flags & NVMM_X64_STATE_SEGS) {
1627 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
1628 &vmcb->state.cs);
1629 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
1630 &vmcb->state.ds);
1631 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
1632 &vmcb->state.es);
1633 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
1634 &vmcb->state.fs);
1635 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
1636 &vmcb->state.gs);
1637 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
1638 &vmcb->state.ss);
1639 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
1640 &vmcb->state.gdt);
1641 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
1642 &vmcb->state.idt);
1643 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
1644 &vmcb->state.ldt);
1645 svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
1646 &vmcb->state.tr);
1647
1648 vmcb->state.cpl = state->segs[NVMM_X64_SEG_SS].attrib.dpl;
1649 }
1650
1651 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
1652 if (flags & NVMM_X64_STATE_GPRS) {
1653 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
1654
1655 vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
1656 vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
1657 vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
1658 vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
1659 }
1660
1661 if (flags & NVMM_X64_STATE_CRS) {
1662 vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
1663 vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
1664 vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
1665 vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];
1666
1667 vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
1668 vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
1669 VMCB_CTRL_V_TPR);
1670
1671 if (svm_xcr0_mask != 0) {
1672 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
1673 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
1674 cpudata->gxcr0 &= svm_xcr0_mask;
1675 cpudata->gxcr0 |= XCR0_X87;
1676 }
1677 }
1678
1679 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
1680 if (flags & NVMM_X64_STATE_DRS) {
1681 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
1682
1683 vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
1684 vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
1685 }
1686
1687 if (flags & NVMM_X64_STATE_MSRS) {
1688 /*
1689 * EFER_SVME is mandatory.
1690 */
1691 vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
1692 vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
1693 vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
1694 vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
1695 vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
1696 vmcb->state.kernelgsbase =
1697 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
1698 vmcb->state.sysenter_cs =
1699 state->msrs[NVMM_X64_MSR_SYSENTER_CS];
1700 vmcb->state.sysenter_esp =
1701 state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
1702 vmcb->state.sysenter_eip =
1703 state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
1704 vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];
1705
1706 cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
1707 cpudata->gtsc_want_update = true;
1708 }
1709
1710 if (flags & NVMM_X64_STATE_INTR) {
1711 if (state->intr.int_shadow) {
1712 vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
1713 } else {
1714 vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
1715 }
1716
1717 if (state->intr.int_window_exiting) {
1718 svm_event_waitexit_enable(vcpu, false);
1719 } else {
1720 svm_event_waitexit_disable(vcpu, false);
1721 }
1722
1723 if (state->intr.nmi_window_exiting) {
1724 svm_event_waitexit_enable(vcpu, true);
1725 } else {
1726 svm_event_waitexit_disable(vcpu, true);
1727 }
1728 }
1729
1730 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
1731 if (flags & NVMM_X64_STATE_FPU) {
1732 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
1733 sizeof(state->fpu));
1734
1735 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
1736 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
1737 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
1738
1739 if (svm_xcr0_mask != 0) {
1740 /* Reset XSTATE_BV, to force a reload. */
1741 cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
1742 }
1743 }
1744
1745 svm_vmcb_cache_update(vmcb, flags);
1746
1747 comm->state_wanted = 0;
1748 comm->state_cached |= flags;
1749 }
1750
1751 static void
1752 svm_vcpu_getstate(struct nvmm_cpu *vcpu)
1753 {
1754 struct nvmm_comm_page *comm = vcpu->comm;
1755 struct nvmm_x64_state *state = &comm->state;
1756 struct svm_cpudata *cpudata = vcpu->cpudata;
1757 struct vmcb *vmcb = cpudata->vmcb;
1758 uint64_t flags;
1759
1760 flags = comm->state_wanted;
1761
1762 if (flags & NVMM_X64_STATE_SEGS) {
1763 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
1764 &vmcb->state.cs);
1765 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
1766 &vmcb->state.ds);
1767 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
1768 &vmcb->state.es);
1769 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
1770 &vmcb->state.fs);
1771 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
1772 &vmcb->state.gs);
1773 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
1774 &vmcb->state.ss);
1775 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
1776 &vmcb->state.gdt);
1777 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
1778 &vmcb->state.idt);
1779 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
1780 &vmcb->state.ldt);
1781 svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
1782 &vmcb->state.tr);
1783
1784 state->segs[NVMM_X64_SEG_SS].attrib.dpl = vmcb->state.cpl;
1785 }
1786
1787 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
1788 if (flags & NVMM_X64_STATE_GPRS) {
1789 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
1790
1791 state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
1792 state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
1793 state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
1794 state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
1795 }
1796
1797 if (flags & NVMM_X64_STATE_CRS) {
1798 state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
1799 state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
1800 state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
1801 state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
1802 state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
1803 VMCB_CTRL_V_TPR);
1804 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
1805 }
1806
1807 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
1808 if (flags & NVMM_X64_STATE_DRS) {
1809 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
1810
1811 state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
1812 state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
1813 }
1814
1815 if (flags & NVMM_X64_STATE_MSRS) {
1816 state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
1817 state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
1818 state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
1819 state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
1820 state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
1821 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
1822 vmcb->state.kernelgsbase;
1823 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
1824 vmcb->state.sysenter_cs;
1825 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
1826 vmcb->state.sysenter_esp;
1827 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
1828 vmcb->state.sysenter_eip;
1829 state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
1830 state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
1831
1832 /* Hide SVME. */
1833 state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
1834 }
1835
1836 if (flags & NVMM_X64_STATE_INTR) {
1837 state->intr.int_shadow =
1838 (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
1839 state->intr.int_window_exiting = cpudata->int_window_exit;
1840 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
1841 state->intr.evt_pending = cpudata->evt_pending;
1842 }
1843
1844 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
1845 if (flags & NVMM_X64_STATE_FPU) {
1846 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
1847 sizeof(state->fpu));
1848 }
1849
1850 comm->state_wanted = 0;
1851 comm->state_cached |= flags;
1852 }
1853
1854 static void
1855 svm_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
1856 {
1857 vcpu->comm->state_wanted = flags;
1858 svm_vcpu_getstate(vcpu);
1859 }
1860
1861 static void
1862 svm_vcpu_state_commit(struct nvmm_cpu *vcpu)
1863 {
1864 vcpu->comm->state_wanted = vcpu->comm->state_commit;
1865 vcpu->comm->state_commit = 0;
1866 svm_vcpu_setstate(vcpu);
1867 }
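
/*
 * Illustrative sketch (not part of the original file): the comm-page
 * protocol implemented by the functions above. A hypothetical caller
 * pulls state into the shared page with state_wanted, mutates the
 * copy, then marks it dirty via state_commit so that the next
 * svm_vcpu_state_commit() pushes it back into the VMCB.
 */
#if 0
static void
svm_comm_protocol_sketch(struct nvmm_cpu *vcpu)
{
	/* Pull the GPRs from the VMCB/cpudata into the comm page. */
	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);

	/* Mutate the shared copy; for example, clear RAX. */
	vcpu->comm->state.gprs[NVMM_X64_GPR_RAX] = 0;

	/* Mark the GPRs dirty; the commit path flushes them back. */
	vcpu->comm->state_commit |= NVMM_X64_STATE_GPRS;
	svm_vcpu_state_commit(vcpu);
}
#endif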
1868
1869 /* -------------------------------------------------------------------------- */
1870
1871 static void
1872 svm_asid_alloc(struct nvmm_cpu *vcpu)
1873 {
1874 struct svm_cpudata *cpudata = vcpu->cpudata;
1875 struct vmcb *vmcb = cpudata->vmcb;
1876 size_t i, oct, bit;
1877
1878 mutex_enter(&svm_asidlock);
1879
1880 for (i = 0; i < svm_maxasid; i++) {
1881 oct = i / 8;
1882 bit = i % 8;
1883
1884 if (svm_asidmap[oct] & __BIT(bit)) {
1885 continue;
1886 }
1887
1888 svm_asidmap[oct] |= __BIT(bit);
1889 vmcb->ctrl.guest_asid = i;
1890 mutex_exit(&svm_asidlock);
1891 return;
1892 }
1893
1894 /*
1895 * No free ASID. Use the last one, which is shared and requires
1896 * special TLB handling.
1897 */
1898 cpudata->shared_asid = true;
1899 vmcb->ctrl.guest_asid = svm_maxasid - 1;
1900 mutex_exit(&svm_asidlock);
1901 }
1902
1903 static void
1904 svm_asid_free(struct nvmm_cpu *vcpu)
1905 {
1906 struct svm_cpudata *cpudata = vcpu->cpudata;
1907 struct vmcb *vmcb = cpudata->vmcb;
1908 size_t oct, bit;
1909
1910 if (cpudata->shared_asid) {
1911 return;
1912 }
1913
1914 oct = vmcb->ctrl.guest_asid / 8;
1915 bit = vmcb->ctrl.guest_asid % 8;
1916
1917 mutex_enter(&svm_asidlock);
1918 svm_asidmap[oct] &= ~__BIT(bit);
1919 mutex_exit(&svm_asidlock);
1920 }
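
/*
 * Illustrative sketch (not compiled): the allocator above packs eight
 * ASIDs per byte, so ASID i lives at svm_asidmap[i / 8], bit (i % 8).
 * A standalone first-fit scan over such a bitmap looks as follows;
 * ASID 0 (host) and ASID svm_maxasid - 1 (the shared fallback) are
 * set at init time and thus never handed out here.
 */
#if 0
static int
asid_bitmap_alloc_sketch(uint8_t *map, uint32_t maxasid, uint32_t *ret)
{
	uint32_t i;

	for (i = 0; i < maxasid; i++) {
		if (map[i / 8] & __BIT(i % 8))
			continue;	/* already taken */
		map[i / 8] |= __BIT(i % 8);
		*ret = i;
		return 0;
	}
	return ENOMEM;	/* exhausted: fall back to the shared ASID */
}
#endif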
1921
1922 static void
1923 svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
1924 {
1925 struct svm_cpudata *cpudata = vcpu->cpudata;
1926 struct vmcb *vmcb = cpudata->vmcb;
1927
1928 /* Allow reads/writes of Control Registers. */
1929 vmcb->ctrl.intercept_cr = 0;
1930
1931 /* Allow reads/writes of Debug Registers. */
1932 vmcb->ctrl.intercept_dr = 0;
1933
1934 /* Allow exceptions 0 to 31. */
1935 vmcb->ctrl.intercept_vec = 0;
1936
1937 /*
1938 * Allow:
 1939 	 * - SMI [SMM interrupts]
1940 * - VINTR [virtual interrupts]
1941 * - CR0_SPEC [CR0 writes changing other fields than CR0.TS or CR0.MP]
1942 * - RIDTR [reads of IDTR]
1943 * - RGDTR [reads of GDTR]
1944 * - RLDTR [reads of LDTR]
1945 * - RTR [reads of TR]
1946 * - WIDTR [writes of IDTR]
1947 * - WGDTR [writes of GDTR]
1948 * - WLDTR [writes of LDTR]
1949 * - WTR [writes of TR]
1950 * - RDTSC [rdtsc instruction]
1951 * - PUSHF [pushf instruction]
1952 * - POPF [popf instruction]
1953 * - IRET [iret instruction]
1954 * - INTN [int $n instructions]
1955 * - INVD [invd instruction]
1956 * - PAUSE [pause instruction]
 1957 	 * - INVLPG [invlpg instruction]
1958 * - TASKSW [task switches]
1959 *
1960 * Intercept the rest below.
1961 */
1962 vmcb->ctrl.intercept_misc1 =
1963 VMCB_CTRL_INTERCEPT_INTR |
1964 VMCB_CTRL_INTERCEPT_NMI |
1965 VMCB_CTRL_INTERCEPT_INIT |
1966 VMCB_CTRL_INTERCEPT_RDPMC |
1967 VMCB_CTRL_INTERCEPT_CPUID |
1968 VMCB_CTRL_INTERCEPT_RSM |
1969 VMCB_CTRL_INTERCEPT_HLT |
1970 VMCB_CTRL_INTERCEPT_INVLPGA |
1971 VMCB_CTRL_INTERCEPT_IOIO_PROT |
1972 VMCB_CTRL_INTERCEPT_MSR_PROT |
1973 VMCB_CTRL_INTERCEPT_FERR_FREEZE |
1974 VMCB_CTRL_INTERCEPT_SHUTDOWN;
1975
1976 /*
1977 * Allow:
1978 * - ICEBP [icebp instruction]
1979 * - WBINVD [wbinvd instruction]
1980 * - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
1981 *
1982 * Intercept the rest below.
1983 */
1984 vmcb->ctrl.intercept_misc2 =
1985 VMCB_CTRL_INTERCEPT_VMRUN |
1986 VMCB_CTRL_INTERCEPT_VMMCALL |
1987 VMCB_CTRL_INTERCEPT_VMLOAD |
1988 VMCB_CTRL_INTERCEPT_VMSAVE |
1989 VMCB_CTRL_INTERCEPT_STGI |
1990 VMCB_CTRL_INTERCEPT_CLGI |
1991 VMCB_CTRL_INTERCEPT_SKINIT |
1992 VMCB_CTRL_INTERCEPT_RDTSCP |
1993 VMCB_CTRL_INTERCEPT_MONITOR |
1994 VMCB_CTRL_INTERCEPT_MWAIT |
1995 VMCB_CTRL_INTERCEPT_XSETBV;
1996
1997 /* Intercept all I/O accesses. */
1998 memset(cpudata->iobm, 0xFF, IOBM_SIZE);
1999 vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;
2000
2001 /* Allow direct access to certain MSRs. */
2002 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
2003 svm_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, false);
2004 svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
2005 svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
2006 svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
2007 svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
2008 svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
2009 svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
2010 svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
2011 svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
2012 svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
2013 svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
2014 svm_vcpu_msr_allow(cpudata->msrbm, MSR_CR_PAT, true, true);
2015 svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
2016 vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;
2017
 2018 	/* Allocate an ASID. */
2019 svm_asid_alloc(vcpu);
2020
 2021 	/* Virtual TPR, with virtual interrupt masking enabled. */
2022 vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;
2023
2024 /* Enable Nested Paging. */
2025 vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
2026 vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];
2027
2028 /* Init XSAVE header. */
2029 cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
2030 cpudata->gfpu.xsh_xcomp_bv = 0;
2031
2032 /* These MSRs are static. */
2033 cpudata->star = rdmsr(MSR_STAR);
2034 cpudata->lstar = rdmsr(MSR_LSTAR);
2035 cpudata->cstar = rdmsr(MSR_CSTAR);
2036 cpudata->sfmask = rdmsr(MSR_SFMASK);
2037
2038 /* Install the RESET state. */
2039 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
2040 sizeof(nvmm_x86_reset_state));
2041 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
2042 vcpu->comm->state_cached = 0;
2043 svm_vcpu_setstate(vcpu);
2044 }
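
/*
 * Sketch of the MSR permission-bitmap lookup performed by
 * svm_vcpu_msr_allow(), defined earlier in this file. Layout per the
 * AMD APM vol. 2 (an assumption of this note): three regions covering
 * MSRs 0x0-0x1FFF, 0xC0000000-0xC0001FFF and 0xC0010000-0xC0011FFF,
 * with two intercept bits per MSR (read, then write); a cleared bit
 * allows direct guest access.
 */
#if 0
static void
msrbm_allow_sketch(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t byte;
	uint8_t bitoff;

	if (msr < 0x00002000) {
		byte = 0x0000 + ((msr - 0x00000000) >> 2);
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		byte = 0x0800 + ((msr - 0xC0000000) >> 2);
	} else {
		byte = 0x1000 + ((msr - 0xC0010000) >> 2);
	}
	bitoff = (msr & 0x3) << 1;	/* four MSRs per byte, two bits each */

	if (read)
		bitmap[byte] &= ~(__BIT(0) << bitoff);	/* allow RDMSR */
	if (write)
		bitmap[byte] &= ~(__BIT(1) << bitoff);	/* allow WRMSR */
}
#endif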
2045
2046 static int
2047 svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2048 {
2049 struct svm_cpudata *cpudata;
2050 int error;
2051
2052 /* Allocate the SVM cpudata. */
2053 cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
2054 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
2055 UVM_KMF_WIRED|UVM_KMF_ZERO);
2056 vcpu->cpudata = cpudata;
2057
2058 /* VMCB */
2059 error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
2060 VMCB_NPAGES);
2061 if (error)
2062 goto error;
2063
2064 /* I/O Bitmap */
2065 error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
2066 IOBM_NPAGES);
2067 if (error)
2068 goto error;
2069
2070 /* MSR Bitmap */
2071 error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
2072 MSRBM_NPAGES);
2073 if (error)
2074 goto error;
2075
2076 /* Init the VCPU info. */
2077 svm_vcpu_init(mach, vcpu);
2078
2079 return 0;
2080
2081 error:
2082 if (cpudata->vmcb_pa) {
2083 svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
2084 VMCB_NPAGES);
2085 }
2086 if (cpudata->iobm_pa) {
2087 svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
2088 IOBM_NPAGES);
2089 }
2090 if (cpudata->msrbm_pa) {
2091 svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
2092 MSRBM_NPAGES);
2093 }
2094 uvm_km_free(kernel_map, (vaddr_t)cpudata,
2095 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2096 return error;
2097 }
2098
2099 static void
2100 svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2101 {
2102 struct svm_cpudata *cpudata = vcpu->cpudata;
2103
2104 svm_asid_free(vcpu);
2105
2106 svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
2107 svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
2108 svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
2109
2110 uvm_km_free(kernel_map, (vaddr_t)cpudata,
2111 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2112 }
2113
2114 /* -------------------------------------------------------------------------- */
2115
2116 static void
2117 svm_tlb_flush(struct pmap *pm)
2118 {
2119 struct nvmm_machine *mach = pm->pm_data;
2120 struct svm_machdata *machdata = mach->machdata;
2121
2122 atomic_inc_64(&machdata->mach_htlb_gen);
2123
2124 /* Generates IPIs, which cause #VMEXITs. */
2125 pmap_tlb_shootdown(pmap_kernel(), -1, PG_G, TLBSHOOT_UPDATE);
2126 }
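
/*
 * Sketch (assumes a hypothetical per-VCPU field "vcpu_htlb_gen" and a
 * VMCB TLB-control field named tlb_ctrl): on the next VMRUN, each VCPU
 * compares its private generation number against mach_htlb_gen, and
 * requests a guest TLB flush if the host page tables changed
 * underneath it, roughly:
 */
#if 0
	if (cpudata->vcpu_htlb_gen != machdata->mach_htlb_gen) {
		vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
		cpudata->vcpu_htlb_gen = machdata->mach_htlb_gen;
	}
#endif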
2127
2128 static void
2129 svm_machine_create(struct nvmm_machine *mach)
2130 {
2131 struct svm_machdata *machdata;
2132
2133 /* Fill in pmap info. */
2134 mach->vm->vm_map.pmap->pm_data = (void *)mach;
2135 mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;
2136
2137 machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
2138 mach->machdata = machdata;
2139
2140 /* Start with an hTLB flush everywhere. */
2141 machdata->mach_htlb_gen = 1;
2142 }
2143
2144 static void
2145 svm_machine_destroy(struct nvmm_machine *mach)
2146 {
2147 kmem_free(mach->machdata, sizeof(struct svm_machdata));
2148 }
2149
2150 static int
2151 svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
2152 {
2153 struct nvmm_mach_conf_x86_cpuid *cpuid = data;
2154 struct svm_machdata *machdata = (struct svm_machdata *)mach->machdata;
2155 size_t i;
2156
2157 if (__predict_false(op != NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID))) {
2158 return EINVAL;
2159 }
2160
2161 if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
2162 (cpuid->set.ebx & cpuid->del.ebx) ||
2163 (cpuid->set.ecx & cpuid->del.ecx) ||
2164 (cpuid->set.edx & cpuid->del.edx))) {
2165 return EINVAL;
2166 }
2167
 2168 	/* If the leaf is already present, replace it. */
2169 for (i = 0; i < SVM_NCPUIDS; i++) {
2170 if (!machdata->cpuidpresent[i]) {
2171 continue;
2172 }
2173 if (machdata->cpuid[i].leaf == cpuid->leaf) {
2174 memcpy(&machdata->cpuid[i], cpuid,
2175 sizeof(struct nvmm_mach_conf_x86_cpuid));
2176 return 0;
2177 }
2178 }
2179
 2180 	/* Not present yet, insert into a free slot. */
2181 for (i = 0; i < SVM_NCPUIDS; i++) {
2182 if (!machdata->cpuidpresent[i]) {
2183 machdata->cpuidpresent[i] = true;
2184 memcpy(&machdata->cpuid[i], cpuid,
2185 sizeof(struct nvmm_mach_conf_x86_cpuid));
2186 return 0;
2187 }
2188 }
2189
2190 return ENOBUFS;
2191 }
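
/*
 * Sketch: the disjointness check above (set & del == 0) matches an
 * application step of this shape in the CPUID exit handler, where
 * descs[] holds the host's answer for the configured leaf:
 */
#if 0
	descs[0] |= machdata->cpuid[i].set.eax;
	descs[0] &= ~machdata->cpuid[i].del.eax;
	descs[1] |= machdata->cpuid[i].set.ebx;
	descs[1] &= ~machdata->cpuid[i].del.ebx;
	/* ...and likewise for ECX and EDX. */
#endif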
2192
2193 /* -------------------------------------------------------------------------- */
2194
2195 static bool
2196 svm_ident(void)
2197 {
2198 u_int descs[4];
2199 uint64_t msr;
2200
2201 if (cpu_vendor != CPUVENDOR_AMD) {
2202 return false;
2203 }
2204 if (!(cpu_feature[3] & CPUID_SVM)) {
2205 return false;
2206 }
2207
2208 if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
2209 return false;
2210 }
2211 x86_cpuid(0x8000000a, descs);
2212
2213 /* Want Nested Paging. */
2214 if (!(descs[3] & CPUID_AMD_SVM_NP)) {
2215 return false;
2216 }
2217
 2218 	/* Want the nRIP save feature (next RIP reported on #VMEXIT). */
2219 if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
2220 return false;
2221 }
2222
2223 svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;
2224
2225 msr = rdmsr(MSR_VMCR);
2226 if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
2227 return false;
2228 }
2229
2230 return true;
2231 }
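
/*
 * Sketch: the CPUID Fn8000_000A fields consumed above and in
 * svm_init() below, per the AMD APM (the field layout is an
 * assumption of this note, not spelled out by the code):
 */
#if 0
	uint32_t svm_rev = descs[0] & 0xFF;	/* EAX[7:0]: SVM revision */
	uint32_t nasid   = descs[1];		/* EBX: number of ASIDs */
	uint32_t feats   = descs[3];		/* EDX: NP, NRIPS, ... */
#endif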
2232
2233 static void
2234 svm_init_asid(uint32_t maxasid)
2235 {
2236 size_t i, j, allocsz;
2237
2238 mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);
2239
 2240 	/* Arbitrarily limit the number of ASIDs. */
2241 maxasid = uimin(maxasid, 8192);
2242
2243 svm_maxasid = maxasid;
2244 allocsz = roundup(maxasid, 8) / 8;
2245 svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
2246
2247 /* ASID 0 is reserved for the host. */
2248 svm_asidmap[0] |= __BIT(0);
2249
 2250 	/* ASID n-1 is special: we share it. */
2251 i = (maxasid - 1) / 8;
2252 j = (maxasid - 1) % 8;
2253 svm_asidmap[i] |= __BIT(j);
2254 }
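
/*
 * Worked example (illustrative): with the 8192 cap above,
 * allocsz = roundup(8192, 8) / 8 = 1024 bytes, and the shared
 * ASID 8191 lands at i = 8191 / 8 = 1023, j = 8191 % 8 = 7.
 */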
2255
2256 static void
2257 svm_change_cpu(void *arg1, void *arg2)
2258 {
2259 bool enable = (bool)arg1;
2260 uint64_t msr;
2261
2262 msr = rdmsr(MSR_VMCR);
2263 if (msr & VMCR_SVMED) {
2264 wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
2265 }
2266
2267 if (!enable) {
2268 wrmsr(MSR_VM_HSAVE_PA, 0);
2269 }
2270
2271 msr = rdmsr(MSR_EFER);
2272 if (enable) {
2273 msr |= EFER_SVME;
2274 } else {
2275 msr &= ~EFER_SVME;
2276 }
2277 wrmsr(MSR_EFER, msr);
2278
2279 if (enable) {
2280 wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
2281 }
2282 }
2283
2284 static void
2285 svm_init(void)
2286 {
2287 CPU_INFO_ITERATOR cii;
2288 struct cpu_info *ci;
2289 struct vm_page *pg;
2290 u_int descs[4];
2291 uint64_t xc;
2292
2293 x86_cpuid(0x8000000a, descs);
2294
2295 /* The guest TLB flush command. */
2296 if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
2297 svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
2298 } else {
2299 svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
2300 }
2301
2302 /* Init the ASID. */
2303 svm_init_asid(descs[1]);
2304
2305 /* Init the XCR0 mask. */
2306 svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;
2307
2308 memset(hsave, 0, sizeof(hsave));
2309 for (CPU_INFO_FOREACH(cii, ci)) {
2310 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
2311 hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
2312 }
2313
2314 xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
2315 xc_wait(xc);
2316 }
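
/*
 * Note (illustrative, per the AMD APM): FLUSH_GUEST invalidates only
 * the current guest's ASID, while FLUSH_ALL wipes every ASID on the
 * CPU, so FlushByASID support makes guest TLB flushes much cheaper.
 * The selected command is written into the VMCB TLB-control field
 * whenever the run path decides a guest flush is needed.
 */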
2317
2318 static void
2319 svm_fini_asid(void)
2320 {
2321 size_t allocsz;
2322
2323 allocsz = roundup(svm_maxasid, 8) / 8;
2324 kmem_free(svm_asidmap, allocsz);
2325
2326 mutex_destroy(&svm_asidlock);
2327 }
2328
2329 static void
2330 svm_fini(void)
2331 {
2332 uint64_t xc;
2333 size_t i;
2334
2335 xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
2336 xc_wait(xc);
2337
2338 for (i = 0; i < MAXCPUS; i++) {
2339 if (hsave[i].pa != 0)
2340 uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
2341 }
2342
2343 svm_fini_asid();
2344 }
2345
2346 static void
2347 svm_capability(struct nvmm_capability *cap)
2348 {
2349 cap->arch.xcr0_mask = svm_xcr0_mask;
2350 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
2351 cap->arch.conf_cpuid_maxops = SVM_NCPUIDS;
2352 }
2353
2354 const struct nvmm_impl nvmm_x86_svm = {
2355 .ident = svm_ident,
2356 .init = svm_init,
2357 .fini = svm_fini,
2358 .capability = svm_capability,
2359 .conf_max = NVMM_X86_NCONF,
2360 .conf_sizes = svm_conf_sizes,
2361 .state_size = sizeof(struct nvmm_x64_state),
2362 .machine_create = svm_machine_create,
2363 .machine_destroy = svm_machine_destroy,
2364 .machine_configure = svm_machine_configure,
2365 .vcpu_create = svm_vcpu_create,
2366 .vcpu_destroy = svm_vcpu_destroy,
2367 .vcpu_setstate = svm_vcpu_setstate,
2368 .vcpu_getstate = svm_vcpu_getstate,
2369 .vcpu_inject = svm_vcpu_inject,
2370 .vcpu_run = svm_vcpu_run
2371 };
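
/*
 * Sketch: the machine-independent nvmm(4) core drives this backend
 * through the ops vector above; probing would look roughly like the
 * following (hypothetical surrounding code; only the .ident and .init
 * signatures are taken from this file):
 */
#if 0
	const struct nvmm_impl *impl = &nvmm_x86_svm;

	if ((*impl->ident)()) {
		(*impl->init)();
		/* machine_create/vcpu_run/etc. are then dispatched
		 * through the same vector. */
	}
#endif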
2372