/*	$NetBSD: cpufunc.h,v 1.37 2019/10/30 17:06:57 maxv Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

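/*
 * PAUSE is a spin-wait hint: it tells the CPU that the caller is busy-waiting,
 * which lowers power use and avoids memory-order mis-speculation penalties
 * on SMT siblings.
 */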
static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

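/*
 * INVPCID takes a 16-byte memory descriptor (PCID + linear address) and an
 * operation type in a register: invalidate one address in a PCID, a whole
 * PCID context, everything, or everything except global translations.
 */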
#define	INVPCID_ADDRESS		0
#define	INVPCID_CONTEXT		1
#define	INVPCID_ALL		2
#define	INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}

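/*
 * RDTSC returns the 64-bit time-stamp counter split across EDX:EAX; the two
 * halves are recombined here. The read is not serializing.
 */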
static inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm volatile (
		"rdtsc"
		: "=a" (low), "=d" (high)
		:
	);

	return (low | ((uint64_t)high << 32));
}

#ifndef XEN
void	x86_hotpatch(uint32_t, const uint8_t *, size_t);
void	x86_patch_window_open(u_long *, u_long *);
void	x86_patch_window_close(u_long, u_long);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);

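/*
 * CPUID with an explicit subleaf: %eax selects the leaf, %ecx the subleaf,
 * and the results are returned in regs[] in EAX, EBX, ECX, EDX order.
 * x86_cpuid() is the common case with subleaf 0.
 */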
static inline void
x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
{
	uint32_t ebx, edx;

	__asm volatile (
		"cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (eax), "c" (ecx)
	);

	regs[0] = eax;
	regs[1] = ebx;
	regs[2] = ecx;
	regs[3] = edx;
}
#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))

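/*
 * Usage sketch (not part of this header): reading the basic leaf to get the
 * maximum supported leaf and the vendor string.
 *
 *	uint32_t descs[4];
 *	char vendor[13];
 *
 *	x86_cpuid(0, descs);
 *	memcpy(vendor + 0, &descs[1], 4);	// EBX
 *	memcpy(vendor + 4, &descs[3], 4);	// EDX
 *	memcpy(vendor + 8, &descs[2], 4);	// ECX
 *	vendor[12] = '\0';
 */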
/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov %%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

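/*
 * FUNC_CR() generates inline lcrN()/rcrN() accessors for a control register;
 * PROTO_CR() only declares them, for configurations (Xen PV) where the
 * accessors are implemented out of line.
 */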
#define FUNC_CR(crnum)						\
	static inline void lcr##crnum(register_t val)		\
	{							\
		__asm volatile (				\
			"mov %[val],%%cr" #crnum		\
			:					\
			: [val] "r" (val)			\
			: "memory"				\
		);						\
	}							\
	static inline register_t rcr##crnum(void)		\
	{							\
		register_t val;					\
		__asm volatile (				\
			"mov %%cr" #crnum ",%[val]"		\
			: [val] "=r" (val)			\
			:					\
		);						\
		return val;					\
	}

#define PROTO_CR(crnum)						\
	void lcr##crnum(register_t);				\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)

/* -------------------------------------------------------------------------- */

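/*
 * Same pattern for the debug registers: FUNC_DR() generates inline
 * ldrN()/rdrN() accessors, PROTO_DR() declares out-of-line versions for
 * Xen PV.
 */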
#define FUNC_DR(drnum)						\
	static inline void ldr##drnum(register_t val)		\
	{							\
		__asm volatile (				\
			"mov %[val],%%dr" #drnum		\
			:					\
			: [val] "r" (val)			\
		);						\
	}							\
	static inline register_t rdr##drnum(void)		\
	{							\
		register_t val;					\
		__asm volatile (				\
			"mov %%dr" #drnum ",%[val]"		\
			: [val] "=r" (val)			\
			:					\
		);						\
		return val;					\
	}

#define PROTO_DR(drnum)						\
	register_t rdr##drnum(void);				\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif

/* -------------------------------------------------------------------------- */

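/*
 * FPU/SSE helpers: raw wrappers around the x87 control instructions and the
 * MXCSR load/store, used by the FPU state management code.
 */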
union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit" ::: "memory");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

static inline void
fnstcw(uint16_t *val)
{
	__asm volatile (
		"fnstcw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
fnstsw(uint16_t *val)
{
	__asm volatile (
		"fnstsw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
clts(void)
{
	__asm volatile ("clts" ::: "memory");
}

void	stts(void);

static inline void
x86_stmxcsr(uint32_t *val)
{
	__asm volatile (
		"stmxcsr %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
x86_ldmxcsr(uint32_t *val)
{
	__asm volatile (
		"ldmxcsr %[val]"
		:
		: [val] "m" (*val)
	);
}

void	fldummy(void);

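/*
 * XGETBV/XSETBV access the extended control registers (XCRs): %ecx selects
 * the register (XCR0 holds the enabled XSAVE feature mask) and the 64-bit
 * value is split across EDX:EAX.
 */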
static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}

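/*
 * FPU state save/restore: fnsave/frstor operate on the legacy x87 area,
 * fxsave/fxrstor on the 512-byte FXSAVE area, and the xsave family on the
 * XSAVE area, with the component bitmap passed in EDX:EAX.
 */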
static inline void
fnsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fnsave %[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
frstor(void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"frstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
fxsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave %[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor(void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor(void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}

/* -------------------------------------------------------------------------- */

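/*
 * Interrupt enable/disable: native kernels use STI/CLI directly; Xen PV
 * kernels use out-of-line versions that manipulate the hypervisor's virtual
 * event mask instead.
 */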
#ifdef XENPV
void	x86_disable_intr(void);
void	x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti" ::: "memory");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);

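/*
 * Usage sketch (not part of this header), for a block that must run with
 * interrupts disabled and then restore the previous state:
 *
 *	u_long psl = x86_read_psl();
 *	x86_disable_intr();
 *	... critical section ...
 *	x86_write_psl(psl);
 */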
/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

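/*
 * MSR access: %ecx selects the MSR and the 64-bit value travels in EDX:EAX.
 * The *_locked variants additionally pass the Opteron passcode in %edi.
 */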
static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline uint64_t
rdmsr_locked(u_int msr)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr), "D" (pass)
	);

	return (low | ((uint64_t)high << 32));
}

int	rdmsr_safe(u_int, uint64_t *);

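/*
 * The write side declares a "memory" clobber so the compiler does not reorder
 * memory accesses around an MSR write, since many MSRs have side effects.
 */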
static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
		: "memory"
	);
}

static inline void
wrmsr_locked(u_int msr, uint64_t val)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr), "D" (pass)
		: "memory"
	);
}

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */