/*	$NetBSD: cpufunc.h,v 1.39 2020/05/02 11:37:17 maxv Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

#define INVPCID_ADDRESS		0
#define INVPCID_CONTEXT		1
#define INVPCID_ALL		2
#define INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}
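
/*
 * The anonymous structure above is the 16-byte INVPCID descriptor (PCID in
 * the low quadword, linear address in the high quadword); the INVPCID_*
 * values select the invalidation type.  A usage sketch (illustrative only):
 *
 *	invpcid(INVPCID_ADDRESS, pcid, va);	// one mapping of one PCID
 *	invpcid(INVPCID_ALL, 0, 0);		// everything, incl. globals
 */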

static inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm volatile (
		"rdtsc"
		: "=a" (low), "=d" (high)
		:
	);

	return (low | ((uint64_t)high << 32));
}
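
/*
 * RDTSC returns the 64-bit time-stamp counter split across %edx:%eax; the
 * inline above stitches the halves back together.  A minimal sketch of
 * measuring an interval in cycles (no serialization assumed here):
 *
 *	uint64_t t0 = rdtsc();
 *	... work ...
 *	uint64_t cycles = rdtsc() - t0;
 */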

#ifndef XENPV
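/*
 * Runtime hotpatching (implemented in x86/x86/patch.c, roughly): a
 * descriptor names a patch point and carries one or more candidate
 * replacement sequences, each delimited by its start/end addresses.
 * x86_hotpatch() applies one of the sources by index, and x86_patch()
 * runs the boot-time patching pass.
 */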
struct x86_hotpatch_source {
	uint8_t *saddr;
	uint8_t *eaddr;
};

struct x86_hotpatch_descriptor {
	uint8_t name;
	uint8_t nsrc;
	const struct x86_hotpatch_source *srcs[];
};

void	x86_hotpatch(uint8_t, uint8_t);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);

static inline void
x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
{
	uint32_t ebx, edx;

	__asm volatile (
		"cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (eax), "c" (ecx)
	);

	regs[0] = eax;
	regs[1] = ebx;
	regs[2] = ecx;
	regs[3] = edx;
}
#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))
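
/*
 * Usage sketch (illustrative): the four output registers land in regs[] in
 * %eax, %ebx, %ecx, %edx order, so querying the basic feature leaf looks
 * like:
 *
 *	uint32_t regs[4];
 *	x86_cpuid(1, regs);
 *	if (regs[3] & CPUID_SSE2)	// %edx bits, <machine/specialreg.h>
 *		...
 */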

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov %%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

#define FUNC_CR(crnum)						\
	static inline void lcr##crnum(register_t val)		\
	{							\
		__asm volatile (				\
			"mov %[val],%%cr" #crnum		\
			:					\
			: [val] "r" (val)			\
			: "memory"				\
		);						\
	}							\
	static inline register_t rcr##crnum(void)		\
	{							\
		register_t val;					\
		__asm volatile (				\
			"mov %%cr" #crnum ",%[val]"		\
			: [val] "=r" (val)			\
			:					\
		);						\
		return val;					\
	}

#define PROTO_CR(crnum)						\
	void lcr##crnum(register_t);				\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)
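
/*
 * The FUNC_CR() expansions above provide lcrN()/rcrN() accessors, e.g.
 * rcr3()/lcr3() for the page-table base.  A sketch of setting a %cr4
 * feature bit (CR4_OSXSAVE is taken from <machine/specialreg.h>):
 *
 *	lcr4(rcr4() | CR4_OSXSAVE);
 */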

/* -------------------------------------------------------------------------- */

#define FUNC_DR(drnum)						\
	static inline void ldr##drnum(register_t val)		\
	{							\
		__asm volatile (				\
			"mov %[val],%%dr" #drnum		\
			:					\
			: [val] "r" (val)			\
		);						\
	}							\
	static inline register_t rdr##drnum(void)		\
	{							\
		register_t val;					\
		__asm volatile (				\
			"mov %%dr" #drnum ",%[val]"		\
			: [val] "=r" (val)			\
			:					\
		);						\
		return val;					\
	}

#define PROTO_DR(drnum)						\
	register_t rdr##drnum(void);				\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit" ::: "memory");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

static inline void
fnstcw(uint16_t *val)
{
	__asm volatile (
		"fnstcw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
fnstsw(uint16_t *val)
{
	__asm volatile (
		"fnstsw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
clts(void)
{
	__asm volatile ("clts" ::: "memory");
}

void	stts(void);

static inline void
x86_stmxcsr(uint32_t *val)
{
	__asm volatile (
		"stmxcsr %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
x86_ldmxcsr(uint32_t *val)
{
	__asm volatile (
		"ldmxcsr %[val]"
		:
		: [val] "m" (*val)
	);
}
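
/*
 * Sketch of a read-modify-write of MXCSR with the two inlines above;
 * 0x1f80 is the architectural reset value (all SSE exceptions masked):
 *
 *	uint32_t mxcsr;
 *	x86_stmxcsr(&mxcsr);
 *	mxcsr |= 0x1f80;
 *	x86_ldmxcsr(&mxcsr);
 */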

void	fldummy(void);

static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}
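
/*
 * rdxcr()/wrxcr() access the extended control registers via XGETBV/XSETBV
 * (%ecx selects the register, %edx:%eax carries the value).  Sketch of
 * enabling x87+SSE state in XCR0, using the XCR0_* bits from
 * <machine/specialreg.h>:
 *
 *	wrxcr(0, rdxcr(0) | XCR0_X87 | XCR0_SSE);
 */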

static inline void
fnsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fnsave %[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
frstor(void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"frstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
fxsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave %[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor(void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor(void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}
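
/*
 * The XSAVE-family wrappers split the 64-bit component mask into %edx:%eax
 * as the instructions expect.  Sketch of saving and restoring x87+SSE state
 * to a suitably aligned save area (XCR0_* bits from <machine/specialreg.h>):
 *
 *	xsave(area, XCR0_X87 | XCR0_SSE);
 *	...
 *	xrstor(area, XCR0_X87 | XCR0_SSE);
 */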

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void	x86_disable_intr(void);
void	x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti" ::: "memory");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);
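
/*
 * Sketch of the usual save/disable/restore pattern built on the two
 * functions above:
 *
 *	u_long psl = x86_read_psl();
 *	x86_disable_intr();
 *	... critical section ...
 *	x86_write_psl(psl);
 */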

/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define OPTERON_MSR_PASSCODE	0x9c5a203aU

static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline uint64_t
rdmsr_locked(u_int msr)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr), "D" (pass)
	);

	return (low | ((uint64_t)high << 32));
}

int	rdmsr_safe(u_int, uint64_t *);

static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
		: "memory"
	);
}
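
/*
 * Sketch of a read-modify-write of an MSR with rdmsr()/wrmsr(); the
 * MSR_EFER/EFER_NXE names are the ones from <machine/specialreg.h>:
 *
 *	uint64_t efer = rdmsr(MSR_EFER);
 *	wrmsr(MSR_EFER, efer | EFER_NXE);
 */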

static inline void
wrmsr_locked(u_int msr, uint64_t val)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr), "D" (pass)
		: "memory"
	);
}

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */