/*	$NetBSD: cpufunc.h,v 1.29 2019/05/11 12:58:17 christos Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif
/* Spin-wait hint: tell the CPU we are in a busy-wait loop (PAUSE). */
static inline void
x86_pause(void)
{
	asm volatile ("pause");
}
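
/*
 * Illustrative sketch, not part of the original header: a spin-wait
 * loop built on x86_pause().  The PAUSE hint keeps a busy-waiting
 * logical CPU from starving its hyperthread sibling and avoids a
 * memory-order penalty on loop exit.  The lock encoding (0 == free)
 * is hypothetical.
 */
static inline void
x86_spinwait_example(volatile int *lock)
{
	while (*lock != 0)
		x86_pause();
}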

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

/* Read the 64-bit time-stamp counter (RDTSC). */
static inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	asm volatile (
		"rdtsc"
		: "=a" (low), "=d" (high)
		:
	);

	return (low | ((uint64_t)high << 32));
}
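
/*
 * Illustrative sketch, not part of the original header: timing an
 * operation in TSC ticks.  RDTSC is not a serializing instruction, so
 * precise measurements normally fence around it (e.g. with LFENCE);
 * this minimal version omits that.
 */
static inline uint64_t
rdtsc_delta_example(void)
{
	uint64_t t0, t1;

	t0 = rdtsc();
	x86_pause();		/* stand-in for the work being timed */
	t1 = rdtsc();
	return t1 - t0;
}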

#ifndef XEN
void	x86_hotpatch(uint32_t, const uint8_t *, size_t);
void	x86_patch_window_open(u_long *, u_long *);
void	x86_patch_window_close(u_long, u_long);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);
/* x86_cpuid2() writes four 32-bit values: %eax, %ebx, %ecx and %edx. */
#define	x86_cpuid(a,b)	x86_cpuid2((a),0,(b))
void	x86_cpuid2(uint32_t, uint32_t, uint32_t *);
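
/*
 * Illustrative sketch, not part of the original header: fetching the
 * 12-byte CPU vendor string with CPUID leaf 0.  descs[] receives
 * %eax, %ebx, %ecx, %edx in that order; the vendor string is stored
 * in %ebx, %edx, %ecx.  The output buffer contract (>= 13 bytes) is
 * an assumption of this example.
 */
static inline void
x86_cpuid_vendor_example(char *buf)
{
	uint32_t descs[4];
	int i;

	x86_cpuid(0, descs);
	for (i = 0; i < 4; i++) {
		buf[i + 0] = (char)(descs[1] >> (i * 8));	/* %ebx */
		buf[i + 4] = (char)(descs[3] >> (i * 8));	/* %edx */
		buf[i + 8] = (char)(descs[2] >> (i * 8));	/* %ecx */
	}
	buf[12] = '\0';
}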

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	asm volatile (
		"mov	%%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	asm volatile (
		"mov	%[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	asm volatile (
		"mov	%[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	asm volatile (
		"mov	%[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

/*
 * Generate lcrN()/rcrN() accessors that write and read control register
 * %crN.  Under XENPV the guest cannot touch most control registers
 * directly, so only prototypes are emitted there and the implementations
 * live elsewhere.
 */
#define FUNC_CR(crnum)					\
	static inline void lcr##crnum(register_t val)	\
	{						\
		asm volatile (				\
			"mov	%[val],%%cr" #crnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rcr##crnum(void)	\
	{						\
		register_t val;				\
		asm volatile (				\
			"mov	%%cr" #crnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_CR(crnum)					\
	void lcr##crnum(register_t);			\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)
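
/*
 * Illustrative sketch, not part of the original header: a
 * read-modify-write of %cr0 through the generated accessors, setting
 * the TS (task switched) bit much as stts() does.  CR0_TS comes from
 * <machine/specialreg.h>.
 */
static inline void
x86_set_ts_example(void)
{
	lcr0(rcr0() | CR0_TS);
}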

/* -------------------------------------------------------------------------- */

/*
 * Generate ldrN()/rdrN() accessors that write and read debug register
 * %drN.  As with the control registers, XENPV guests get prototypes
 * only.
 */
#define FUNC_DR(drnum)					\
	static inline void ldr##drnum(register_t val)	\
	{						\
		asm volatile (				\
			"mov	%[val],%%dr" #drnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rdr##drnum(void)	\
	{						\
		register_t val;				\
		asm volatile (				\
			"mov	%%dr" #drnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_DR(drnum)					\
	register_t rdr##drnum(void);			\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif
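
/*
 * Illustrative sketch, not part of the original header: arming %dr0 as
 * a 4-byte write watchpoint.  The %dr7 encoding used here (L0 enable in
 * bit 0, R/W0 = 01 for data writes in bits 16-17, LEN0 = 11 for four
 * bytes in bits 18-19) is quoted from the architecture manuals and
 * should be verified before use.
 */
static inline void
x86_set_watchpoint_example(vaddr_t va)
{
	ldr0(va);					/* linear address to watch */
	ldr7(rdr7() | 0x1 | (0x1 << 16) | (0x3 << 18));	/* enable slot 0 */
}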

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	asm volatile ("fninit");
}

static inline void
fnclex(void)
{
	asm volatile ("fnclex");
}

void	fnsave(union savefpu *);
void	fnstcw(uint16_t *);
uint16_t fngetsw(void);
void	fnstsw(uint16_t *);
void	frstor(const union savefpu *);

static inline void
clts(void)
{
	asm volatile ("clts");
}

void	stts(void);
void	fxsave(union savefpu *);
void	fxrstor(const union savefpu *);

void	x86_ldmxcsr(const uint32_t *);
void	x86_stmxcsr(uint32_t *);
void	fldummy(void);

/* Read the extended control register 'xcr' (XGETBV). */
static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

/* Write 'val' to the extended control register 'xcr' (XSETBV). */
static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}
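
/*
 * Illustrative sketch, not part of the original header: enabling the
 * x87, SSE and AVX state components in %xcr0 (bits 0, 1 and 2 of the
 * architectural XCR0 layout).  XSETBV faults unless CR4_OSXSAVE is set
 * and the CPU supports the requested bits, so real code gates this on
 * CPUID.
 */
static inline void
x86_enable_avx_state_example(void)
{
	uint64_t xcr0 = rdxcr(0);

	xcr0 |= 0x1 | 0x2 | 0x4;	/* x87, SSE, AVX state components */
	wrxcr(0, xcr0);
}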

void	xrstor(const union savefpu *, uint64_t);
void	xsave(union savefpu *, uint64_t);
void	xsaveopt(union savefpu *, uint64_t);

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void x86_disable_intr(void);
void x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	asm volatile ("cli");
}

static inline void
x86_enable_intr(void)
{
	asm volatile ("sti");
}
#endif /* XENPV */

/* Use x86_read_psl()/x86_write_psl() when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);
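
/*
 * Illustrative sketch, not part of the original header, of the
 * save/disable/restore pattern described above: capture the current
 * interrupt state, block interrupts across a critical section, then
 * restore the saved state instead of unconditionally re-enabling.
 */
static inline void
x86_psl_critical_example(void)
{
	u_long psl = x86_read_psl();

	x86_disable_intr();
	/* ... code that must run with interrupts blocked ... */
	x86_write_psl(psl);
}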

/* Use x86_read_flags()/x86_write_flags() to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

uint64_t	rdmsr_locked(u_int);
int		rdmsr_safe(u_int, uint64_t *);
void		wrmsr_locked(u_int, uint64_t);

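/*
 * Illustrative sketch, not part of the original header: probing an MSR
 * that may not be implemented.  Plain rdmsr() raises #GP on an unknown
 * MSR, whereas rdmsr_safe() catches the fault.  The 0-on-success
 * return convention assumed here should be checked against the actual
 * implementation.
 */
static inline bool
x86_probe_msr_example(u_int msr, uint64_t *valp)
{
	return rdmsr_safe(msr, valp) == 0;
}
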
#endif /* _KERNEL */

/* Read the 64-bit value of the model-specific register 'msr' (RDMSR). */
static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

/* Write the 64-bit value 'val' to the model-specific register 'msr' (WRMSR). */
static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
	);
}
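
/*
 * Illustrative sketch, not part of the original header: a
 * read-modify-write of an MSR through the accessors above.  The caller
 * supplies the MSR number, typically one of the MSR_* constants from
 * <machine/specialreg.h>.
 */
static inline void
wrmsr_setbits_example(u_int msr, uint64_t bits)
{
	wrmsr(msr, rdmsr(msr) | bits);
}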

#endif /* !_X86_CPUFUNC_H_ */