/*	$NetBSD: cpufunc.h,v 1.32 2019/05/30 21:40:40 christos Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}
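
/*
 * Usage sketch (illustrative, not part of the original header): "pause"
 * belongs in spin-wait loops, where it saves power and frees pipeline
 * resources for an SMT sibling while polling.
 *
 *	volatile int *flagp = ...;	// hypothetical poll target
 *	while (*flagp == 0)
 *		x86_pause();
 */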

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

#define INVPCID_ADDRESS		0
#define INVPCID_CONTEXT		1
#define INVPCID_ALL		2
#define INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}
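
/*
 * Usage sketch (illustrative): invalidate the TLB entry for one address
 * tagged with a PCID, or every non-global entry; pcid and va here are
 * placeholders.
 *
 *	invpcid(INVPCID_ADDRESS, pcid, va);	// one (pcid, va) mapping
 *	invpcid(INVPCID_ALL_NONGLOBAL, 0, 0);	// all non-global mappings
 */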

static inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm volatile (
		"rdtsc"
		: "=a" (low), "=d" (high)
		:
	);

	return (low | ((uint64_t)high << 32));
}
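
/*
 * Timing sketch (illustrative): rdtsc is not a serializing instruction,
 * so out-of-order execution can blur very short intervals.
 *
 *	uint64_t t0 = rdtsc();
 *	... code under measurement ...
 *	uint64_t cycles = rdtsc() - t0;
 */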

#ifndef XEN
void	x86_hotpatch(uint32_t, const uint8_t *, size_t);
void	x86_patch_window_open(u_long *, u_long *);
void	x86_patch_window_close(u_long, u_long);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);
/* x86_cpuid2() writes four 32-bit values, %eax, %ebx, %ecx and %edx */
#define	x86_cpuid(a,b)	x86_cpuid2((a),0,(b))
void	x86_cpuid2(uint32_t, uint32_t, uint32_t *);
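
/*
 * Usage sketch (illustrative): query basic leaf 0; the array receives
 * %eax, %ebx, %ecx and %edx in that order.
 *
 *	uint32_t descs[4];
 *	x86_cpuid(0, descs);
 *	// descs[0] = highest basic leaf; descs[1..3] hold the vendor string
 */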

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov	%%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

#define FUNC_CR(crnum)					\
	static inline void lcr##crnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%cr" #crnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rcr##crnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%cr" #crnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_CR(crnum)					\
	void lcr##crnum(register_t);			\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)
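
/*
 * Each FUNC_CR(n) expansion yields lcrN()/rcrN(). Sketch (illustrative):
 * toggling CR4.PGE flushes global TLB entries on CPUs with PGE, which is
 * how tlbflushg() works.
 *
 *	register_t cr4 = rcr4();
 *	lcr4(cr4 & ~CR4_PGE);
 *	lcr4(cr4);
 */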

/* -------------------------------------------------------------------------- */

#define FUNC_DR(drnum)					\
	static inline void ldr##drnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%dr" #drnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rdr##drnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%dr" #drnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_DR(drnum)					\
	register_t rdr##drnum(void);			\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif
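
/*
 * Likewise, each FUNC_DR(n) expansion yields ldrN()/rdrN(). Sketch
 * (illustrative): read the debug status register after a debug trap,
 * then clear it so stale bits do not confuse the next trap.
 *
 *	register_t dr6 = rdr6();
 *	ldr6(0);
 */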

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

void	fnsave(union savefpu *);
void	fnstcw(uint16_t *);
uint16_t fngetsw(void);
void	fnstsw(uint16_t *);
void	frstor(const union savefpu *);

static inline void
clts(void)
{
	__asm volatile ("clts");
}

void	stts(void);
void	fxsave(union savefpu *);
void	fxrstor(const union savefpu *);

void	x86_ldmxcsr(const uint32_t *);
void	x86_stmxcsr(uint32_t *);
void	fldummy(void);

static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}
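
/*
 * Usage sketch (illustrative): enable the x87 and SSE state components
 * in XCR0. XCR0_X87/XCR0_SSE are assumed to come from specialreg.h, and
 * CR4.OSXSAVE must already be set or xgetbv/xsetbv raise #UD.
 *
 *	uint64_t xcr0 = rdxcr(0);
 *	wrxcr(0, xcr0 | XCR0_X87 | XCR0_SSE);
 */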

void	xrstor(const union savefpu *, uint64_t);
void	xsave(union savefpu *, uint64_t);
void	xsaveopt(union savefpu *, uint64_t);

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void x86_disable_intr(void);
void x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti");
}
#endif /* XENPV */

/* Use x86_read_psl()/x86_write_psl() to save and restore interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);

/* Use x86_read_flags()/x86_write_flags() to adjust other %eflags members. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);
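
/*
 * Typical save/restore pattern (sketch, illustrative):
 *
 *	u_long psl = x86_read_psl();	// remember interrupt state
 *	x86_disable_intr();
 *	... critical section ...
 *	x86_write_psl(psl);		// restore previous state
 */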

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

uint64_t	rdmsr_locked(u_int);
int		rdmsr_safe(u_int, uint64_t *);

static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
	);
}
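
/*
 * Usage sketch (illustrative): set a bit in an MSR via read-modify-write;
 * MSR_EFER and EFER_NXE are assumed to come from specialreg.h.
 *
 *	uint64_t efer = rdmsr(MSR_EFER);
 *	wrmsr(MSR_EFER, efer | EFER_NXE);
 */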

void		wrmsr_locked(u_int, uint64_t);

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */