/*	$NetBSD: cpufunc.h,v 1.4.14.1 2008/09/28 15:57:50 jdc Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to amd64-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/specialreg.h>

static __inline void
x86_pause(void)
{
	/* Spin-loop hint; currently a no-op on this port. */
}

static __inline void
x86_lfence(void)
{

	/*
	 * XXX handle the case where lfence isn't available.
	 *
	 * Memory clobber to avoid compiler reordering.
	 */
	__asm __volatile("lfence" : : : "memory");
}

static __inline void
x86_mfence(void)
{

	/*
	 * XXX handle the case where mfence isn't available.
	 *
	 * Memory clobber to avoid compiler reordering.
	 */
	__asm __volatile("mfence" : : : "memory");
}
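
/*
 * Example (sketch): a typical spin-wait would pair x86_pause() with an
 * acquire fence once the flag is observed set.  `flag' below is a
 * hypothetical volatile int written by another CPU:
 *
 *	while (flag == 0)
 *		x86_pause();
 *	x86_lfence();
 */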

#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
lidt(void *p)
{
	__asm __volatile("lidt (%0)" : : "r" (p));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits are reserved anyway, so keep this interface 32 bits wide.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm __volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}
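
/*
 * Example (sketch): control register bits are normally changed with a
 * read-modify-write sequence, e.g. to set the write-protect bit
 * (assuming CR0_WP from <machine/specialreg.h>):
 *
 *	lcr0(rcr0() | CR0_WP);
 */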

static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for cr0. Don't touch the upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm __volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm __volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 * System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}
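
/*
 * Example (sketch): callers usually pick the cheapest sufficient
 * primitive -- invlpg() for one stale mapping, tlbflush() to drop all
 * non-global entries, tlbflushg() only when global entries must go too.
 * `va' and `single_page' below are hypothetical locals:
 *
 *	if (single_page)
 *		invlpg(va);
 *	else
 *		tlbflushg();
 */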

#ifdef notyet
void	setidt	__P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm __volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm __volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm __volatile("pushq %0; popfq" : : "r" (ef));
}
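
/*
 * Example (sketch): save, disable, and restore the interrupt flag
 * around a short critical section:
 *
 *	u_long rf = read_rflags();
 *	disable_intr();
 *	... critical section ...
 *	write_rflags(rf);
 */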

static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm __volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}
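
/*
 * Example (sketch): MSRs are usually updated read-modify-write, e.g.
 * enabling no-execute support (assuming MSR_EFER and EFER_NXE from
 * <machine/specialreg.h>):
 *
 *	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
 */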

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm __volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}
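
/*
 * Example (sketch): a rough cycle count for a code sequence.  rdtsc is
 * not a serializing instruction, so the result is approximate:
 *
 *	u_int64_t t0 = rdtsc();
 *	... code under test ...
 *	u_int64_t cycles = rdtsc() - t0;
 */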

static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm __volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define read_psl()	read_rflags()
#define write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */