/*	$NetBSD: cpufunc.h,v 1.15.10.1 2007/05/22 17:26:34 matt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to amd64-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
void	x86_pause(void);
#else
static __inline void
x86_pause(void)
{
	__asm volatile("pause");
}
#endif

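/*
 * Illustrative sketch (not part of this interface): x86_pause() is meant
 * for the body of a spin-wait loop, where it reduces power use and avoids
 * the memory-order mis-speculation penalty when the loop exits.
 * "lock_word" below is a hypothetical variable being polled.
 *
 *	while (lock_word != 0)
 *		x86_pause();
 */
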
/*
 * XXX if lfence isn't available...
 *
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses across the fence.
 */
static __inline void
x86_lfence(void)
{

	__asm volatile("lfence" : : : "memory");
}

static __inline void
x86_sfence(void)
{

	__asm volatile("sfence" : : : "memory");
}

static __inline void
x86_mfence(void)
{

	__asm volatile("mfence" : : : "memory");
}

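/*
 * Illustrative sketch (not part of this interface): on x86 a store may be
 * reordered after a later load, which x86_mfence() prevents.  The flags
 * "my_flag" and "other_flag" are hypothetical and would need to be
 * volatile (or otherwise protected) so the compiler actually emits the
 * accesses.
 *
 *	my_flag = 1;		// announce intent to enter
 *	x86_mfence();		// store must be visible before the load
 *	if (other_flag == 0) {
 *		// safe to proceed
 *	}
 */
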
#ifdef _KERNEL

void	x86_flush(void);
void	x86_patch(void);

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

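/*
 * Illustrative sketch (not part of this interface): after changing a
 * single PTE, only that page's stale translation needs to be dropped.
 * "update_pte" is a hypothetical helper and "va" a virtual address
 * mapped by the PTE that was just changed.
 *
 *	update_pte(va, new_pte);	// hypothetical helper
 *	invlpg(va);			// drop the stale TLB entry for va
 */
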
static __inline void
lidt(struct region_descriptor *region)
{
	__asm volatile("lidt %0" : : "m" (*region));
}

static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits are reserved anyway, so just keep this 32 bits wide.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for cr0. Don't touch upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 * System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void	setidt	__P((int idx, /*XXX*/void *func, int typ, int dpl));
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

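/*
 * Illustrative sketch (not part of this interface): the usual pattern for
 * a short critical section is to save RFLAGS, disable interrupts, and
 * restore the saved value afterwards, which restores the previous state
 * of the interrupt flag.
 *
 *	u_long flags = read_rflags();
 *	disable_intr();
 *	// ... short critical section ...
 *	write_rflags(flags);
 */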

static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}

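/*
 * Illustrative sketch (not part of this interface): a read-modify-write
 * of an MSR, assuming MSR_EFER and EFER_NXE are provided by
 * <machine/specialreg.h>.
 *
 *	uint64_t efer = rdmsr(MSR_EFER);
 *	wrmsr(MSR_EFER, efer | EFER_NXE);	// enable no-execute support
 */
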
/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

static __inline u_int64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr"
	    : "=d" (hi), "=a" (lo)
	    : "c" (msr), "D" (code));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr"
	    :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr),
	      "D" (code));
}

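/*
 * Illustrative sketch (not part of this interface): the locked variants
 * are called like rdmsr()/wrmsr(), with OPTERON_MSR_PASSCODE supplied as
 * the access code.  "MSR_EXAMPLE" and "EXAMPLE_BIT" are hypothetical
 * names standing in for one of the passcode-protected MSRs.
 *
 *	uint64_t v = rdmsr_locked(MSR_EXAMPLE, OPTERON_MSR_PASSCODE);
 *	wrmsr_locked(MSR_EXAMPLE, OPTERON_MSR_PASSCODE, v | EXAMPLE_BIT);
 */
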
static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

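/*
 * Illustrative sketch (not part of this interface): measuring a rough
 * interval in TSC cycles.  rdtsc is not a serializing instruction, so
 * very short intervals may be inaccurate unless the pipeline is fenced.
 *
 *	uint64_t t0 = rdtsc();
 *	// ... work being timed ...
 *	uint64_t cycles = rdtsc() - t0;
 */
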
static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

#define read_psl()	read_rflags()
#define write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */