/*	$NetBSD: cpufunc.h,v 1.11 2007/01/12 20:22:04 ad Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to amd64-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

static __inline void
x86_pause(void)
{
	__asm volatile("pause");
}
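
/*
 * Usage sketch (illustrative, not part of the original header): pause
 * belongs in the body of a spin-wait loop, where it reduces power use
 * and memory-order violations on hyperthreaded processors:
 *
 *	while (*(volatile int *)&lockword != 0)
 *		x86_pause();
 */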

/*
 * XXX if lfence isn't available... (it always is on amd64: SSE2 is
 * part of the baseline architecture, so no fallback is needed.)
 *
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses across the fence.
 */
static __inline void
x86_lfence(void)
{

	__asm volatile("lfence" : : : "memory");
}

static __inline void
x86_sfence(void)
{

	__asm volatile("sfence" : : : "memory");
}

static __inline void
x86_mfence(void)
{

	__asm volatile("mfence" : : : "memory");
}
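
/*
 * Usage sketch (illustrative, not part of the original header).
 * Plain x86 loads and stores are already strongly ordered; these
 * fences matter chiefly around non-temporal (write-combining)
 * stores and device memory, e.g. draining movnt* stores before
 * signalling another agent that the buffer is ready:
 *
 *	...movnti stores into buf...
 *	x86_sfence();
 *	*doorbell = 1;
 */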

#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}
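
/*
 * Usage sketch (illustrative, not part of the original header):
 * after modifying a PTE, invalidate the stale TLB entry for that
 * one virtual address rather than flushing the whole TLB;
 * pte_store() is a hypothetical PTE-update helper:
 *
 *	pte_store(ptep, new_pte);
 *	invlpg(va);
 */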

static __inline void
lidt(struct region_descriptor *region)
{
	__asm volatile("lidt %0" : : "m" (*region));
}

static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits of %cr0 are reserved anyway, so pass and return
 * just 32 bits.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * As with %cr0: the upper 32 bits are reserved, so don't touch them.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 *	System Programming, section 9.10, "Invalidating the
	 *	Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void	setidt	__P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}
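
/*
 * Usage sketch (illustrative, not part of the original header): the
 * usual way to block interrupts temporarily is to save the flags,
 * disable interrupts, and restore the saved flags afterwards, which
 * also restores the previous interrupt-enable state:
 *
 *	u_long rf = read_rflags();
 *	disable_intr();
 *	...critical section...
 *	write_rflags(rf);
 */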

static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}
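
/*
 * Usage sketch (illustrative, not part of the original header):
 * setting a bit in an MSR is a read-modify-write of the full 64-bit
 * value; MSR_SOMEREG and SOMEREG_ENABLE are hypothetical names:
 *
 *	u_int64_t v = rdmsr(MSR_SOMEREG);
 *	wrmsr(MSR_SOMEREG, v | SOMEREG_ENABLE);
 */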
    270   1.1   fvdl 
    271  1.10     ad /*
    272  1.10     ad  * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
    273  1.10     ad  *
    274  1.10     ad  * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
    275  1.10     ad  */
    276  1.10     ad 
    277  1.10     ad #define	OPTERON_MSR_PASSCODE	0x9c5a203a
    278  1.10     ad 
    279  1.10     ad static __inline u_int64_t
    280  1.10     ad rdmsr_locked(u_int msr, u_int code)
    281  1.10     ad {
    282  1.10     ad 	uint32_t hi, lo;
    283  1.10     ad 	__asm volatile("rdmsr"
    284  1.10     ad 	    : "=d" (hi), "=a" (lo)
    285  1.10     ad 	    : "c" (msr), "D" (code));
    286  1.10     ad 	return (((uint64_t)hi << 32) | (uint64_t) lo);
    287  1.10     ad }
    288  1.10     ad 
    289  1.10     ad static __inline void
    290  1.10     ad wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
    291  1.10     ad {
    292  1.10     ad 	__asm volatile("wrmsr"
    293  1.10     ad 	    :
    294  1.10     ad 	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr),
    295  1.10     ad 	      "D" (code));
    296  1.10     ad }
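
/*
 * Usage sketch (illustrative, not part of the original header);
 * MSR_SOMELOCKED is a hypothetical name for one of the passcode-
 * protected MSRs:
 *
 *	u_int64_t v = rdmsr_locked(MSR_SOMELOCKED, OPTERON_MSR_PASSCODE);
 *	wrmsr_locked(MSR_SOMELOCKED, OPTERON_MSR_PASSCODE, v);
 */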

static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}
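
/*
 * Usage sketch (illustrative, not part of the original header): a
 * rough cycle count around a code sequence.  Note that rdtsc is not
 * a serializing instruction, so short measurements can be skewed by
 * out-of-order execution:
 *
 *	u_int64_t t0 = rdtsc();
 *	...code under test...
 *	u_int64_t cycles = rdtsc() - t0;
 */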

static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

#define read_psl()	read_rflags()
#define write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */