/*	$NetBSD: cpufunc.h,v 1.7.8.1 2006/09/03 15:22:41 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to amd64-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

static __inline void
x86_pause(void)
{
	/*
	 * The PAUSE spin-wait hint is encoded as REP NOP, so it is a
	 * plain NOP on CPUs that predate it and is always safe to
	 * emit here.
	 */
	__asm volatile("pause");
}

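/*
 * Illustrative sketch only (not part of the original header): how
 * x86_pause() is meant to sit inside a spin-wait loop, easing
 * pipeline and power pressure while polling.  "lock_word" and the
 * function name are hypothetical.
 */
#ifdef notdef
static volatile int lock_word;

static __inline void
pause_example_spin(void)
{
	while (lock_word != 0)
		x86_pause();
}
#endif	/* notdef */
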
/*
 * lfence is always available on amd64 (SSE2 is part of the baseline
 * architecture), so no feature check is needed here.  The "memory"
 * clobber keeps the compiler from reordering memory accesses across
 * the fence.
 */
static __inline void
x86_lfence(void)
{

	__asm volatile("lfence" : : : "memory");
}

static __inline void
x86_sfence(void)
{

	__asm volatile("sfence" : : : "memory");
}

static __inline void
x86_mfence(void)
{

	__asm volatile("mfence" : : : "memory");
}

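/*
 * Illustrative sketch only (not part of the original header).  For
 * ordinary write-back memory, the one reordering x86 actually
 * performs is a store followed by a load from a different location;
 * MFENCE forbids it.  "flag0"/"flag1" are hypothetical variables in
 * a Dekker-style mutual-exclusion attempt:
 */
#ifdef notdef
static volatile int flag0, flag1;

static __inline int
mfence_example_trylock0(void)
{
	flag0 = 1;
	x86_mfence();		/* order the store before the load below */
	return (flag1 == 0);	/* enter only if the peer is not trying */
}
#endif	/* notdef */
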
#ifdef _KERNEL

extern int cpu_feature;

/*
 * Invalidate any TLB entry for the page containing the given
 * virtual address.
 */
static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
lidt(struct region_descriptor *region)
{
	__asm volatile("lidt %0" : : "m" (*region));
}

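/*
 * Illustrative sketch only (not part of the original header): filling
 * in a region_descriptor before handing it to lidt().  The rd_limit/
 * rd_base field names are assumed from <machine/segments.h>;
 * "idt_example_store" is hypothetical (long-mode gate descriptors are
 * 16 bytes each).
 */
#ifdef notdef
static char idt_example_store[256 * 16] __aligned(16);

static __inline void
lidt_example(void)
{
	struct region_descriptor region;

	region.rd_limit = sizeof(idt_example_store) - 1;
	region.rd_base = (u_int64_t)idt_example_store;
	lidt(&region);
}
#endif	/* notdef */
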
static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	/* %cr8 is the long-mode task priority register (TPR). */
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits of %cr0 are reserved, so 32 bits suffice here.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

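/*
 * Illustrative sketch only (not part of the original header): the
 * usual read-modify-write pattern with the %cr0 accessors, here
 * clearing the write-protect bit (architecturally bit 16; the CR0_WP
 * name is assumed from <machine/specialreg.h>).
 */
#ifdef notdef
static __inline void
cr0_example_clear_wp(void)
{
	lcr0(rcr0() & ~CR0_WP);
}
#endif	/* notdef */
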
static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for %cr0: don't touch the upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

/*
 * Flush all non-global TLB entries by reloading %cr3.
 */
static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 *	System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void	setidt	__P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

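/*
 * Illustrative sketch only (not part of the original header): the
 * classic save/disable/restore pattern built from the primitives
 * above.  PSL_I (the RFLAGS interrupt-enable bit, 0x200) is assumed
 * to come from <machine/psl.h>; the helper names are hypothetical.
 */
#ifdef notdef
static __inline u_long
intr_example_save(void)
{
	u_long rf = read_rflags();

	disable_intr();
	return rf;
}

static __inline void
intr_example_restore(u_long rf)
{
	if (rf & PSL_I)		/* re-enable only if previously enabled */
		enable_intr();
}
#endif	/* notdef */
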
/*
 * RDMSR/WRMSR move the 64-bit MSR value through %edx:%eax, with the
 * MSR number in %ecx.
 */
static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}

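/*
 * Illustrative sketch only (not part of the original header): a
 * read-modify-write of the EFER MSR (architecturally MSR 0xc0000080)
 * setting its no-execute-enable bit (bit 11).  This tree is assumed
 * to carry symbolic names for both in <machine/specialreg.h>.
 */
#ifdef notdef
static __inline void
msr_example_enable_nx(void)
{
	wrmsr(0xc0000080, rdmsr(0xc0000080) | ((u_int64_t)1 << 11));
}
#endif	/* notdef */
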
static __inline void
wbinvd(void)
{
	/* Write back and invalidate all caches. */
	__asm volatile("wbinvd");
}

/*
 * Read the time-stamp counter; RDTSC returns it in %edx:%eax.
 */
static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/*
 * Read performance-monitoring counter "pmc", likewise via %edx:%eax.
 */
static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

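/*
 * Illustrative sketch only (not part of the original header):
 * measuring an interval in TSC cycles.  The counter rate is
 * CPU-dependent, and without a serializing instruction RDTSC may
 * execute speculatively early or late relative to the work measured.
 */
#ifdef notdef
static __inline u_int64_t
tsc_example_delta(void (*fn)(void))
{
	u_int64_t t0 = rdtsc();

	(*fn)();
	return rdtsc() - t0;
}
#endif	/* notdef */
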
/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

#define read_psl()	read_rflags()
#define write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */