/*	$NetBSD: cpufunc.h,v 1.4 2004/01/14 11:31:55 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define _AMD64_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/specialreg.h>

/*
 * Spin-wait hint.  Currently a no-op; the "pause" instruction could be
 * issued here to ease spin-wait loops.
 */
static __inline void
x86_pause(void)
{
	/* nothing */
}

static __inline void
x86_lfence(void)
{

	/*
	 * XXX if lfence isn't available...
	 *
	 * memory clobber to avoid compiler reordering.
	 */
	__asm __volatile("lfence" : : : "memory");
}
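
/*
 * Illustrative sketch only (not part of the original header): the XXX
 * note above alludes to a fallback for CPUs lacking "lfence".  A locked
 * read-modify-write is the usual substitute, though on amd64 SSE2 (and
 * hence lfence) is always present, so this is hypothetical here.
 */
#ifdef notdef
static __inline void
x86_lfence_fallback(void)
{
	int x = 0;

	/* A locked op orders memory accesses and clobbers nothing useful. */
	__asm __volatile("lock; addl $0,%0" : "+m" (x) : : "memory");
}
#endif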

#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
lidt(void *p)
{
	__asm __volatile("lidt (%0)" : : "r" (p));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits are reserved anyway, so just keep this 32 bits wide.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm __volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for cr0.  Don't touch the upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm __volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm __volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 * System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void setidt __P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
#endif

/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm __volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long ef;

	__asm __volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm __volatile("pushq %0; popfq" : : "r" (ef));
}
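
/*
 * Illustrative sketch only (not in the original header): the usual
 * save/disable/restore pattern built from the primitives above.  The
 * function names are hypothetical.
 */
#ifdef notdef
static __inline u_long
example_intr_save(void)
{
	u_long rf = read_rflags();

	/* Remember the interrupt-enable state, then mask interrupts. */
	disable_intr();
	return (rf);
}

static __inline void
example_intr_restore(u_long rf)
{
	/* Restores IF along with the rest of the saved flags. */
	write_rflags(rf);
}
#endif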

static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm __volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}
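
/*
 * Illustrative sketch only (not in the original header): a typical
 * read-modify-write of an MSR using the pair above.  Assumes MSR_EFER
 * and EFER_NXE from <machine/specialreg.h>; the function name is
 * hypothetical.
 */
#ifdef notdef
static __inline void
example_enable_nxe(void)
{
	/* Set the no-execute-enable bit in the extended feature MSR. */
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}
#endif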

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm __volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}
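
/*
 * Illustrative sketch only (not in the original header): measuring an
 * elapsed cycle count with rdtsc().  Note that rdtsc is not a
 * serializing instruction, so very short intervals can be skewed by
 * out-of-order execution.  The function name is hypothetical.
 */
#ifdef notdef
static __inline u_int64_t
example_cycles_elapsed(u_int64_t start)
{

	return (rdtsc() - start);
}
#endif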

static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm __volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	read_psl()	read_rflags()
#define	write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */