/*	$NetBSD: cpufunc.h,v 1.12 2007/01/14 14:03:00 ad Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

void x86_pause(void);

/*
 * XXX: provide a fallback if lfence isn't available?
 *
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses across the fence.
 */
static __inline void
x86_lfence(void)
{

	__asm volatile("lfence" : : : "memory");
}

static __inline void
x86_sfence(void)
{

	__asm volatile("sfence" : : : "memory");
}

static __inline void
x86_mfence(void)
{

	__asm volatile("mfence" : : : "memory");
}
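
/*
 * Usage sketch (illustrative, not part of this header): spin-wait on a
 * flag set by another CPU, using x86_pause() as a spin-loop hint and
 * x86_lfence() to order subsequent loads after the flag read.  The
 * variable "ready" is hypothetical.
 *
 *	extern volatile int ready;
 *
 *	while (ready == 0)
 *		x86_pause();
 *	x86_lfence();
 */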

#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}
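
/*
 * Usage sketch (illustrative): after modifying a PTE, invalidate the
 * TLB entry for the affected virtual address.  The PTE update below is
 * hypothetical, and on a multiprocessor the other CPUs still need a
 * TLB shootdown.
 *
 *	pt_entry_t *pte = ...;
 *	vaddr_t va = ...;
 *
 *	*pte = new_pte;
 *	invlpg(va);
 */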

static __inline void
lidt(struct region_descriptor *region)
{
	__asm volatile("lidt %0" : : "m" (*region));
}

static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits of %cr0 are reserved, so just keep this 32 bits.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}
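
/*
 * Usage sketch (illustrative): control register updates are typically
 * read-modify-write operations, e.g. setting the TS bit in %cr0
 * (assuming CR0_TS comes from <machine/specialreg.h>):
 *
 *	lcr0(rcr0() | CR0_TS);
 */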

static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for %cr0: the upper 32 bits are reserved, so don't touch them.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 * System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 *  ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}
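
/*
 * Usage sketch (illustrative): to block interrupts around a short
 * critical section, save the flags, disable interrupts, and restore
 * the saved flags afterwards rather than unconditionally re-enabling:
 *
 *	u_long rflags = read_rflags();
 *
 *	disable_intr();
 *	... critical section ...
 *	write_rflags(rflags);
 */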


static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}
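
/*
 * Usage sketch (illustrative): MSR updates are usually read-modify-write,
 * e.g. setting a feature bit (assuming MSR_EFER and EFER_NXE are
 * provided by <machine/specialreg.h>):
 *
 *	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
 */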

/*
 * Some of the undocumented AMD64 MSRs require a 'passcode' (passed in
 * %edi) to be accessed.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

static __inline u_int64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr"
	    : "=d" (hi), "=a" (lo)
	    : "c" (msr), "D" (code));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr"
	    :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr),
	    "D" (code));
}
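
/*
 * Usage sketch (illustrative): callers pass OPTERON_MSR_PASSCODE as the
 * 'code' argument when touching one of the protected MSRs; "msr" and
 * "some_bit" below are hypothetical placeholders:
 *
 *	u_int64_t val = rdmsr_locked(msr, OPTERON_MSR_PASSCODE);
 *
 *	wrmsr_locked(msr, OPTERON_MSR_PASSCODE, val | some_bit);
 */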

static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}
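
/*
 * Usage sketch (illustrative): rdtsc() gives a rough cycle count.  Note
 * that without a serializing instruction the CPU may reorder rdtsc with
 * surrounding code, so treat the result as approximate:
 *
 *	u_int64_t before, after, delta;
 *
 *	before = rdtsc();
 *	... code being measured ...
 *	after = rdtsc();
 *	delta = after - before;
 */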

static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

#define	read_psl()	read_rflags()
#define	write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */