/*	$NetBSD: cpufunc.h,v 1.25 2003/05/08 10:27:43 fvdl Exp $	*/
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #ifndef _I386_CPUFUNC_H_
40 #define _I386_CPUFUNC_H_
41
42 /*
43 * Functions to provide access to i386-specific instructions.
44 */
45
46 #include <sys/cdefs.h>
47 #include <sys/types.h>
48
49 #include <machine/specialreg.h>
50
/*
 * Issue the PAUSE instruction: a spin-wait hint to the CPU.  On
 * processors that understand it this reduces power use and avoids a
 * memory-order mis-speculation penalty on spin-loop exit; on older
 * CPUs it executes as a NOP (same encoding as "rep; nop").
 */
static __inline void
x86_pause(void)
{
	__asm __volatile("pause");
}
56
57 #ifdef _KERNEL
58
59 extern int cpu_feature;
60
/*
 * Invalidate the TLB entry (if any) for the page containing the
 * virtual address `addr'.  The "memory" clobber keeps the compiler
 * from moving memory accesses across the invalidation.
 */
static __inline void
invlpg(u_int addr)
{
	__asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}
66
/*
 * Load the interrupt descriptor table register from the 6-byte
 * pseudo-descriptor (16-bit limit, 32-bit base) pointed to by `p'.
 */
static __inline void
lidt(void *p)
{
	__asm __volatile("lidt (%0)" : : "r" (p));
}
72
/*
 * Load the local descriptor table register with the GDT selector
 * `sel'.
 */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}
78
/*
 * Load the task register with the TSS selector `sel'.
 */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}
84
/*
 * Write `val' to control register %cr0 (paging, caching and
 * protection mode bits).
 */
static __inline void
lcr0(u_int val)
{
	__asm __volatile("movl %0,%%cr0" : : "r" (val));
}
90
/*
 * Read and return the current value of control register %cr0.
 */
static __inline u_int
rcr0(void)
{
	u_int val;
	__asm __volatile("movl %%cr0,%0" : "=r" (val));
	return val;
}
98
/*
 * Read and return %cr2: the linear address that caused the most
 * recent page fault.
 */
static __inline u_int
rcr2(void)
{
	u_int val;
	__asm __volatile("movl %%cr2,%0" : "=r" (val));
	return val;
}
106
107 static __inline void
108 lcr3(u_int val)
109 {
110 __asm __volatile("movl %0,%%cr3" : : "r" (val));
111 }
112
/*
 * Read and return %cr3 (the current page directory base).
 */
static __inline u_int
rcr3(void)
{
	u_int val;
	__asm __volatile("movl %%cr3,%0" : "=r" (val));
	return val;
}
120
121 static __inline void
122 lcr4(u_int val)
123 {
124 __asm __volatile("movl %0,%%cr4" : : "r" (val));
125 }
126
/*
 * Read and return the current value of control register %cr4.
 */
static __inline u_int
rcr4(void)
{
	u_int val;
	__asm __volatile("movl %%cr4,%0" : "=r" (val));
	return val;
}
134
/*
 * Flush all non-global TLB entries by reloading %cr3 with its
 * current value.  Entries for pages mapped with the G bit set are
 * NOT flushed; use tlbflushg() for that.
 */
static __inline void
tlbflush(void)
{
	lcr3(rcr3());
}
142
/*
 * Flush the entire TLB, including entries for global (PG_G) pages,
 * by briefly clearing CR4.PGE when the CPU supports it; otherwise
 * fall back to a plain %cr3 reload.
 */
static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTE's
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 *	System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBS)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (the alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

#if defined(I686_CPU)
	if (cpu_feature & CPUID_PGE) {
		/* Toggle CR4.PGE off and back on: flushes global entries. */
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
#endif
		tlbflush();
}
175
176
177 #ifdef notyet
178 void setidt __P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
179 #endif
180
181 /* debug register */
182 void dr0(caddr_t, u_int32_t, u_int32_t, u_int32_t);
183
/*
 * Read and return debug status register %dr6 (records which debug
 * condition triggered the last debug exception).
 */
static __inline u_int
rdr6(void)
{
	u_int val;

	__asm __volatile("movl %%dr6,%0" : "=r" (val));
	return val;
}
192
/*
 * Write `val' to debug status register %dr6 (typically used to clear
 * the sticky debug-condition bits after handling a debug exception).
 */
static __inline void
ldr6(u_int val)
{

	__asm __volatile("movl %0,%%dr6" : : "r" (val));
}
199
200 /* XXXX ought to be in psl.h with spl() functions */
201
/*
 * Disable maskable hardware interrupts on this CPU (clear EFLAGS.IF
 * via CLI).
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli");
}
207
/*
 * Enable maskable hardware interrupts on this CPU (set EFLAGS.IF
 * via STI).
 */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}
213
/*
 * Return the current EFLAGS register (pushed onto the stack with
 * PUSHFL and popped into a general register).
 */
static __inline u_long
read_eflags(void)
{
	u_long	ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}
222
/*
 * Load `ef' into the EFLAGS register (pushed and restored with
 * POPFL).  Commonly used to restore the interrupt-enable state saved
 * by read_eflags().
 */
static __inline void
write_eflags(u_long ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}
228
/*
 * Read model-specific register `msr' and return its 64-bit value.
 * The "A" constraint binds the edx:eax pair that RDMSR writes to the
 * 64-bit return value (i386 only).
 */
static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int64_t rv;

	__asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}
237
/*
 * Write the 64-bit value `newval' to model-specific register `msr'
 * (value supplied in edx:eax via the "A" constraint, MSR number in
 * %ecx).
 */
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}
243
/*
 * Write back all modified cache lines to memory and invalidate the
 * internal (and external) caches (WBINVD).
 */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}
249
/*
 * Read and return the 64-bit time-stamp counter (RDTSC; result in
 * edx:eax, bound to the 64-bit return value by the "A" constraint).
 */
static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;

	__asm __volatile("rdtsc" : "=A" (rv));
	return (rv);
}
258
/*
 * Read performance-monitoring counter `pmc' (counter index in %ecx)
 * and return its 64-bit value (edx:eax, via the "A" constraint).
 */
static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int64_t rv;

	__asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}
267
268 /* Break into DDB/KGDB. */
269 static __inline void
270 breakpoint(void)
271 {
272 __asm __volatile("int $3");
273 }
274
275 #define read_psl() read_eflags()
276 #define write_psl(x) write_eflags(x)
277
278 /*
279 * XXX Maybe these don't belong here...
280 */
281
282 extern int (*copyout_func)(const void *, void *, size_t);
283 extern int (*copyin_func)(const void *, void *, size_t);
284
285 int i386_copyout(const void *, void *, size_t);
286 int i486_copyout(const void *, void *, size_t);
287
288 int i386_copyin(const void *, void *, size_t);
289
290 #endif /* _KERNEL */
291
292 #endif /* !_I386_CPUFUNC_H_ */
293