/*	$NetBSD: cpufunc.h,v 1.28.16.4 2007/09/03 14:26:47 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _I386_CPUFUNC_H_
#define	_I386_CPUFUNC_H_

/*
 * Functions to provide access to i386-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

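/*
 * x86_pause: the PAUSE instruction, a spin-wait-loop hint (it decodes
 * as REP NOP on processors that predate it).  Out of line in the
 * kernel, inline in userland.
 */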
#ifdef _KERNEL
void	x86_pause(void);
#else
static __inline void
x86_pause(void)
{
	__asm volatile("pause");
}
#endif

/*
 * Memory fences.  A locked read-modify-write of a stack location is a
 * full memory barrier on x86, so the same sequence serves all three;
 * the "memory" clobber also stops the compiler from reordering
 * accesses across it.
 *
 * XXX it would be better to use the real lfence/sfence/mfence
 * instructions where available.
 */
static __inline void
x86_lfence(void)
{

	__asm volatile("lock; addl $0, 0(%%esp)" : : : "memory");
}

static __inline void
x86_sfence(void)
{

	__asm volatile("lock; addl $0, 0(%%esp)" : : : "memory");
}

static __inline void
x86_mfence(void)
{

	__asm volatile("lock; addl $0, 0(%%esp)" : : : "memory");
}

#ifdef _KERNEL

void	x86_flush(void);
void	x86_patch(void);

extern unsigned int cpu_feature;

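/* Invalidate the TLB entry for the page containing addr. */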
static __inline void
invlpg(u_int addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

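/* Load the interrupt descriptor table register. */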
static __inline void
lidt(struct region_descriptor *region)
{
	__asm volatile("lidt %0" : : "m" (*region));
}

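/* Load the local descriptor table register with selector sel. */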
static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

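/* Load the task register with TSS selector sel. */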
static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

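/*
 * Control register access: lcrN writes %crN, rcrN reads it.  There is
 * no lcr1/rcr1, since CR1 is reserved; CR2 holds the faulting linear
 * address after a page fault and is only read here.
 */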
static __inline void
lcr0(u_int val)
{
	__asm volatile("movl %0,%%cr0" : : "r" (val));
}

static __inline u_int
rcr0(void)
{
	u_int val;
	__asm volatile("movl %%cr0,%0" : "=r" (val));
	return val;
}

static __inline u_int
rcr2(void)
{
	u_int val;
	__asm volatile("movl %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int val)
{
	__asm volatile("movl %0,%%cr3" : : "r" (val));
}

static __inline u_int
rcr3(void)
{
	u_int val;
	__asm volatile("movl %%cr3,%0" : "=r" (val));
	return val;
}

static __inline void
lcr4(u_int val)
{
	__asm volatile("movl %0,%%cr4" : : "r" (val));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	__asm volatile("movl %%cr4,%0" : "=r" (val));
	return val;
}

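/* Flush all non-global TLB entries by reloading %cr3. */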
static __inline void
tlbflush(void)
{
	u_int val;
	val = rcr3();
	lcr3(val);
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 *	System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

#if defined(I686_CPU)
	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
#endif
		tlbflush();
}

#ifdef notyet
void	setidt(int idx, /*XXX*/void *func, int typ, int dpl);
#endif

/* Debug registers. */
void	dr0(void *, uint32_t, uint32_t, uint32_t);

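/* DR6 is the debug status register; it records which debug condition fired. */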
static __inline u_int
rdr6(void)
{
	u_int val;

	__asm volatile("movl %%dr6,%0" : "=r" (val));
	return val;
}

static __inline void
ldr6(u_int val)
{

	__asm volatile("movl %0,%%dr6" : : "r" (val));
}

/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm volatile("sti");
}

static __inline u_long
read_eflags(void)
{
	u_long	ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_eflags(u_long ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

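/*
 * Model-specific register access.  The "A" constraint binds the 64-bit
 * value to the %edx:%eax pair, which is where rdmsr/wrmsr expect it;
 * %ecx selects the MSR.
 */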
static __inline uint64_t
rdmsr(u_int msr)
{
	uint64_t rv;

	__asm volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	__asm volatile("wrmsr" : : "A" (newval), "c" (msr));
}

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' (passed in
 * %edi) to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

static __inline uint64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint64_t rv;
	__asm volatile("rdmsr"
	    : "=A" (rv)
	    : "c" (msr), "D" (code));
	return (rv);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, uint64_t newval)
{
	__asm volatile("wrmsr"
	    :
	    : "A" (newval), "c" (msr), "D" (code));
}

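/* Write back all modified cache lines and invalidate the caches. */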
static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd");
}

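/* Read the 64-bit time-stamp counter. */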
static __inline uint64_t
rdtsc(void)
{
	uint64_t rv;

	__asm volatile("rdtsc" : "=A" (rv));
	return (rv);
}

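/* Read the performance-monitoring counter selected by pmc. */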
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint64_t rv;

	__asm volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

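/* On x86, the processor status longword (PSL) is the EFLAGS register. */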
#define read_psl()	read_eflags()
#define write_psl(x)	write_eflags(x)

/*
 * XXX Maybe these don't belong here...
 */

extern int (*copyout_func)(const void *, void *, size_t);
extern int (*copyin_func)(const void *, void *, size_t);

int	i386_copyout(const void *, void *, size_t);
int	i486_copyout(const void *, void *, size_t);

int	i386_copyin(const void *, void *, size_t);

#endif /* _KERNEL */

#endif /* !_I386_CPUFUNC_H_ */