/*	$NetBSD: cpu.h,v 1.45 2008/02/27 18:26:15 xtraeme Exp $	*/

/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
 */

#ifndef _AMD64_CPU_H_
#define _AMD64_CPU_H_

#if defined(_KERNEL)
#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_xen.h"
#endif

/*
 * Definitions unique to x86-64 cpu support.
 */
#include <machine/frame.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/intrdefs.h>
#include <x86/cacheinfo.h>

#include <sys/device.h>
#include <sys/simplelock.h>
#include <sys/cpu_data.h>
#include <sys/cc_microtime.h>

struct pmap;

struct cpu_info {
	struct device *ci_dev;
	struct cpu_info *ci_self;

	/*
	 * Will be accessed by other CPUs.
	 */
	struct cpu_info *ci_next;
	struct lwp *ci_curlwp;
	struct pmap_cpu *ci_pmap_cpu;
	struct lwp *ci_fpcurlwp;
	int ci_fpsaving;
	u_int ci_cpuid;
	int ci_cpumask;			/* (1 << CPU ID) */
	u_int ci_apicid;
	uint8_t ci_initapicid;		/* our initial APIC ID */
	uint8_t ci_packageid;
	uint8_t ci_coreid;
	uint8_t ci_smtid;
	struct cpu_data ci_data;	/* MI per-cpu data */
	struct cc_microtime_state ci_cc;/* cc_microtime state */

	/*
	 * Private members.
	 */
	struct evcnt ci_tlb_evcnt;	/* tlb shootdown counter */
	struct pmap *ci_pmap;		/* current pmap */
	int ci_need_tlbwait;		/* need to wait for TLB invalidations */
	int ci_want_pmapload;		/* pmap_load() is needed */
	volatile int ci_tlbstate;	/* one of TLBSTATE_ states. see below */
#define	TLBSTATE_VALID	0	/* all user tlbs are valid */
#define	TLBSTATE_LAZY	1	/* tlbs are valid but won't be kept uptodate */
#define	TLBSTATE_STALE	2	/* we might have stale user tlbs */
	u_int64_t ci_scratch;
#ifdef XEN
	struct iplsource *ci_isources[NIPL];
#else
	struct intrsource *ci_isources[MAX_INTR_SOURCES];
#endif
	volatile int	ci_mtx_count;	/* Negative count of spin mutexes */
	volatile int	ci_mtx_oldspl;	/* Old SPL at this ci_idepth */

	/* The following must be aligned for cmpxchg8b. */
	struct {
		uint32_t	ipending;
		int		ilevel;
	} ci_istate __aligned(8);
#define ci_ipending	ci_istate.ipending
#define	ci_ilevel	ci_istate.ilevel
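	/*
	 * A note on intent (an interpretation; the actual updates live in
	 * the assembly spl/interrupt code): keeping ipending and ilevel
	 * adjacent and 8-byte aligned lets both fields be read or swapped
	 * with a single locked 64-bit compare-and-exchange, so an interrupt
	 * can never observe a half-updated pair.
	 */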
	int		ci_idepth;
	void *		ci_intrstack;
	u_int32_t	ci_imask[NIPL];
	u_int32_t	ci_iunmask[NIPL];

	u_int		ci_flags;
	u_int32_t	ci_ipis;

	int32_t		ci_cpuid_level;
	uint32_t	ci_signature;
	uint32_t	ci_feature_flags;
	uint32_t	ci_feature2_flags;
	uint32_t	ci_vendor[4];	 /* vendor string */
	u_int64_t	ci_tsc_freq;
	volatile uint32_t	ci_lapic_counter;

	const struct cpu_functions *ci_func;
	void (*cpu_setup)(struct cpu_info *);
	void (*ci_info)(struct cpu_info *);

	int		ci_want_resched;
	struct trapframe *ci_ddb_regs;

	struct x86_cache_info ci_cinfo[CAI_COUNT];

	char		*ci_gdt;

	struct evcnt ci_ipi_events[X86_NIPI];

	struct x86_64_tss ci_tss;	/* Per-cpu TSS; shared among LWPs */
	int		ci_tss_sel;	/* TSS selector of this cpu */

	/*
	 * The following two are actually region_descriptors,
	 * but that would pollute the namespace.
	 */
	uint64_t	ci_suspend_gdt;
	uint16_t	ci_suspend_gdt_padding;
	uint64_t	ci_suspend_idt;
	uint16_t	ci_suspend_idt_padding;

	uint16_t	ci_suspend_tr;
	uint16_t	ci_suspend_ldt;
	uint32_t	ci_suspend_fs_base_l;
	uint32_t	ci_suspend_fs_base_h;
	uint32_t	ci_suspend_gs_base_l;
	uint32_t	ci_suspend_gs_base_h;
	uint32_t	ci_suspend_gs_kernelbase_l;
	uint32_t	ci_suspend_gs_kernelbase_h;
	uint32_t	ci_suspend_msr_efer;
	uint64_t	ci_suspend_rbx;
	uint64_t	ci_suspend_rbp;
	uint64_t	ci_suspend_rsp;
	uint64_t	ci_suspend_r12;
	uint64_t	ci_suspend_r13;
	uint64_t	ci_suspend_r14;
	uint64_t	ci_suspend_r15;
	uint64_t	ci_suspend_rfl;
	uint64_t	ci_suspend_cr0;
	uint64_t	ci_suspend_cr2;
	uint64_t	ci_suspend_cr3;
	uint64_t	ci_suspend_cr4;
	uint64_t	ci_suspend_cr8;
};

#define CPUF_BSP	0x0001		/* CPU is the original BSP */
#define CPUF_AP		0x0002		/* CPU is an AP */
#define CPUF_SP		0x0004		/* CPU is only processor */
#define CPUF_PRIMARY	0x0008		/* CPU is active primary processor */

#define CPUF_PRESENT	0x1000		/* CPU is present */
#define CPUF_RUNNING	0x2000		/* CPU is running */
#define CPUF_PAUSE	0x4000		/* CPU is paused in DDB */
#define CPUF_GO		0x8000		/* CPU should start running */


extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;

#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpu_info_list; \
					ci != NULL; ci = ci->ci_next
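
/*
 * Typical iteration over all CPUs (a usage sketch; machine-independent
 * callers follow the same pattern):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		... per-CPU work on ci ...
 *	}
 */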

#define X86_MAXPROCS		32	/* bitmask; can be bumped to 64 */

#define CPU_STARTUP(_ci, _target)	((_ci)->ci_func->start(_ci, _target))
#define CPU_STOP(_ci)			((_ci)->ci_func->stop(_ci))
#define CPU_START_CLEANUP(_ci)		((_ci)->ci_func->cleanup(_ci))

#if defined(__GNUC__) && defined(_KERNEL)
static struct cpu_info *x86_curcpu(void);
static lwp_t *x86_curlwp(void);

__inline static struct cpu_info * __unused
x86_curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile("movq %%gs:%1, %0" :
	    "=r" (ci) :
	    "m"
	    (*(struct cpu_info * const *)offsetof(struct cpu_info, ci_self)));
	return ci;
}

__inline static lwp_t * __unused
x86_curlwp(void)
{
	lwp_t *l;

	__asm volatile("movq %%gs:%1, %0" :
	    "=r" (l) :
	    "m"
	    (*(struct cpu_info * const *)offsetof(struct cpu_info, ci_curlwp)));
	return l;
}
#else	/* __GNUC__ && _KERNEL */
/* For non-GCC and LKMs */
struct cpu_info	*x86_curcpu(void);
lwp_t	*x86_curlwp(void);
#endif	/* __GNUC__ && _KERNEL */

#define cpu_number()	(curcpu()->ci_cpuid)

#define CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

extern struct cpu_info *cpu_info[X86_MAXPROCS];

void cpu_boot_secondary_processors(void);
void cpu_init_idle_lwps(void);

#define aston(l)	((l)->l_md.md_astpending = 1)

extern u_int32_t cpus_attached;

#define curcpu()	x86_curcpu()
#define curlwp		x86_curlwp()
#define curpcb		(&curlwp->l_addr->u_pcb)

/*
 * Arguments to hardclock, softclock and statclock
 * encapsulate the previous machine state in an opaque
 * clockframe; for now, use generic intrframe.
 */
struct clockframe {
	struct intrframe cf_if;
};

#define	CLKF_USERMODE(frame)	USERMODE((frame)->cf_if.if_tf.tf_cs, \
				    (frame)->cf_if.if_tf.tf_rflags)
#define CLKF_PC(frame)		((frame)->cf_if.if_tf.tf_rip)
#define CLKF_INTR(frame)	(curcpu()->ci_idepth > 0)
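
/*
 * Usage sketch (the real consumers are the MI clock routines such as
 * hardclock(9)): given a struct clockframe *frame built from the
 * interrupt frame,
 *
 *	if (CLKF_USERMODE(frame))
 *		... charge the tick to user time at CLKF_PC(frame) ...
 *	else if (CLKF_INTR(frame))
 *		... the tick arrived while another interrupt was active ...
 */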

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define LWP_PC(l)		((l)->l_md.md_regs->tf_rip)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On amd64, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
extern void cpu_need_proftick(struct lwp *);

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
extern void cpu_signotify(struct lwp *);

/*
 * We need a machine-independent name for this.
 */
extern void (*delay_func)(unsigned int);

#define DELAY(x)		(*delay_func)(x)
#define delay(x)		(*delay_func)(x)
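
/*
 * Example: DELAY(1000) busy-waits for roughly 1000 microseconds through
 * whichever back-end (i8254 or Xen) delay_func currently points at.
 */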


/*
 * pull in #defines for kinds of processors
 */

extern int biosbasemem;
extern int biosextmem;
extern int cpu;
extern int cpu_feature;
extern int cpu_feature2;
extern int cpu_id;
extern int cpuid_level;
extern char cpu_vendorname[];

/* identcpu.c */

void	identifycpu(struct cpu_info *);
void cpu_probe_features(struct cpu_info *);

/* machdep.c */
void	dumpconf(void);
void	cpu_reset(void);
void	x86_64_proc0_tss_ldt_init(void);
void	x86_64_init_pcb_tss_ldt(struct cpu_info *);
void	cpu_proc_fork(struct proc *, struct proc *);

struct region_descriptor;
void	lgdt(struct region_descriptor *);
#ifdef XEN
void	lgdt_finish(void);
#endif
void	fillw(short, void *, size_t);

struct pcb;
void	savectx(struct pcb *);
void	lwp_trampoline(void);
void	child_trampoline(void);

#ifdef XEN
void	startrtclock(void);
void	xen_delay(unsigned int);
void	xen_initclocks(void);
#else
/* clock.c */
void	initrtclock(u_long);
void	startrtclock(void);
void	i8254_delay(unsigned int);
void	i8254_initclocks(void);
#endif

void cpu_init_msrs(struct cpu_info *, bool);


/* vm_machdep.c */
int kvtop(void *);

/* trap.c */
void	child_return(void *);

/* consinit.c */
void kgdb_port_init(void);

/* bus_machdep.c */
void x86_bus_space_init(void);
void x86_bus_space_mallocok(void);

#endif /* _KERNEL */

#include <machine/psl.h>

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CONSDEV		1	/* dev_t: console terminal device */
#define	CPU_BIOSBASEMEM		2	/* int: bios-reported base mem (K) */
#define	CPU_BIOSEXTMEM		3	/* int: bios-reported ext. mem (K) */
#define	CPU_NKPDE		4	/* int: number of kernel PDEs */
#define	CPU_BOOTED_KERNEL	5	/* string: booted kernel name */
#define CPU_DISKINFO		6	/* disk geometry information */
#define CPU_FPU_PRESENT		7	/* FPU is present */
#define	CPU_MAXID		8	/* number of valid machdep ids */
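
/*
 * Userland sketch (assumes <sys/param.h> and <sys/sysctl.h>): reading
 * machdep.booted_kernel through the CTL_MACHDEP node defined above.
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */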


/*
 * Structure for CPU_DISKINFO sysctl call.
 * XXX this should be somewhere else.
 */
#define MAX_BIOSDISKS	16

struct disklist {
	int dl_nbiosdisks;			   /* number of bios disks */
	struct biosdisk_info {
		int bi_dev;			   /* BIOS device # (0x80 ..) */
		int bi_cyl;			   /* cylinders on disk */
		int bi_head;			   /* heads per track */
		int bi_sec;			   /* sectors per track */
		u_int64_t bi_lbasecs;		   /* total sec. (iff ext13) */
#define BIFLAG_INVALID		0x01
#define BIFLAG_EXTINT13		0x02
		int bi_flags;
	} dl_biosdisks[MAX_BIOSDISKS];

	int dl_nnativedisks;			   /* number of native disks */
	struct nativedisk_info {
		char ni_devname[16];		   /* native device name */
		int ni_nmatches; 		   /* # of matches w/ BIOS */
		int ni_biosmatches[MAX_BIOSDISKS]; /* indices in dl_biosdisks */
	} dl_nativedisks[1];			   /* actually longer */
};
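
/*
 * The CPU_DISKINFO node returns a struct disklist; a userland sketch
 * (error handling omitted) sizes the buffer first:
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_DISKINFO };
 *	size_t len;
 *
 *	sysctl(mib, 2, NULL, &len, NULL, 0);	... query the required size
 *	struct disklist *dl = malloc(len);
 *	sysctl(mib, 2, dl, &len, NULL, 0);	... then fetch the data
 */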

#endif /* !_AMD64_CPU_H_ */