cpu.h revision 1.54
/*	$NetBSD: cpu.h,v 1.54 2008/05/11 14:44:53 ad Exp $	*/

/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
 */

#ifndef _AMD64_CPU_H_
#define _AMD64_CPU_H_

#if defined(_KERNEL)
#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_xen.h"
#endif

/*
 * Definitions unique to x86-64 cpu support.
 */
#include <machine/frame.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/intrdefs.h>
#include <x86/cacheinfo.h>

#include <sys/device.h>
#include <sys/simplelock.h>
#include <sys/cpu_data.h>
#include <sys/systm.h>

struct pmap;
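
/*
 * Each CPU has one of these structures.  On amd64 the current CPU's copy
 * is reached through %gs (see x86_curcpu() below); ci_self points back at
 * the structure itself, so a single %gs-relative load yields its address.
 */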
struct cpu_info {
	struct device *ci_dev;
	struct cpu_info *ci_self;

#ifdef XEN
	volatile struct vcpu_info *ci_vcpu;
#endif

	/*
	 * Will be accessed by other CPUs.
	 */
	struct cpu_info *ci_next;
	struct lwp *ci_curlwp;
	struct pmap_cpu *ci_pmap_cpu;
	struct lwp *ci_fpcurlwp;
	int ci_fpsaving;
	u_int ci_cpuid;
	int ci_cpumask;			/* (1 << CPU ID) */
	u_int ci_apicid;
	uint8_t ci_initapicid;		/* our initial APIC ID */
	uint8_t ci_packageid;
	uint8_t ci_coreid;
	uint8_t ci_smtid;
	struct cpu_data ci_data;	/* MI per-cpu data */

	/*
	 * Private members.
	 */
	struct evcnt ci_tlb_evcnt;	/* tlb shootdown counter */
	struct pmap *ci_pmap;		/* current pmap */
	int ci_need_tlbwait;		/* need to wait for TLB invalidations */
	int ci_want_pmapload;		/* pmap_load() is needed */
	volatile int ci_tlbstate;	/* one of TLBSTATE_ states. see below */
#define	TLBSTATE_VALID	0	/* all user tlbs are valid */
#define	TLBSTATE_LAZY	1	/* tlbs are valid but won't be kept up to date */
#define	TLBSTATE_STALE	2	/* we might have stale user tlbs */
	uint64_t ci_scratch;
#ifdef XEN
	struct iplsource *ci_isources[NIPL];
#else
	struct intrsource *ci_isources[MAX_INTR_SOURCES];
#endif
	volatile int ci_mtx_count;	/* Negative count of spin mutexes */
	volatile int ci_mtx_oldspl;	/* Old SPL at this ci_idepth */

	/* The following must be aligned for cmpxchg8b. */
	struct {
		uint32_t ipending;
		int ilevel;
	} ci_istate __aligned(8);
#define	ci_ipending	ci_istate.ipending
#define	ci_ilevel	ci_istate.ilevel

	int ci_idepth;
	void *ci_intrstack;
	uint32_t ci_imask[NIPL];
	uint32_t ci_iunmask[NIPL];

	u_int ci_flags;
	uint32_t ci_ipis;

	int32_t ci_cpuid_level;
	uint32_t ci_signature;
	uint32_t ci_feature_flags;
	uint32_t ci_feature2_flags;
	uint32_t ci_feature3_flags;
	uint32_t ci_padlock_flags;
	uint32_t ci_cflush_lsize;
	uint32_t ci_vendor[4];		/* vendor string */
	volatile uint32_t ci_lapic_counter;

	const struct cpu_functions *ci_func;
	void (*cpu_setup)(struct cpu_info *);
	void (*ci_info)(struct cpu_info *);

	struct trapframe *ci_ddb_regs;

	struct x86_cache_info ci_cinfo[CAI_COUNT];

	char *ci_gdt;

	struct evcnt ci_ipi_events[X86_NIPI];

	struct x86_64_tss ci_tss;	/* Per-cpu TSS; shared among LWPs */
	int ci_tss_sel;			/* TSS selector of this cpu */

	/*
	 * The following two are actually region_descriptors,
	 * but that would pollute the namespace.
	 */
	uint64_t ci_suspend_gdt;
	uint16_t ci_suspend_gdt_padding;
	uint64_t ci_suspend_idt;
	uint16_t ci_suspend_idt_padding;

	uint16_t ci_suspend_tr;
	uint16_t ci_suspend_ldt;
	uint32_t ci_suspend_fs_base_l;
	uint32_t ci_suspend_fs_base_h;
	uint32_t ci_suspend_gs_base_l;
	uint32_t ci_suspend_gs_base_h;
	uint32_t ci_suspend_gs_kernelbase_l;
	uint32_t ci_suspend_gs_kernelbase_h;
	uint32_t ci_suspend_msr_efer;
	uint64_t ci_suspend_rbx;
	uint64_t ci_suspend_rbp;
	uint64_t ci_suspend_rsp;
	uint64_t ci_suspend_r12;
	uint64_t ci_suspend_r13;
	uint64_t ci_suspend_r14;
	uint64_t ci_suspend_r15;
	uint64_t ci_suspend_rfl;
	uint64_t ci_suspend_cr0;
	uint64_t ci_suspend_cr2;
	uint64_t ci_suspend_cr3;
	uint64_t ci_suspend_cr4;
	uint64_t ci_suspend_cr8;

	/* The following must be in a single cache line. */
	int ci_want_resched __aligned(64);
	int ci_padout __aligned(64);
};

#define	CPUF_BSP	0x0001		/* CPU is the original BSP */
#define	CPUF_AP		0x0002		/* CPU is an AP */
#define	CPUF_SP		0x0004		/* CPU is only processor */
#define	CPUF_PRIMARY	0x0008		/* CPU is active primary processor */

#define	CPUF_SYNCTSC	0x0800		/* Synchronize TSC */
#define	CPUF_PRESENT	0x1000		/* CPU is present */
#define	CPUF_RUNNING	0x2000		/* CPU is running */
#define	CPUF_PAUSE	0x4000		/* CPU is paused in DDB */
#define	CPUF_GO		0x8000		/* CPU should start running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;

#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpu_info_list; \
					ci != NULL; ci = ci->ci_next
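
/*
 * Usage sketch (illustration only, not part of this header): the two
 * iterator macros above are meant to be expanded inside a for statement,
 * e.g.
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		... per-CPU work ...
 *	}
 */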

#define	X86_MAXPROCS	32	/* bitmask; can be bumped to 64 */

#define	CPU_STARTUP(_ci, _target)	((_ci)->ci_func->start(_ci, _target))
#define	CPU_STOP(_ci)			((_ci)->ci_func->stop(_ci))
#define	CPU_START_CLEANUP(_ci)		((_ci)->ci_func->cleanup(_ci))

#if defined(__GNUC__) && !defined(_LKM)
static struct cpu_info *x86_curcpu(void);
static lwp_t *x86_curlwp(void);

__inline static struct cpu_info * __unused
x86_curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile("movq %%gs:%1, %0" :
	    "=r" (ci) :
	    "m"
	    (*(struct cpu_info * const *)offsetof(struct cpu_info, ci_self)));
	return ci;
}

__inline static lwp_t * __unused
x86_curlwp(void)
{
	lwp_t *l;

	__asm volatile("movq %%gs:%1, %0" :
	    "=r" (l) :
	    "m"
	    (*(struct cpu_info * const *)offsetof(struct cpu_info, ci_curlwp)));
	return l;
}

__inline static void __unused
cpu_set_curpri(int pri)
{

	__asm volatile(
	    "movl %1, %%gs:%0" :
	    "=m" (*(struct cpu_info *)offsetof(struct cpu_info, ci_schedstate.spc_curpriority)) :
	    "r" (pri)
	);
}
#else	/* __GNUC__ && !_LKM */
/* For non-GCC and LKMs */
struct cpu_info	*x86_curcpu(void);
lwp_t	*x86_curlwp(void);
void	cpu_set_curpri(int);
#endif	/* __GNUC__ && !_LKM */

#define	cpu_number()		(curcpu()->ci_cpuid)

#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

extern struct cpu_info *cpu_info[X86_MAXPROCS];

void cpu_boot_secondary_processors(void);
void cpu_init_idle_lwps(void);

#define	X86_AST_GENERIC		0x01
#define	X86_AST_PREEMPT		0x02

#define	aston(l, why)		((l)->l_md.md_astpending |= (why))
#define	cpu_did_resched(l)	((l)->l_md.md_astpending &= ~X86_AST_PREEMPT)

extern uint32_t cpus_attached;

#define	curcpu()		x86_curcpu()
#define	curlwp			x86_curlwp()
#define	curpcb			(&curlwp->l_addr->u_pcb)
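
/*
 * Illustrative note (a sketch, not an interface of its own): curcpu()
 * and curlwp resolve to the %gs-relative loads above, so kernel code can
 * simply write, for example:
 *
 *	struct cpu_info *ci = curcpu();
 *
 *	if (CPU_IS_PRIMARY(ci))
 *		printf("cpu%u is the primary CPU\n", cpu_number());
 */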

/*
 * Arguments to hardclock, softclock and statclock
 * encapsulate the previous machine state in an opaque
 * clockframe; for now, use generic intrframe.
 */
struct clockframe {
	struct intrframe cf_if;
};

#define	CLKF_USERMODE(frame)	USERMODE((frame)->cf_if.if_tf.tf_cs, \
				    (frame)->cf_if.if_tf.tf_rflags)
#define	CLKF_PC(frame)		((frame)->cf_if.if_tf.tf_rip)
#define	CLKF_INTR(frame)	(curcpu()->ci_idepth > 0)

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	LWP_PC(l)		((l)->l_md.md_regs->tf_rip)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the i386, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
extern void cpu_need_proftick(struct lwp *);

/*
 * Notify an LWP that it has a signal pending; process it as soon as possible.
 */
extern void cpu_signotify(struct lwp *);

/*
 * We need a machine-independent name for this.
 */
extern void (*delay_func)(unsigned int);

#define	DELAY(x)		(*delay_func)(x)
#define	delay(x)		(*delay_func)(x)

/*
 * Pull in #defines for kinds of processors.
 */

extern int biosbasemem;
extern int biosextmem;
extern int cpu;
extern int cpu_feature;
extern int cpu_feature2;
extern int cpu_id;
extern int cpuid_level;
extern int cpu_class;
extern char cpu_brand_string[];
extern char cpu_vendorname[];

extern void (*x86_cpu_idle)(void);
#define	cpu_idle()		(*x86_cpu_idle)()

/* identcpu.c */
void cpu_probe(struct cpu_info *);
void cpu_identify(struct cpu_info *);

/* machdep.c */
void dumpconf(void);
void cpu_reset(void);
void x86_64_proc0_tss_ldt_init(void);
void x86_64_init_pcb_tss_ldt(struct cpu_info *);
void cpu_proc_fork(struct proc *, struct proc *);

struct region_descriptor;
void lgdt(struct region_descriptor *);
#ifdef XEN
void lgdt_finish(void);
#endif
void fillw(short, void *, size_t);

struct pcb;
void savectx(struct pcb *);
void lwp_trampoline(void);
void child_trampoline(void);

#ifdef XEN
void startrtclock(void);
void xen_delay(unsigned int);
void xen_initclocks(void);
#else
/* clock.c */
void initrtclock(u_long);
void startrtclock(void);
void i8254_delay(unsigned int);
void i8254_initclocks(void);
#endif

void cpu_init_msrs(struct cpu_info *, bool);

/* vm_machdep.c */
int kvtop(void *);

/* trap.c */
void child_return(void *);

/* consinit.c */
void kgdb_port_init(void);

/* bus_machdep.c */
void x86_bus_space_init(void);
void x86_bus_space_mallocok(void);

#endif	/* _KERNEL */

#include <machine/psl.h>

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CONSDEV		1	/* dev_t: console terminal device */
#define	CPU_BIOSBASEMEM		2	/* int: bios-reported base mem (K) */
#define	CPU_BIOSEXTMEM		3	/* int: bios-reported ext. mem (K) */
#define	CPU_NKPDE		4	/* int: number of kernel PDEs */
#define	CPU_BOOTED_KERNEL	5	/* string: booted kernel name */
#define	CPU_DISKINFO		6	/* disk geometry information */
#define	CPU_FPU_PRESENT		7	/* FPU is present */
#define	CPU_MAXID		8	/* number of valid machdep ids */
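
/*
 * Usage sketch (userland side, error handling omitted; not part of this
 * header): the ids above live under the CTL_MACHDEP tree, so the booted
 * kernel name, for instance, can be read with sysctl(3) roughly like this:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */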

/*
 * Structure for CPU_DISKINFO sysctl call.
 * XXX this should be somewhere else.
 */
#define	MAX_BIOSDISKS	16

struct disklist {
	int dl_nbiosdisks;			/* number of bios disks */
	struct biosdisk_info {
		int bi_dev;			/* BIOS device # (0x80 ..) */
		int bi_cyl;			/* cylinders on disk */
		int bi_head;			/* heads per track */
		int bi_sec;			/* sectors per track */
		uint64_t bi_lbasecs;		/* total sec. (iff ext13) */
#define	BIFLAG_INVALID		0x01
#define	BIFLAG_EXTINT13		0x02
		int bi_flags;
	} dl_biosdisks[MAX_BIOSDISKS];

	int dl_nnativedisks;			/* number of native disks */
	struct nativedisk_info {
		char ni_devname[16];		/* native device name */
		int ni_nmatches;		/* # of matches w/ BIOS */
		int ni_biosmatches[MAX_BIOSDISKS]; /* indices in dl_biosdisks */
	} dl_nativedisks[1];			/* actually longer */
};

#endif	/* !_AMD64_CPU_H_ */