/*	$NetBSD: cpu.c,v 1.56 2008/06/03 23:05:01 jmcneill Exp $	*/

/*-
 * Copyright (c) 2000, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Stefan Grefen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.56 2008/06/03 23:05:01 jmcneill Exp $");

#include "opt_ddb.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
#include "opt_mtrr.h"

#include "lapic.h"
#include "ioapic.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/gdt.h>
#include <machine/mtrr.h>
#include <machine/pio.h>
#include <machine/cpu_counter.h>

#ifdef i386
#include <machine/tlog.h>
#endif

#include <machine/apicvar.h>
#include <machine/i82489reg.h>
#include <machine/i82489var.h>

#include <dev/ic/mc146818reg.h>
#include <i386/isa/nvram.h>
#include <dev/isa/isareg.h>

#include "tsc.h"

#if MAXCPUS > 32
#error cpu_info contains 32bit bitmasks
#endif

int     cpu_match(device_t, cfdata_t, void *);
void    cpu_attach(device_t, device_t, void *);

static bool	cpu_suspend(device_t PMF_FN_PROTO);
static bool	cpu_resume(device_t PMF_FN_PROTO);

struct cpu_softc {
	device_t sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
	bool sc_wasonline;
};

int mp_cpu_start(struct cpu_info *, paddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
					    mp_cpu_start_cleanup };


CFATTACH_DECL_NEW(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL);

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
#ifdef TRAPLOG
struct tlog tlog_primary;
#endif
struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
	.ci_curldt = -1,
#ifdef TRAPLOG
	.ci_tlog_base = &tlog_primary,
#endif
#endif /* TRAPLOG */
};

struct cpu_info *cpu_info_list = &cpu_info_primary;

static void	cpu_set_tss_gates(struct cpu_info *);

#ifdef i386
static void	tss_init(struct i386tss *, void *, void *);
#endif

static void	cpu_init_idle_lwp(struct cpu_info *);

uint32_t cpus_attached = 0;
uint32_t cpus_running = 0;

extern char x86_64_doubleflt_stack[];

bool x86_mp_online;
paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;
static vaddr_t cmos_data_mapping;
struct cpu_info *cpu_starting;

void    	cpu_hatch(void *);
static void    	cpu_boot_secondary(struct cpu_info *ci);
static void    	cpu_start_secondary(struct cpu_info *ci);
static void	cpu_copy_trampoline(void);

/*
 * Runs once per boot once multiprocessor goo has been detected and
 * the local APIC on the boot processor has been mapped.
 *
 * Called from lapic_boot_init() (from mpbios_scan()).
 */
void
cpu_init_first(void)
{

	cpu_info_primary.ci_cpuid = lapic_cpu_number();
	cpu_copy_trampoline();

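	/*
	 * Map physical page 0 so that mp_cpu_start() can later set the
	 * warm reset vector (the DWORD at 40:67, i.e. offset 0x467 in
	 * the BIOS data area) through cmos_data_mapping.
	 */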
	cmos_data_mapping = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY);
	if (cmos_data_mapping == 0)
		panic("No KVA for page 0");
	pmap_kenter_pa(cmos_data_mapping, 0, VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
}

int
cpu_match(device_t parent, cfdata_t match, void *aux)
{

	return 1;
}

static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch(cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
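		/*
		 * tcolors is now the number of page-sized slots in one
		 * cache way.  As a worked illustration (hypothetical
		 * numbers): a 512 KB, 8-way cache with 4 KB pages gives
		 * atop(524288) = 128 pages, and 128 / 8 = 16 colors.
		 */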
		ncolors = max(ncolors, tcolors);
		/*
		 * If the desired number of colors is not a power of
		 * two, it won't be good.  Find the greatest power of
		 * two which is an even divisor of the number of colors,
		 * to preserve even coloring of pages.
		 */
		if (ncolors & (ncolors - 1) ) {
			int try, picked = 1;
			for (try = 1; try < ncolors; try *= 2) {
				if (ncolors % try == 0) picked = try;
			}
			if (picked == 1) {
				panic("desired number of cache colors "
				    "%d is > 1, but not even!", ncolors);
			}
			ncolors = picked;
		}
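		/*
		 * A sketch of an equivalent, loop-free computation (not
		 * used here): in two's complement, ncolors & -ncolors
		 * isolates the lowest set bit, which is exactly the
		 * greatest power of two dividing ncolors:
		 *
		 *	picked = ncolors & -ncolors;
		 */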
	}

	/*
	 * Knowing the size of the largest cache on this CPU, re-color
	 * our pages.
	 */
	if (ncolors <= uvmexp.ncolors)
		return;
	aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
	uvm_page_recolor(ncolors);
}


void
cpu_attach(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	int cpunum = caa->cpu_number;
	static bool again;

	sc->sc_dev = self;

	if (cpus_attached == ~0) {
		aprint_error(": increase MAXCPUS\n");
		return;
	}

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		if ((boothowto & RB_MD1) != 0) {
			aprint_error(": multiprocessor boot disabled\n");
			if (!pmf_device_register(self, NULL, NULL))
				aprint_error_dev(self,
				    "couldn't establish power handler\n");
			return;
		}
		aprint_naive(": Application Processor\n");
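		/*
		 * Over-allocate by CACHE_LINE_SIZE - 1 bytes and round
		 * the pointer up, so that the cpu_info ends up aligned
		 * on a cache line boundary.
		 */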
		ptr = (uintptr_t)malloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    M_DEVBUF, M_WAITOK);
		ci = (struct cpu_info *)((ptr + CACHE_LINE_SIZE - 1) &
		    ~(CACHE_LINE_SIZE - 1));
		memset(ci, 0, sizeof(*ci));
		ci->ci_curldt = -1;
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		aprint_naive(": %s Processor\n",
		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
		ci = &cpu_info_primary;
		if (cpunum != lapic_cpu_number()) {
			/* XXX should be done earlier. */
			uint32_t reg;
			aprint_verbose("\n");
			aprint_verbose_dev(self, "running CPU at apic %d"
			    " instead of at expected %d", lapic_cpu_number(),
			    cpunum);
			reg = i82489_readreg(LAPIC_ID);
			i82489_writereg(LAPIC_ID, (reg & ~LAPIC_ID_MASK) |
			    (cpunum << LAPIC_ID_SHIFT));
		}
		if (cpunum != lapic_cpu_number()) {
			aprint_error_dev(self, "unable to reset apic id\n");
		}
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_cpuid = caa->cpu_number;
	ci->ci_func = caa->cpu_func;

	/* Must be before mi_cpu_attach(). */
	cpu_vm_init(ci);

	if (caa->cpu_role == CPU_ROLE_AP) {
		int error;

		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_normal("\n");
			aprint_error_dev(self,
			    "mi_cpu_attach failed with %d\n", error);
			return;
		}
		cpu_init_tss(ci);
	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	ci->ci_cpumask = (1 << cpu_index(ci));
	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/*
	 * The boot processor is not necessarily attached first, but the
	 * initialization below must happen before other processors can
	 * be booted.
	 */
	if (!again) {
		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
		/* Basic init. */
		cpu_intr_init(ci);
		cpu_get_tsc_freq(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		pmap_cpu_init_late(ci);
		if (caa->cpu_role != CPU_ROLE_SP) {
			/* Enable lapic. */
			lapic_enable();
			lapic_set_lvt();
			lapic_calibrate_timer(ci);
		}
		/* Make sure DELAY() is initialized. */
		DELAY(1);
		again = true;
	}

	/* further PCB init done later. */

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		atomic_or_32(&ci->ci_flags, CPUF_SP);
		cpu_identify(ci);
		x86_errata();
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_BP:
		atomic_or_32(&ci->ci_flags, CPUF_BSP);
		cpu_identify(ci);
		x86_errata();
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_AP:
		/*
		 * Initialize and start an Application Processor.
		 */
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		cpu_set_tss_gates(ci);
		pmap_cpu_init_early(ci);
		pmap_cpu_init_late(ci);
		cpu_start_secondary(ci);
		if (ci->ci_flags & CPUF_PRESENT) {
			cpu_identify(ci);
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
		break;

	default:
		aprint_normal("\n");
		panic("unknown processor type");
	}

	atomic_or_32(&cpus_attached, ci->ci_cpumask);

	if (!pmf_device_register(self, cpu_suspend, cpu_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;

		aprint_verbose_dev(self,
		    "idle lwp at %p, idle sp at %p\n",
		    l,
#ifdef i386
		    (void *)l->l_addr->u_pcb.pcb_esp
#else
		    (void *)l->l_addr->u_pcb.pcb_rsp
#endif
		);
	}
}

/*
 * Initialize the processor appropriately.
 */

void
cpu_init(struct cpu_info *ci)
{

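	/*
	 * CR0_WP makes supervisor-mode writes honor page-level write
	 * protection, which copy-on-write handling relies on.
	 */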
	lcr0(rcr0() | CR0_WP);

	/*
	 * On a P6 or above, enable global TLB caching if the
	 * hardware supports it.
	 */
	if (cpu_feature & CPUID_PGE)
		lcr4(rcr4() | CR4_PGE);	/* enable global TLB caching */

	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2))
			lcr4(rcr4() | CR4_OSXMMEXCPT);
	}

#ifdef MTRR
	/*
	 * On a P6 or above, initialize MTRR's if the hardware supports them.
	 */
	if (cpu_feature & CPUID_MTRR) {
		if ((ci->ci_flags & CPUF_AP) == 0)
			i686_mtrr_init_first();
		mtrr_init_cpu(ci);
	}

#ifdef i386
	if (strcmp((char *)(ci->ci_vendor), "AuthenticAMD") == 0) {
		/*
		 * Must be a K6-2 Step >= 7 or a K6-III.
		 */
		if (CPUID2FAMILY(ci->ci_signature) == 5) {
			if (CPUID2MODEL(ci->ci_signature) > 8 ||
			    (CPUID2MODEL(ci->ci_signature) == 8 &&
			     CPUID2STEPPING(ci->ci_signature) >= 7)) {
				mtrr_funcs = &k6_mtrr_funcs;
				k6_mtrr_init_first();
				mtrr_init_cpu(ci);
			}
		}
	}
#endif	/* i386 */
#endif /* MTRR */

	atomic_or_32(&cpus_running, ci->ci_cpumask);

	if (ci != &cpu_info_primary) {
		/* Synchronize TSC again, and check for drift. */
		wbinvd();
		atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
		tsc_sync_ap(ci);
		tsc_sync_ap(ci);
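		/*
		 * Note: each tsc_sync_ap() call here pairs with one of
		 * the two tsc_sync_bp() calls made by the boot processor
		 * in cpu_boot_secondary().
		 */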
	} else {
		atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
	}
}

void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	u_long i;

	/* Now that we know the number of CPUs, patch the text segment. */
	x86_patch();

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup_byindex(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
	}

	x86_mp_online = true;

	/* Now that we know about the TSC, attach the timecounter. */
	tsc_tc_init();

	/* Enable zeroing of pages in the idle loop if we have SSE2. */
	vm_page_zero_enable = ((cpu_feature & CPUID_SSE2) != 0);
}

static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = &l->l_addr->u_pcb;

	pcb->pcb_cr0 = rcr0();
}

void
cpu_init_idle_lwps(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup_byindex(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

void
cpu_start_secondary(struct cpu_info *ci)
{
	extern paddr_t mp_pdirpa;
	u_long psl;
	int i;

	mp_pdirpa = pmap_init_tmp_pgtbl(mp_trampoline_paddr);
	atomic_or_32(&ci->ci_flags, CPUF_AP);
	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
	if (CPU_STARTUP(ci, mp_trampoline_paddr) != 0) {
		return;
	}

	/*
	 * Wait for it to become ready.  Setting cpu_starting opens the
	 * initial gate and allows the AP to start soft initialization.
	 */
	KASSERT(cpu_starting == NULL);
	cpu_starting = ci;
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
#ifdef MPDEBUG
		extern int cpu_trace[3];
		static int otrace[3];
		if (memcmp(otrace, cpu_trace, sizeof(otrace)) != 0) {
			aprint_debug_dev(ci->ci_dev, "trace %02x %02x %02x\n",
			    cpu_trace[0], cpu_trace[1], cpu_trace[2]);
			memcpy(otrace, cpu_trace, sizeof(otrace));
		}
#endif
		i8254_delay(10);
	}

	if ((ci->ci_flags & CPUF_PRESENT) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to become ready\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	} else {
		/*
		 * Synchronize the time stamp counters.  Invalidate the
		 * cache and sync twice, to try to minimize possible cache
		 * effects.  Disable interrupts to try to rule out any
		 * external interference.
		 */
		psl = x86_read_psl();
		x86_disable_intr();
		wbinvd();
		tsc_sync_bp(ci);
		tsc_sync_bp(ci);
		x86_write_psl(psl);
	}

	CPU_START_CLEANUP(ci);
	cpu_starting = NULL;
}

void
cpu_boot_secondary(struct cpu_info *ci)
{
	int64_t drift;
	u_long psl;
	int i;

	atomic_or_32(&ci->ci_flags, CPUF_GO);
	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
		i8254_delay(10);
	}
	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	} else {
		/* Synchronize TSC again, check for drift. */
		drift = ci->ci_data.cpu_cc_skew;
		psl = x86_read_psl();
		x86_disable_intr();
		wbinvd();
		tsc_sync_bp(ci);
		tsc_sync_bp(ci);
		x86_write_psl(psl);
		drift -= ci->ci_data.cpu_cc_skew;
		aprint_debug_dev(ci->ci_dev, "TSC skew=%lld drift=%lld\n",
		    (long long)ci->ci_data.cpu_cc_skew, (long long)drift);
		tsc_sync_drift(drift);
	}
}
/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s, i;

#ifdef __x86_64__
	cpu_init_msrs(ci, true);
#endif
	cpu_probe(ci);

	ci->ci_data.cpu_cc_freq = cpu_info_primary.ci_data.cpu_cc_freq;
	/* cpu_get_tsc_freq(ci); */

	KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0);

	/*
	 * Synchronize the time stamp counters.  Invalidate the cache and
	 * sync twice, to try to minimize possible cache effects.  Note
	 * that interrupts are off at this point.
	 */
	wbinvd();
	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);
	tsc_sync_ap(ci);
	tsc_sync_ap(ci);

	/*
	 * Wait to be brought online.  Use 'monitor/mwait' if available,
	 * in order to make the TSC drift as much as possible, so that
	 * we can detect it later.  If not available, try 'pause'.
	 * We'd like to use 'hlt', but we have interrupts off.
	 */
	while ((ci->ci_flags & CPUF_GO) == 0) {
		if ((ci->ci_feature2_flags & CPUID2_MONITOR) != 0) {
			x86_monitor(&ci->ci_flags, 0, 0);
			if ((ci->ci_flags & CPUF_GO) != 0) {
				continue;
			}
			x86_mwait(0, 0);
		} else {
			for (i = 10000; i != 0; i--) {
				x86_pause();
			}
		}
	}

	/* Because the text may have been patched in x86_patch(). */
	wbinvd();
	x86_flush();

	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);

	lcr3(pmap_kernel()->pm_pdirpa);
	curlwp->l_addr->u_pcb.pcb_cr3 = pmap_kernel()->pm_pdirpa;
	lcr0(ci->ci_data.cpu_idlelwp->l_addr->u_pcb.pcb_cr0);
	cpu_init_idt();
	gdt_init_cpu(ci);
	lapic_enable();
	lapic_set_lvt();
	lapic_initclocks();

#ifdef i386
	npxinit(ci);
#else
	fpuinit(ci);
#endif
	lldt(GSYSSEL(GLDT_SEL, SEL_KPL));
	ltr(ci->ci_tss_sel);

	cpu_init(ci);
	cpu_get_tsc_freq(ci);

	s = splhigh();
#ifdef i386
	lapic_tpr = 0;
#else
	lcr8(0);
#endif
	x86_enable_intr();
	splx(s);
	x86_errata();

	aprint_debug_dev(ci->ci_dev, "running\n");
}

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curlwp 		fpcurlwp\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
		    (long)ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif

static void
cpu_copy_trampoline(void)
{
	/*
	 * Copy boot code.
	 */
	extern u_char cpu_spinup_trampoline[];
	extern u_char cpu_spinup_trampoline_end[];

	vaddr_t mp_trampoline_vaddr;

	mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY);

	pmap_kenter_pa(mp_trampoline_vaddr, mp_trampoline_paddr,
	    VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	memcpy((void *)mp_trampoline_vaddr,
	    cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end - cpu_spinup_trampoline);

	pmap_kremove(mp_trampoline_vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, mp_trampoline_vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
}

#ifdef i386
static void
tss_init(struct i386tss *tss, void *stack, void *func)
{
	memset(tss, 0, sizeof *tss);
	tss->tss_esp0 = tss->tss_esp = (int)((char *)stack + USPACE - 16);
	tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	tss->__tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	tss->tss_fs = GSEL(GCPU_SEL, SEL_KPL);
	tss->tss_gs = tss->__tss_es = tss->__tss_ds =
	    tss->__tss_ss = GSEL(GDATA_SEL, SEL_KPL);
	tss->tss_cr3 = pmap_kernel()->pm_pdirpa;
	tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
	tss->__tss_eflags = PSL_MBO | PSL_NT;	/* XXX not needed? */
	tss->__tss_eip = (int)func;
}

/* XXX */
#define IDTVEC(name)	__CONCAT(X, name)
typedef void (vector)(void);
extern vector IDTVEC(tss_trap08);
#ifdef DDB
extern vector Xintrddbipi;
extern int ddb_vec;
#endif

static void
cpu_set_tss_gates(struct cpu_info *ci)
{
	struct segment_descriptor sd;

	ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
	    UVM_KMF_WIRED);
	tss_init(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
	    IDTVEC(tss_trap08));
	setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GTRAPTSS_SEL].sd = sd;
	setgate(&idt[8], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GTRAPTSS_SEL, SEL_KPL));

#if defined(DDB)
	/*
	 * Set up separate handler for the DDB IPI, so that it doesn't
	 * stomp on a possibly corrupted stack.
	 *
	 * XXX overwriting the gate set in db_machine_init.
	 * Should rearrange the code so that it's set only once.
	 */
	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
	    UVM_KMF_WIRED);
	tss_init(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack, Xintrddbipi);

	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GIPITSS_SEL].sd = sd;

	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}
#else
static void
cpu_set_tss_gates(struct cpu_info *ci)
{

}
#endif	/* i386 */

int
mp_cpu_start(struct cpu_info *ci, paddr_t target)
{
	unsigned short dwordptr[2];
	int error;

	/*
	 * Bootstrap code must be addressable in real mode
	 * and it must be page aligned.
	 */
	KASSERT(target < 0x10000 && target % PAGE_SIZE == 0);
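	/*
	 * (The STARTUP IPIs sent below encode the entry point as a page
	 * number, target / PAGE_SIZE, so the page-alignment requirement
	 * follows from the IPI format itself.)
	 */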

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = target >> 4;
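	/*
	 * The vector is a real-mode far pointer stored offset-first:
	 * offset 0 and segment target >> 4, so the AP jumps to
	 * ((target >> 4) << 4) + 0 == target.
	 */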

	memcpy((uint8_t *)cmos_data_mapping + 0x467, dwordptr, 4);

	if ((cpu_feature & CPUID_APIC) == 0) {
		aprint_error("mp_cpu_start: CPU does not have APIC\n");
		return ENODEV;
	}

	/*
	 * ... prior to executing the following sequence:".  We'll also add in
	 * local cache flush, in case the BIOS has left the AP with its cache
	 * disabled.  It may not be able to cope with MP coherency.
	 */
	wbinvd();

	if (ci->ci_flags & CPUF_AP) {
		error = x86_ipi_init(ci->ci_cpuid);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (1)\n",
			    __func__);
			return error;
		}
		i8254_delay(10000);

		error = x86_ipi_startup(ci->ci_cpuid, target / PAGE_SIZE);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (2)\n",
			    __func__);
			return error;
		}
		i8254_delay(200);

		error = x86_ipi_startup(ci->ci_cpuid, target / PAGE_SIZE);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (3)\n",
			    __func__);
			return error;
		}
		i8254_delay(200);
	}

	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
}

#ifdef __x86_64__
typedef void (vector)(void);
extern vector Xsyscall, Xsyscall32;

void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
	wrmsr(MSR_STAR,
	    ((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((uint64_t)LSEL(LSYSRETBASE_SEL, SEL_UPL) << 48));
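	/*
	 * For reference (per the AMD64 architecture): STAR[47:32] is the
	 * selector base that SYSCALL loads into CS/SS (kernel), and
	 * STAR[63:48] the base from which SYSRET derives the user CS/SS.
	 */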
	wrmsr(MSR_LSTAR, (uint64_t)Xsyscall);
	wrmsr(MSR_CSTAR, (uint64_t)Xsyscall32);
	wrmsr(MSR_SFMASK, PSL_NT|PSL_T|PSL_I|PSL_C);

	if (full) {
		wrmsr(MSR_FSBASE, 0);
		wrmsr(MSR_GSBASE, (uint64_t)ci);
		wrmsr(MSR_KERNELGSBASE, 0);
	}

	if (cpu_feature & CPUID_NOX)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}
#endif	/* __x86_64__ */

void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
#ifdef __i386__
	npxsave_cpu(true);
#else
	fpusave_cpu(true);
#endif
	splx(s);
}

/* XXX joerg restructure and restart CPUs individually */
static bool
cpu_suspend(device_t dv PMF_FN_ARGS)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;
	int err;

	if (ci->ci_flags & CPUF_PRIMARY)
		return true;
	if (ci->ci_data.cpu_idlelwp == NULL)
		return true;
	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	sc->sc_wasonline = !(ci->ci_schedstate.spc_flags & SPCF_OFFLINE);

	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setonline(ci, false);
		mutex_exit(&cpu_lock);

		if (err)
			return false;
	}

	return true;
}

static bool
cpu_resume(device_t dv PMF_FN_ARGS)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;
	int err = 0;

	if (ci->ci_flags & CPUF_PRIMARY)
		return true;
	if (ci->ci_data.cpu_idlelwp == NULL)
		return true;
	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setonline(ci, true);
		mutex_exit(&cpu_lock);
	}

	return err == 0;
}

void
cpu_get_tsc_freq(struct cpu_info *ci)
{
	uint64_t last_tsc;

	if (ci->ci_feature_flags & CPUID_TSC) {
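		/*
		 * Measure TSC ticks across a 100000 us (1/10 s) i8254
		 * delay; multiplying the delta by 10 then yields cycles
		 * per second.
		 */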
		last_tsc = rdmsr(MSR_TSC);
		i8254_delay(100000);
		ci->ci_data.cpu_cc_freq = (rdmsr(MSR_TSC) - last_tsc) * 10;
	}
}

void
x86_cpu_idle_mwait(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

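	/*
	 * Arm the monitor on ci_want_resched, then re-check the flag
	 * before sleeping: a write that lands between MONITOR and the
	 * check is caught here, and a later write wakes the MWAIT.
	 */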
	x86_monitor(&ci->ci_want_resched, 0, 0);
	if (__predict_false(ci->ci_want_resched)) {
		return;
	}
	x86_mwait(0, 0);
}

void
x86_cpu_idle_halt(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

	x86_disable_intr();
	if (!__predict_false(ci->ci_want_resched)) {
		x86_stihlt();
	} else {
		x86_enable_intr();
	}
}