/*	$NetBSD: cpu.c,v 1.120 2016/07/07 06:55:40 msaitoh Exp $	*/

/*-
 * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Stefan Grefen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.120 2016/07/07 06:55:40 msaitoh Exp $");

#include "opt_ddb.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
#include "opt_mtrr.h"
#include "opt_multiprocessor.h"

#include "lapic.h"
#include "ioapic.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/idle.h>
#include <sys/atomic.h>
#include <sys/reboot.h>

#include <uvm/uvm.h>

#include "acpica.h"		/* for NACPICA, for mp_verbose */

#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#if defined(MULTIPROCESSOR)
#include <machine/mpbiosvar.h>
#endif
#include <machine/mpconfig.h>		/* for mp_verbose */
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/gdt.h>
#include <machine/mtrr.h>
#include <machine/pio.h>
#include <machine/cpu_counter.h>

#include <x86/fpu.h>

#ifdef i386
#include <machine/tlog.h>
#endif

#if NLAPIC > 0
#include <machine/apicvar.h>
#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#endif

#include <dev/ic/mc146818reg.h>
#include <i386/isa/nvram.h>
#include <dev/isa/isareg.h>

#include "tsc.h"

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);
static void	cpu_defer(device_t);
static int	cpu_rescan(device_t, const char *, const int *);
static void	cpu_childdetached(device_t, device_t);
static bool	cpu_stop(device_t);
static bool	cpu_suspend(device_t, const pmf_qual_t *);
static bool	cpu_resume(device_t, const pmf_qual_t *);
static bool	cpu_shutdown(device_t, int);

struct cpu_softc {
	device_t sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
	bool sc_wasonline;
};

#ifdef MULTIPROCESSOR
int mp_cpu_start(struct cpu_info *, paddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
					    mp_cpu_start_cleanup };
#endif


CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached);

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
#ifdef TRAPLOG
struct tlog tlog_primary;
#endif
struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
	.ci_curldt = -1,
#ifdef TRAPLOG
	.ci_tlog_base = &tlog_primary,
#endif /* TRAPLOG */
};

struct cpu_info *cpu_info_list = &cpu_info_primary;

static void	cpu_set_tss_gates(struct cpu_info *);

#ifdef i386
static void	tss_init(struct i386tss *, void *, void *);
#endif

static void	cpu_init_idle_lwp(struct cpu_info *);

uint32_t cpu_feature[7]; /* X86 CPUID feature bits */
			/* [0] basic features, cpuid.1:%edx
			 * [1] basic features, cpuid.1:%ecx (CPUID2_xxx bits)
			 * [2] extended features, cpuid.80000001:%edx
			 * [3] extended features, cpuid.80000001:%ecx
			 * [4] VIA PadLock features
			 * [5] structured extended features, cpuid.7:%ebx
			 * [6] structured extended features, cpuid.7:%ecx
			 */

extern char x86_64_doubleflt_stack[];

#ifdef MULTIPROCESSOR
bool x86_mp_online;
paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;
#endif
#if NLAPIC > 0
static vaddr_t cmos_data_mapping;
#endif
struct cpu_info *cpu_starting;

#ifdef MULTIPROCESSOR
void		cpu_hatch(void *);
static void	cpu_boot_secondary(struct cpu_info *ci);
static void	cpu_start_secondary(struct cpu_info *ci);
#endif
#if NLAPIC > 0
static void	cpu_copy_trampoline(void);
#endif

/*
 * Runs once per boot, once the multiprocessor goo has been detected and
 * the local APIC on the boot processor has been mapped.
 *
 * Called from lapic_boot_init() (from mpbios_scan()).
 */
#if NLAPIC > 0
void
cpu_init_first(void)
{

	cpu_info_primary.ci_cpuid = lapic_cpu_number();
	cpu_copy_trampoline();

	cmos_data_mapping = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY);
	if (cmos_data_mapping == 0)
		panic("No KVA for page 0");
	pmap_kenter_pa(cmos_data_mapping, 0, VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
}
#endif

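/*
 * Autoconf match: unconditionally match any "cpu" device proposed by the
 * parent bus; per-CPU sanity checks happen later in cpu_attach().
 */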
static int
cpu_match(device_t parent, cfdata_t match, void *aux)
{

	return 1;
}

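/*
 * Derive the number of VM page colors from the cache geometry reported
 * via CPUID: a cache of cai_totalsize bytes with cai_associativity ways
 * spans atop(totalsize) / associativity page-sized "colors".  For example
 * (illustrative numbers only, not from any particular CPU): a 512KB 8-way
 * L2 with 4KB pages gives 128 pages / 8 ways = 16 colors.
 */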
static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch (cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
		/*
		 * If the desired number of colors is not a power of
		 * two, page coloring degenerates.  Find the greatest
		 * power of two which evenly divides the number of
		 * colors, to preserve even coloring of pages.
		 */
		if (ncolors & (ncolors - 1)) {
			int try, picked = 1;
			for (try = 1; try < ncolors; try *= 2) {
				if (ncolors % try == 0)
					picked = try;
			}
			if (picked == 1) {
				panic("desired number of cache colors %d is "
				    "> 1, but not even!", ncolors);
			}
			ncolors = picked;
		}
	}

	/*
	 * Knowing the size of the largest cache on this CPU, potentially
	 * re-color our pages.
	 */
	aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
	uvm_page_recolor(ncolors);

	pmap_tlb_cpu_init(ci);
}

static void
cpu_attach(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
#if NLAPIC > 0
	int cpunum = caa->cpu_number;
#endif
	static bool again;

	sc->sc_dev = self;

	if (ncpu == maxcpus) {
#ifndef _LP64
		aprint_error(": too many CPUs, please use NetBSD/amd64\n");
#else
		aprint_error(": too many CPUs\n");
#endif
		return;
	}

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		if ((boothowto & RB_MD1) != 0) {
			aprint_error(": multiprocessor boot disabled\n");
			if (!pmf_device_register(self, NULL, NULL))
				aprint_error_dev(self,
				    "couldn't establish power handler\n");
			return;
		}
		aprint_naive(": Application Processor\n");
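		/*
		 * Over-allocate by CACHE_LINE_SIZE - 1 bytes and round
		 * the pointer up, so that the AP's cpu_info starts on
		 * its own cache line and does not false-share with
		 * neighbouring allocations.
		 */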
		ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		ci->ci_curldt = -1;
#ifdef TRAPLOG
		ci->ci_tlog_base = kmem_zalloc(sizeof(struct tlog), KM_SLEEP);
#endif
	} else {
		aprint_naive(": %s Processor\n",
		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
		ci = &cpu_info_primary;
#if NLAPIC > 0
		if (cpunum != lapic_cpu_number()) {
			/* XXX should be done earlier. */
			uint32_t reg;
			aprint_verbose("\n");
			aprint_verbose_dev(self, "running CPU at apic %d"
			    " instead of at expected %d", lapic_cpu_number(),
			    cpunum);
			reg = i82489_readreg(LAPIC_ID);
			i82489_writereg(LAPIC_ID, (reg & ~LAPIC_ID_MASK) |
			    (cpunum << LAPIC_ID_SHIFT));
		}
		if (cpunum != lapic_cpu_number()) {
			aprint_error_dev(self, "unable to reset apic id\n");
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_acpiid = caa->cpu_id;
	ci->ci_cpuid = caa->cpu_number;
	ci->ci_func = caa->cpu_func;
	aprint_normal("\n");

	/* Must be before mi_cpu_attach(). */
	cpu_vm_init(ci);

	if (caa->cpu_role == CPU_ROLE_AP) {
		int error;

		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_error_dev(self,
			    "mi_cpu_attach failed with %d\n", error);
			return;
		}
		cpu_init_tss(ci);
	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/*
	 * The boot processor may not attach first, but the initialization
	 * below must nonetheless be done before other processors can be
	 * booted.
	 */
	if (!again) {
		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
		/* Basic init. */
		cpu_intr_init(ci);
		cpu_get_tsc_freq(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		pmap_cpu_init_late(ci);
#if NLAPIC > 0
		if (caa->cpu_role != CPU_ROLE_SP) {
			/* Enable lapic. */
			lapic_enable();
			lapic_set_lvt();
			lapic_calibrate_timer(ci);
		}
#endif
		/* Make sure DELAY() is initialized. */
		DELAY(1);
		again = true;
	}

	/* further PCB init done later. */

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		atomic_or_32(&ci->ci_flags, CPUF_SP);
		cpu_identify(ci);
		x86_errata();
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_BP:
		atomic_or_32(&ci->ci_flags, CPUF_BSP);
		cpu_identify(ci);
		x86_errata();
		x86_cpu_idle_init();
		break;

#ifdef MULTIPROCESSOR
	case CPU_ROLE_AP:
		/*
		 * Report on an Application Processor and start it up.
		 */
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		cpu_set_tss_gates(ci);
		pmap_cpu_init_late(ci);
		cpu_start_secondary(ci);
		if (ci->ci_flags & CPUF_PRESENT) {
			struct cpu_info *tmp;

			cpu_identify(ci);
			tmp = cpu_info_list;
			while (tmp->ci_next)
				tmp = tmp->ci_next;

			tmp->ci_next = ci;
		}
		break;
#endif

	default:
		panic("unknown processor type");
	}

	pat_init(ci);

	if (!pmf_device_register1(self, cpu_suspend, cpu_resume, cpu_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");

#ifdef MULTIPROCESSOR
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;
		struct pcb *pcb = lwp_getpcb(l);

		aprint_verbose_dev(self,
		    "idle lwp at %p, idle sp at %p\n",
		    l,
#ifdef i386
		    (void *)pcb->pcb_esp
#else
		    (void *)pcb->pcb_rsp
#endif
		);
	}
#endif

	/*
	 * Postpone the "cpufeaturebus" scan.
	 * It is safe to scan the pseudo-bus
	 * only after all CPUs have attached.
	 */
	(void)config_defer(self, cpu_defer);
}

static void
cpu_defer(device_t self)
{
	cpu_rescan(self, NULL, NULL);
}

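/*
 * Scan the "cpufeaturebus" pseudo-bus for per-CPU feature drivers
 * (frequency, PadLock, temperature, virtualization).  Each child is
 * attached at most once; cpu_childdetached() clears the slot so a
 * detached driver can be picked up again on a later rescan.
 */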
static int
cpu_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct cpu_softc *sc = device_private(self);
	struct cpufeature_attach_args cfaa;
	struct cpu_info *ci = sc->sc_info;

	memset(&cfaa, 0, sizeof(cfaa));
	cfaa.ci = ci;

	if (ifattr_match(ifattr, "cpufeaturebus")) {

		if (ci->ci_frequency == NULL) {
			cfaa.name = "frequency";
			ci->ci_frequency = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}

		if (ci->ci_padlock == NULL) {
			cfaa.name = "padlock";
			ci->ci_padlock = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}

		if (ci->ci_temperature == NULL) {
			cfaa.name = "temperature";
			ci->ci_temperature = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}

		if (ci->ci_vm == NULL) {
			cfaa.name = "vm";
			ci->ci_vm = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}
	}

	return 0;
}

static void
cpu_childdetached(device_t self, device_t child)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_info;

	if (ci->ci_frequency == child)
		ci->ci_frequency = NULL;

	if (ci->ci_padlock == child)
		ci->ci_padlock = NULL;

	if (ci->ci_temperature == child)
		ci->ci_temperature = NULL;

	if (ci->ci_vm == child)
		ci->ci_vm = NULL;
}

/*
 * Initialize the processor appropriately.
 */

void
cpu_init(struct cpu_info *ci)
{
	uint32_t cr4 = 0;

	lcr0(rcr0() | CR0_WP);

	/*
	 * On a P6 or above, enable global TLB caching if the
	 * hardware supports it.
	 */
	if (cpu_feature[0] & CPUID_PGE)
		cr4 |= CR4_PGE;	/* enable global TLB caching */

	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature[0] & CPUID_FXSR) {
		cr4 |= CR4_OSFXSR;

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2))
			cr4 |= CR4_OSXMMEXCPT;
	}

	/* If xsave is supported, enable it */
	if (cpu_feature[1] & CPUID2_XSAVE)
		cr4 |= CR4_OSXSAVE;

	/* If SMEP is supported, enable it */
	if (cpu_feature[5] & CPUID_SEF_SMEP)
		cr4 |= CR4_SMEP;

	if (cr4) {
		cr4 |= rcr4();
		lcr4(cr4);
	}

	/* If xsave is enabled, enable all fpu features */
	if (cr4 & CR4_OSXSAVE)
		wrxcr(0, x86_xsave_features & XCR0_FPU);

#ifdef MTRR
	/*
	 * On a P6 or above, initialize MTRRs if the hardware supports them.
	 */
	if (cpu_feature[0] & CPUID_MTRR) {
		if ((ci->ci_flags & CPUF_AP) == 0)
			i686_mtrr_init_first();
		mtrr_init_cpu(ci);
	}

#ifdef i386
	if (strcmp((char *)(ci->ci_vendor), "AuthenticAMD") == 0) {
		/*
		 * Must be a K6-2 Step >= 7 or a K6-III.
		 */
		if (CPUID_TO_FAMILY(ci->ci_signature) == 5) {
			if (CPUID_TO_MODEL(ci->ci_signature) > 8 ||
			    (CPUID_TO_MODEL(ci->ci_signature) == 8 &&
			     CPUID_TO_STEPPING(ci->ci_signature) >= 7)) {
				mtrr_funcs = &k6_mtrr_funcs;
				k6_mtrr_init_first();
				mtrr_init_cpu(ci);
			}
		}
	}
#endif	/* i386 */
#endif /* MTRR */

	if (ci != &cpu_info_primary) {
		/* Synchronize TSC again, and check for drift. */
		wbinvd();
		atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
		tsc_sync_ap(ci);
	} else {
		atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
	}
}

#ifdef MULTIPROCESSOR
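/*
 * Boot all Application Processors that were attached and marked present,
 * then spin until every booted CPU shows up in kcpuset_running.  Runs on
 * the boot processor after all cpu devices have attached.
 */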
void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	kcpuset_t *cpus;
	u_long i;

	/* Now that we know the number of CPUs, patch the text segment. */
	x86_patch(false);

	kcpuset_create(&cpus, true);
	kcpuset_set(cpus, cpu_index(curcpu()));
	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
		kcpuset_set(cpus, cpu_index(ci));
	}
	while (!kcpuset_match(cpus, kcpuset_running))
		;
	kcpuset_destroy(cpus);

	x86_mp_online = true;

	/* Now that we know about the TSC, attach the timecounter. */
	tsc_tc_init();

	/* Enable zeroing of pages in the idle loop if we have SSE2. */
	vm_page_zero_enable = ((cpu_feature[0] & CPUID_SSE2) != 0);
}
#endif

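/*
 * Record the current %cr0 in the idle lwp's PCB, so that context-switch
 * code restores a known-good control register on each CPU.
 */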
static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = lwp_getpcb(l);

	pcb->pcb_cr0 = rcr0();
}

void
cpu_init_idle_lwps(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

#ifdef MULTIPROCESSOR
void
cpu_start_secondary(struct cpu_info *ci)
{
	extern paddr_t mp_pdirpa;
	u_long psl;
	int i;

	mp_pdirpa = pmap_init_tmp_pgtbl(mp_trampoline_paddr);
	atomic_or_32(&ci->ci_flags, CPUF_AP);
	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
	if (CPU_STARTUP(ci, mp_trampoline_paddr) != 0) {
		return;
	}

	/*
	 * Wait for it to become ready.  Setting cpu_starting opens the
	 * initial gate and allows the AP to start soft initialization.
	 */
	KASSERT(cpu_starting == NULL);
	cpu_starting = ci;
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
#ifdef MPDEBUG
		extern int cpu_trace[3];
		static int otrace[3];
		if (memcmp(otrace, cpu_trace, sizeof(otrace)) != 0) {
			aprint_debug_dev(ci->ci_dev, "trace %02x %02x %02x\n",
			    cpu_trace[0], cpu_trace[1], cpu_trace[2]);
			memcpy(otrace, cpu_trace, sizeof(otrace));
		}
#endif
		i8254_delay(10);
	}

	if ((ci->ci_flags & CPUF_PRESENT) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to become ready\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	} else {
		/*
		 * Synchronize time stamp counters.  Invalidate cache and
		 * do it twice to try and minimize possible cache effects.
		 * Disable interrupts to try and rule out any external
		 * interference.
		 */
		psl = x86_read_psl();
		x86_disable_intr();
		wbinvd();
		tsc_sync_bp(ci);
		x86_write_psl(psl);
	}

	CPU_START_CLEANUP(ci);
	cpu_starting = NULL;
}

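/*
 * Release a started AP into the scheduler by setting CPUF_GO, then wait
 * for it to raise CPUF_RUNNING and measure the remaining TSC skew.
 */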
void
cpu_boot_secondary(struct cpu_info *ci)
{
	int64_t drift;
	u_long psl;
	int i;

	atomic_or_32(&ci->ci_flags, CPUF_GO);
	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
		i8254_delay(10);
	}
	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	} else {
		/* Synchronize TSC again, check for drift. */
		drift = ci->ci_data.cpu_cc_skew;
		psl = x86_read_psl();
		x86_disable_intr();
		wbinvd();
		tsc_sync_bp(ci);
		x86_write_psl(psl);
		drift -= ci->ci_data.cpu_cc_skew;
		aprint_debug_dev(ci->ci_dev, "TSC skew=%lld drift=%lld\n",
		    (long long)ci->ci_data.cpu_cc_skew, (long long)drift);
		tsc_sync_drift(drift);
	}
}

/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	struct pcb *pcb;
	int s, i;

	cpu_init_msrs(ci, true);
	cpu_probe(ci);

	ci->ci_data.cpu_cc_freq = cpu_info_primary.ci_data.cpu_cc_freq;
	/* cpu_get_tsc_freq(ci); */

	KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0);

	/*
	 * Synchronize time stamp counters.  Invalidate cache and do it
	 * twice to try and minimize possible cache effects.  Note that
	 * interrupts are off at this point.
	 */
	wbinvd();
	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);
	tsc_sync_ap(ci);

	/*
	 * Wait to be brought online.  Use 'monitor/mwait' if available,
	 * in order to make the TSC drift as much as possible, so that
	 * we can detect it later.  If not available, try 'pause'.
	 * We'd like to use 'hlt', but we have interrupts off.
	 */
	while ((ci->ci_flags & CPUF_GO) == 0) {
		if ((cpu_feature[1] & CPUID2_MONITOR) != 0) {
			x86_monitor(&ci->ci_flags, 0, 0);
			if ((ci->ci_flags & CPUF_GO) != 0) {
				continue;
			}
			x86_mwait(0, 0);
		} else {
			for (i = 10000; i != 0; i--) {
				x86_pause();
			}
		}
	}

	/* Because the text may have been patched in x86_patch(). */
	wbinvd();
	x86_flush();
	tlbflushg();

	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);

#ifdef PAE
	pd_entry_t *l3_pd = ci->ci_pae_l3_pdir;
	for (i = 0; i < PDP_SIZE; i++) {
		l3_pd[i] = pmap_kernel()->pm_pdirpa[i] | PG_V;
	}
	lcr3(ci->ci_pae_l3_pdirpa);
#else
	lcr3(pmap_pdirpa(pmap_kernel(), 0));
#endif

	pcb = lwp_getpcb(curlwp);
	pcb->pcb_cr3 = rcr3();
	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	lcr0(pcb->pcb_cr0);

	cpu_init_idt();
	gdt_init_cpu(ci);
#if NLAPIC > 0
	lapic_enable();
	lapic_set_lvt();
	lapic_initclocks();
#endif

	fpuinit(ci);
	lldt(GSYSSEL(GLDT_SEL, SEL_KPL));
	ltr(ci->ci_tss_sel);

	cpu_init(ci);
	cpu_get_tsc_freq(ci);

	s = splhigh();
#ifdef i386
	lapic_tpr = 0;
#else
	lcr8(0);
#endif
	x86_enable_intr();
	splx(s);
	x86_errata();

	aprint_debug_dev(ci->ci_dev, "running\n");

	idle_loop(NULL);
	KASSERT(false);
}
#endif

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curlwp 		fpcurlwp\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
		    (long)ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif

#if NLAPIC > 0
static void
cpu_copy_trampoline(void)
{
	/*
	 * Copy boot code.
	 */
	extern u_char cpu_spinup_trampoline[];
	extern u_char cpu_spinup_trampoline_end[];

	vaddr_t mp_trampoline_vaddr;

	mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY);

	pmap_kenter_pa(mp_trampoline_vaddr, mp_trampoline_paddr,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	memcpy((void *)mp_trampoline_vaddr,
	    cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end - cpu_spinup_trampoline);

	pmap_kremove(mp_trampoline_vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, mp_trampoline_vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
}
#endif

#ifdef i386
static void
tss_init(struct i386tss *tss, void *stack, void *func)
{
	KASSERT(curcpu()->ci_pmap == pmap_kernel());

	memset(tss, 0, sizeof *tss);
	tss->tss_esp0 = tss->tss_esp = (int)((char *)stack + USPACE - 16);
	tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	tss->__tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	tss->tss_fs = GSEL(GCPU_SEL, SEL_KPL);
	tss->tss_gs = tss->__tss_es = tss->__tss_ds =
	    tss->__tss_ss = GSEL(GDATA_SEL, SEL_KPL);
	/* %cr3 contains the value associated with pmap_kernel */
	tss->tss_cr3 = rcr3();
	tss->tss_esp = (int)((char *)stack + USPACE - 16);
	tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
	tss->__tss_eflags = PSL_MBO | PSL_NT;	/* XXX not needed? */
	tss->__tss_eip = (int)func;
}

/* XXX */
#define IDTVEC(name)	__CONCAT(X, name)
typedef void (vector)(void);
extern vector IDTVEC(tss_trap08);
#if defined(DDB) && defined(MULTIPROCESSOR)
extern vector Xintrddbipi;
extern int ddb_vec;
#endif

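/*
 * Give each CPU a private TSS and task gate for the double-fault handler
 * (and, with DDB, for the debugger IPI).  A task gate switches to a fresh
 * stack in hardware, so these traps can be handled even when the kernel
 * stack that caused them is corrupt.
 */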
static void
cpu_set_tss_gates(struct cpu_info *ci)
{
	struct segment_descriptor sd;

	ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
	    UVM_KMF_WIRED);
	tss_init(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
	    IDTVEC(tss_trap08));
	setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GTRAPTSS_SEL].sd = sd;
	setgate(&idt[8], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GTRAPTSS_SEL, SEL_KPL));

#if defined(DDB) && defined(MULTIPROCESSOR)
	/*
	 * Set up a separate handler for the DDB IPI, so that it doesn't
	 * stomp on a possibly corrupted stack.
	 *
	 * XXX overwriting the gate set in db_machine_init.
	 * Should rearrange the code so that it's set only once.
	 */
	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
	    UVM_KMF_WIRED);
	tss_init(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack, Xintrddbipi);

	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GIPITSS_SEL].sd = sd;

	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}
#else
static void
cpu_set_tss_gates(struct cpu_info *ci)
{

}
#endif	/* i386 */

#ifdef MULTIPROCESSOR
int
mp_cpu_start(struct cpu_info *ci, paddr_t target)
{
	unsigned short dwordptr[2];
	int error;

	/*
	 * Bootstrap code must be addressable in real mode
	 * and it must be page aligned.
	 */
	KASSERT(target < 0x10000 && target % PAGE_SIZE == 0);

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = target >> 4;

#if NLAPIC > 0
	memcpy((uint8_t *)cmos_data_mapping + 0x467, dwordptr, 4);
#endif

	if ((cpu_feature[0] & CPUID_APIC) == 0) {
		aprint_error("mp_cpu_start: CPU does not have APIC\n");
		return ENODEV;
	}

	/*
	 * "... prior to executing the following sequence:".  We'll also add
	 * in a local cache flush, in case the BIOS has left the AP with its
	 * cache disabled.  It may not be able to cope with MP coherency.
	 */
	wbinvd();

	if (ci->ci_flags & CPUF_AP) {
		error = x86_ipi_init(ci->ci_cpuid);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (1)\n",
			    __func__);
			return error;
		}
		i8254_delay(10000);

		error = x86_ipi_startup(ci->ci_cpuid, target / PAGE_SIZE);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (2)\n",
			    __func__);
			return error;
		}
		i8254_delay(200);

		error = x86_ipi_startup(ci->ci_cpuid, target / PAGE_SIZE);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (3)\n",
			    __func__);
			return error;
		}
		i8254_delay(200);
	}

	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
}
#endif

#ifdef __x86_64__
typedef void (vector)(void);
extern vector Xsyscall, Xsyscall32;
#endif

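/*
 * Program the MSRs used for fast system calls and per-CPU data.  On amd64,
 * MSR_STAR holds the kernel %cs (bits 32-47) and the base selector used by
 * SYSRET (bits 48-63); MSR_LSTAR/MSR_CSTAR hold the 64-bit and 32-bit
 * syscall entry points; MSR_SFMASK lists the RFLAGS bits cleared on entry.
 * With 'full' set, the GS base MSRs are also pointed at this CPU's cpu_info.
 */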
void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
#ifdef __x86_64__
	wrmsr(MSR_STAR,
	    ((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((uint64_t)LSEL(LSYSRETBASE_SEL, SEL_UPL) << 48));
	wrmsr(MSR_LSTAR, (uint64_t)Xsyscall);
	wrmsr(MSR_CSTAR, (uint64_t)Xsyscall32);
	wrmsr(MSR_SFMASK, PSL_NT|PSL_T|PSL_I|PSL_C);

	if (full) {
		wrmsr(MSR_FSBASE, 0);
		wrmsr(MSR_GSBASE, (uint64_t)ci);
		wrmsr(MSR_KERNELGSBASE, 0);
	}
#endif	/* __x86_64__ */

	if (cpu_feature[2] & CPUID_NOX)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}

void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
	fpusave_cpu(true);
	splx(s);
}

/* XXX joerg restructure and restart CPUs individually */
static bool
cpu_stop(device_t dv)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;
	int err;

	KASSERT((ci->ci_flags & CPUF_PRESENT) != 0);

	if ((ci->ci_flags & CPUF_PRIMARY) != 0)
		return true;

	if (ci->ci_data.cpu_idlelwp == NULL)
		return true;

	sc->sc_wasonline = !(ci->ci_schedstate.spc_flags & SPCF_OFFLINE);

	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setstate(ci, false);
		mutex_exit(&cpu_lock);

		if (err != 0)
			return false;
	}

	return true;
}

static bool
cpu_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;

	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	cpufreq_suspend(ci);

	return cpu_stop(dv);
}

static bool
cpu_resume(device_t dv, const pmf_qual_t *qual)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;
	int err = 0;

	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	if ((ci->ci_flags & CPUF_PRIMARY) != 0)
		goto out;

	if (ci->ci_data.cpu_idlelwp == NULL)
		goto out;

	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setstate(ci, true);
		mutex_exit(&cpu_lock);
	}

out:
	if (err != 0)
		return false;

	cpufreq_resume(ci);

	return true;
}

static bool
cpu_shutdown(device_t dv, int how)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;

	if ((ci->ci_flags & CPUF_BSP) != 0)
		return false;

	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	return cpu_stop(dv);
}

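/*
 * Estimate the TSC frequency by counting cycles across a timed delay:
 * i8254_delay(100000) busy-waits 100000us (0.1s) against the i8254 timer,
 * so multiplying the elapsed cycle count by 10 yields cycles per second.
 * E.g. (illustrative numbers only) 240,000,000 cycles in 0.1s => 2.4 GHz.
 */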
void
cpu_get_tsc_freq(struct cpu_info *ci)
{
	uint64_t last_tsc;

	if (cpu_hascounter()) {
		last_tsc = cpu_counter_serializing();
		i8254_delay(100000);
		ci->ci_data.cpu_cc_freq =
		    (cpu_counter_serializing() - last_tsc) * 10;
	}
}

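/*
 * Idle using MONITOR/MWAIT: arm the monitor on ci_want_resched first,
 * then re-check the flag before sleeping.  Any store to the monitored
 * line, including one that slips in between the check and the MWAIT,
 * wakes the CPU, so the wakeup cannot be lost to a race.
 */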
void
x86_cpu_idle_mwait(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

	x86_monitor(&ci->ci_want_resched, 0, 0);
	if (__predict_false(ci->ci_want_resched)) {
		return;
	}
	x86_mwait(0, 0);
}

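/*
 * Idle using HLT: check ci_want_resched with interrupts disabled, then
 * execute STI immediately followed by HLT.  STI keeps interrupts masked
 * until the next instruction completes, so no interrupt (and hence no
 * resched request) can be delivered in the window between the check and
 * the halt.
 */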
void
x86_cpu_idle_halt(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

	x86_disable_intr();
	if (!__predict_false(ci->ci_want_resched)) {
		x86_stihlt();
	} else {
		x86_enable_intr();
	}
}

/*
 * Loads pmap for the current CPU.
 */
void
cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
{
#ifdef PAE
	struct cpu_info *ci = curcpu();
	bool interrupts_enabled;
	pd_entry_t *l3_pd = ci->ci_pae_l3_pdir;
	int i;

	/*
	 * Disable interrupts to block TLB shootdowns, which can reload %cr3.
	 * While this doesn't block NMIs, it's probably OK, as NMIs are
	 * unlikely to reload %cr3.
	 */
	interrupts_enabled = (x86_read_flags() & PSL_I) != 0;
	if (interrupts_enabled)
		x86_disable_intr();

	for (i = 0; i < PDP_SIZE; i++) {
		l3_pd[i] = pmap->pm_pdirpa[i] | PG_V;
	}

	if (interrupts_enabled)
		x86_enable_intr();
	tlbflush();
#else /* PAE */
	lcr3(pmap_pdirpa(pmap, 0));
#endif /* PAE */
}

/*
 * Notify all other cpus to halt.
 */

void
cpu_broadcast_halt(void)
{
	x86_broadcast_ipi(X86_IPI_HALT);
}

/*
 * Send a dummy IPI to a cpu to force it to run splraise()/spllower().
 */

void
cpu_kick(struct cpu_info *ci)
{
	x86_send_ipi(ci, 0);
}