      1 /*	$NetBSD: cpu.c,v 1.44 2010/05/04 23:27:14 jym Exp $	*/
      2 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
      3 
      4 /*-
      5  * Copyright (c) 2000 The NetBSD Foundation, Inc.
      6  * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to The NetBSD Foundation
     10  * by RedBack Networks Inc.
     11  *
     12  * Author: Bill Sommerfeld
     13  *
     14  * Redistribution and use in source and binary forms, with or without
     15  * modification, are permitted provided that the following conditions
     16  * are met:
     17  * 1. Redistributions of source code must retain the above copyright
     18  *    notice, this list of conditions and the following disclaimer.
     19  * 2. Redistributions in binary form must reproduce the above copyright
     20  *    notice, this list of conditions and the following disclaimer in the
     21  *    documentation and/or other materials provided with the distribution.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33  * POSSIBILITY OF SUCH DAMAGE.
     34  */
     35 
     36 /*
     37  * Copyright (c) 1999 Stefan Grefen
     38  *
     39  * Redistribution and use in source and binary forms, with or without
     40  * modification, are permitted provided that the following conditions
     41  * are met:
     42  * 1. Redistributions of source code must retain the above copyright
     43  *    notice, this list of conditions and the following disclaimer.
     44  * 2. Redistributions in binary form must reproduce the above copyright
     45  *    notice, this list of conditions and the following disclaimer in the
     46  *    documentation and/or other materials provided with the distribution.
     47  * 3. All advertising materials mentioning features or use of this software
     48  *    must display the following acknowledgement:
     49  *      This product includes software developed by the NetBSD
     50  *      Foundation, Inc. and its contributors.
     51  * 4. Neither the name of The NetBSD Foundation nor the names of its
     52  *    contributors may be used to endorse or promote products derived
     53  *    from this software without specific prior written permission.
     54  *
     55  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
     56  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
     59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     65  * SUCH DAMAGE.
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.44 2010/05/04 23:27:14 jym Exp $");
     70 
     71 #include "opt_ddb.h"
     72 #include "opt_multiprocessor.h"
     73 #include "opt_mpbios.h"		/* for MPDEBUG */
     74 #include "opt_mtrr.h"
     75 #include "opt_xen.h"
     76 
     77 #include "lapic.h"
     78 #include "ioapic.h"
     79 
     80 #include <sys/param.h>
     81 #include <sys/proc.h>
     82 #include <sys/systm.h>
     83 #include <sys/device.h>
     84 #include <sys/kmem.h>
     85 #include <sys/cpu.h>
     86 #include <sys/atomic.h>
     87 #include <sys/reboot.h>
     88 
     89 #include <uvm/uvm_extern.h>
     90 
     91 #include <machine/cpufunc.h>
     92 #include <machine/cpuvar.h>
     93 #include <machine/pmap.h>
     94 #include <machine/vmparam.h>
     95 #include <machine/mpbiosvar.h>
     96 #include <machine/pcb.h>
     97 #include <machine/specialreg.h>
     98 #include <machine/segments.h>
     99 #include <machine/gdt.h>
    100 #include <machine/mtrr.h>
    101 #include <machine/pio.h>
    102 
    103 #include <xen/vcpuvar.h>
    104 
    105 #if NLAPIC > 0
    106 #include <machine/apicvar.h>
    107 #include <machine/i82489reg.h>
    108 #include <machine/i82489var.h>
    109 #endif
    110 
    111 #include <dev/ic/mc146818reg.h>
    112 #include <dev/isa/isareg.h>
    113 
    114 #if MAXCPUS > 32
     115 #error cpu_info contains 32-bit bitmasks
    116 #endif
    117 
    118 int     cpu_match(device_t, cfdata_t, void *);
    119 void    cpu_attach(device_t, device_t, void *);
    120 int     vcpu_match(device_t, cfdata_t, void *);
    121 void    vcpu_attach(device_t, device_t, void *);
    122 void    cpu_attach_common(device_t, device_t, void *);
    123 void	cpu_offline_md(void);
    124 
    125 struct cpu_softc {
    126 	device_t sc_dev;		/* device tree glue */
    127 	struct cpu_info *sc_info;	/* pointer to CPU info */
    128 	bool sc_wasonline;
    129 };
    130 
    131 int mp_cpu_start(struct cpu_info *, paddr_t);
    132 void mp_cpu_start_cleanup(struct cpu_info *);
    133 const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
    134 				      mp_cpu_start_cleanup };
    135 
    136 CFATTACH_DECL_NEW(cpu, sizeof(struct cpu_softc),
    137     cpu_match, cpu_attach, NULL, NULL);
    138 CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc),
    139     vcpu_match, vcpu_attach, NULL, NULL);
    140 
    141 /*
    142  * Statically-allocated CPU info for the primary CPU (or the only
    143  * CPU, on uniprocessors).  The CPU info list is initialized to
    144  * point at it.
    145  */
    146 #ifdef TRAPLOG
    147 #include <machine/tlog.h>
    148 struct tlog tlog_primary;
    149 #endif
    150 struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
    151 	.ci_dev = 0,
    152 	.ci_self = &cpu_info_primary,
    153 	.ci_idepth = -1,
    154 	.ci_curlwp = &lwp0,
    155 	.ci_curldt = -1,
    156 #ifdef TRAPLOG
    157 	.ci_tlog = &tlog_primary,
    158 #endif
    159 
    160 };
    161 struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = {
    162 	.ci_dev = 0,
    163 	.ci_self = &phycpu_info_primary,
    164 };
    165 
    166 struct cpu_info *cpu_info_list = &cpu_info_primary;
    167 struct cpu_info *phycpu_info_list = &phycpu_info_primary;
    168 
    169 static void	cpu_set_tss_gates(struct cpu_info *ci);
    170 
    171 uint32_t cpus_attached = 0;
    172 uint32_t cpus_running = 0;
    173 
    174 uint32_t phycpus_attached = 0;
    175 uint32_t phycpus_running = 0;
    176 
    177 uint32_t cpu_feature[5]; /* X86 CPUID feature bits
    178 			  *	[0] basic features %edx
    179 			  *	[1] basic features %ecx
    180 			  *	[2] extended features %edx
    181 			  *	[3] extended features %ecx
    182 			  *	[4] VIA padlock features
    183 			  */
    184 
    185 bool x86_mp_online;
    186 paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;
    187 
    188 #if defined(MULTIPROCESSOR)
    189 void    	cpu_hatch(void *);
    190 static void    	cpu_boot_secondary(struct cpu_info *ci);
    191 static void    	cpu_start_secondary(struct cpu_info *ci);
    192 static void	cpu_copy_trampoline(void);
    193 
    194 /*
    195  * Runs once per boot once multiprocessor goo has been detected and
    196  * the local APIC on the boot processor has been mapped.
    197  *
    198  * Called from lapic_boot_init() (from mpbios_scan()).
    199  */
    200 void
    201 cpu_init_first(void)
    202 {
    203 
    204 	cpu_info_primary.ci_cpuid = lapic_cpu_number();
    205 	cpu_copy_trampoline();
    206 }
    207 #endif	/* MULTIPROCESSOR */
    208 
    209 int
    210 cpu_match(device_t parent, cfdata_t match, void *aux)
    211 {
    212 
    213 	return 1;
    214 }
    215 
    216 void
    217 cpu_attach(device_t parent, device_t self, void *aux)
    218 {
    219 	struct cpu_softc *sc = device_private(self);
    220 	struct cpu_attach_args *caa = aux;
    221 	struct cpu_info *ci;
    222 	uintptr_t ptr;
    223 	static bool again = false;
    224 
    225 	sc->sc_dev = self;
    226 
    227 	if (phycpus_attached == ~0) {
    228 		aprint_error(": increase MAXCPUS\n");
    229 		return;
    230 	}
    231 
    232 	/*
    233 	 * If we're an Application Processor, allocate a cpu_info
    234 	 * structure, otherwise use the primary's.
    235 	 */
    236 	if (caa->cpu_role == CPU_ROLE_AP) {
    237 		if ((boothowto & RB_MD1) != 0) {
    238 			aprint_error(": multiprocessor boot disabled\n");
    239 			if (!pmf_device_register(self, NULL, NULL))
    240 				aprint_error_dev(self,
    241 				   "couldn't establish power handler\n");
    242 			return;
    243 		}
    244 		aprint_naive(": Application Processor\n");
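         		/*
         		 * Over-allocate by CACHE_LINE_SIZE - 1 bytes so the cpu_info
         		 * can be aligned to a cache line by roundup2() below and not
         		 * share a line with unrelated data.
         		 */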
    245 		ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
    246 		    KM_SLEEP);
    247 		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
    248 		ci->ci_curldt = -1;
    249 	} else {
    250 		aprint_naive(": %s Processor\n",
    251 		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
    252 		ci = &phycpu_info_primary;
    253 	}
    254 
    255 	ci->ci_self = ci;
    256 	sc->sc_info = ci;
    257 
    258 	ci->ci_dev = self;
    259 	ci->ci_cpuid = caa->cpu_number;
    260 	ci->ci_vcpu = NULL;
    261 
    262 	/*
     263 	 * The boot processor may not be attached first, but the steps
     264 	 * below must be done before other processors can be booted.
    265 	 */
    266 	if (!again) {
    267 		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
    268 		/* Basic init */
    269 		again = true;
    270 	}
    271 
    272 	printf(": ");
    273 	switch (caa->cpu_role) {
    274 	case CPU_ROLE_SP:
    275 		printf("(uniprocessor)\n");
    276 		atomic_or_32(&ci->ci_flags, CPUF_SP);
    277 		break;
    278 
    279 	case CPU_ROLE_BP:
    280 		printf("(boot processor)\n");
    281 		atomic_or_32(&ci->ci_flags, CPUF_BSP);
    282 		break;
    283 
    284 	case CPU_ROLE_AP:
    285 		/*
     286 		 * Report on the application processor and append it to the
     287 		 * physical CPU list.
    287 		 */
    288 		printf("(application processor)\n");
    289 		if (ci->ci_flags & CPUF_PRESENT) {
    290 			struct cpu_info *tmp;
    291 
    292 			tmp = phycpu_info_list;
    293 			while (tmp->ci_next)
    294 				tmp = tmp->ci_next;
    295 
    296 			tmp->ci_next = ci;
    297 		}
    298 		break;
    299 
    300 	default:
    301 		panic("unknown processor type??\n");
    302 	}
    303 
    304 	atomic_or_32(&phycpus_attached, ci->ci_cpumask);
    305 
    306 	return;
    307 }
    308 
    309 int
    310 vcpu_match(device_t parent, cfdata_t match, void *aux)
    311 {
    312 	struct vcpu_attach_args *vcaa = aux;
    313 
    314 	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0)
    315 		return 1;
    316 	return 0;
    317 }
    318 
    319 void
    320 vcpu_attach(device_t parent, device_t self, void *aux)
    321 {
    322 	struct vcpu_attach_args *vcaa = aux;
    323 
    324 	cpu_attach_common(parent, self, &vcaa->vcaa_caa);
    325 }
    326 
    327 static void
    328 cpu_vm_init(struct cpu_info *ci)
    329 {
    330 	int ncolors = 2, i;
    331 
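         	/*
         	 * Each cache contributes one page color per page that fits in a
         	 * single way, i.e. atop(totalsize) / associativity.
         	 */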
    332 	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
    333 		struct x86_cache_info *cai;
    334 		int tcolors;
    335 
    336 		cai = &ci->ci_cinfo[i];
    337 
    338 		tcolors = atop(cai->cai_totalsize);
    339 		switch(cai->cai_associativity) {
    340 		case 0xff:
    341 			tcolors = 1; /* fully associative */
    342 			break;
    343 		case 0:
    344 		case 1:
    345 			break;
    346 		default:
    347 			tcolors /= cai->cai_associativity;
    348 		}
    349 		ncolors = max(ncolors, tcolors);
    350 	}
    351 
    352 	/*
    353 	 * Knowing the size of the largest cache on this CPU, re-color
    354 	 * our pages.
    355 	 */
    356 	if (ncolors <= uvmexp.ncolors)
    357 		return;
    358 	aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
    359 	uvm_page_recolor(ncolors);
    360 }
    361 
    362 void
    363 cpu_attach_common(device_t parent, device_t self, void *aux)
    364 {
    365 	struct cpu_softc *sc = device_private(self);
    366 	struct cpu_attach_args *caa = aux;
    367 	struct cpu_info *ci;
    368 	uintptr_t ptr;
    369 	int cpunum = caa->cpu_number;
    370 	static bool again = false;
    371 
    372 	sc->sc_dev = self;
    373 
    374 	/*
    375 	 * If we're an Application Processor, allocate a cpu_info
    376 	 * structure, otherwise use the primary's.
    377 	 */
    378 	if (caa->cpu_role == CPU_ROLE_AP) {
    379 		aprint_naive(": Application Processor\n");
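         		/* As in cpu_attach(): over-allocate and cache-line align the cpu_info. */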
    380 		ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
    381 		    KM_SLEEP);
    382 		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
    383 		memset(ci, 0, sizeof(*ci));
    384 #ifdef TRAPLOG
    385 		ci->ci_tlog_base = kmem_zalloc(sizeof(struct tlog), KM_SLEEP);
    386 #endif
    387 	} else {
    388 		aprint_naive(": %s Processor\n",
    389 		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
    390 		ci = &cpu_info_primary;
    391 #if NLAPIC > 0
    392 		if (cpunum != lapic_cpu_number()) {
    393 			/* XXX should be done earlier */
    394 			uint32_t reg;
    395 			aprint_verbose("\n");
    396 			aprint_verbose_dev(self, "running CPU at apic %d"
    397 			    " instead of at expected %d", lapic_cpu_number(),
    398 			    cpunum);
    399 			reg = i82489_readreg(LAPIC_ID);
    400 			i82489_writereg(LAPIC_ID, (reg & ~LAPIC_ID_MASK) |
    401 			    (cpunum << LAPIC_ID_SHIFT));
    402 		}
    403 		if (cpunum != lapic_cpu_number()) {
    404 			aprint_error_dev(self, "unable to reset apic id\n");
    405 		}
    406 #endif
    407 	}
    408 
    409 	ci->ci_self = ci;
    410 	sc->sc_info = ci;
    411 	ci->ci_dev = self;
    412 	ci->ci_cpuid = cpunum;
    413 
    414 	KASSERT(HYPERVISOR_shared_info != NULL);
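         	/* Point at this vCPU's slot in the Xen shared info page. */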
    415 	ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum];
    416 
    417 	ci->ci_func = caa->cpu_func;
    418 
    419 	/* Must be called before mi_cpu_attach(). */
    420 	cpu_vm_init(ci);
    421 
    422 	if (caa->cpu_role == CPU_ROLE_AP) {
    423 		int error;
    424 
    425 		error = mi_cpu_attach(ci);
    426 		if (error != 0) {
    427 			aprint_normal("\n");
    428 			aprint_error_dev(self,
    429 			    "mi_cpu_attach failed with %d\n", error);
    430 			return;
    431 		}
    432 	} else {
    433 		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
    434 	}
    435 
    436 	ci->ci_cpumask = (1 << cpu_index(ci));
    437 	pmap_reference(pmap_kernel());
    438 	ci->ci_pmap = pmap_kernel();
    439 	ci->ci_tlbstate = TLBSTATE_STALE;
    440 
    441 	/*
     442 	 * The boot processor may not be attached first, but the steps
     443 	 * below must be done before other processors can be booted.
    444 	 */
    445 	if (!again) {
    446 		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
    447 		/* Basic init. */
    448 		cpu_intr_init(ci);
    449 		cpu_get_tsc_freq(ci);
    450 		cpu_init(ci);
    451 		cpu_set_tss_gates(ci);
    452 		pmap_cpu_init_late(ci);
    453 #if NLAPIC > 0
    454 		if (caa->cpu_role != CPU_ROLE_SP) {
    455 			/* Enable lapic. */
    456 			lapic_enable();
    457 			lapic_set_lvt();
    458 			lapic_calibrate_timer();
    459 		}
    460 #endif
    461 		/* Make sure DELAY() is initialized. */
    462 		DELAY(1);
    463 		again = true;
    464 	}
    465 
    466 	/* further PCB init done later. */
    467 
    468 	switch (caa->cpu_role) {
    469 	case CPU_ROLE_SP:
    470 		atomic_or_32(&ci->ci_flags, CPUF_SP);
    471 		cpu_identify(ci);
    472 #if 0
    473 		x86_errata();
    474 #endif
    475 		x86_cpu_idle_init();
    476 		break;
    477 
    478 	case CPU_ROLE_BP:
    479 		atomic_or_32(&ci->ci_flags, CPUF_BSP);
    480 		cpu_identify(ci);
    481 		cpu_init(ci);
    482 #if 0
    483 		x86_errata();
    484 #endif
    485 		x86_cpu_idle_init();
    486 		break;
    487 
    488 	case CPU_ROLE_AP:
    489 		/*
     490 		 * Set up and start an application processor, then link it
     491 		 * into the CPU list.
    491 		 */
    492 
    493 #if defined(MULTIPROCESSOR)
    494 		cpu_intr_init(ci);
    495 		gdt_alloc_cpu(ci);
    496 		cpu_set_tss_gates(ci);
    497 		pmap_cpu_init_early(ci);
    498 		pmap_cpu_init_late(ci);
    499 		cpu_start_secondary(ci);
    500 		if (ci->ci_flags & CPUF_PRESENT) {
    501 			struct cpu_info *tmp;
    502 
    503 			identifycpu(ci);
    504 			tmp = cpu_info_list;
    505 			while (tmp->ci_next)
    506 				tmp = tmp->ci_next;
    507 
    508 			tmp->ci_next = ci;
    509 		}
    510 #else
    511 		aprint_error_dev(self, "not started\n");
    512 #endif
    513 		break;
    514 
    515 	default:
    516 		aprint_normal("\n");
    517 		panic("unknown processor type??\n");
    518 	}
    519 
    520 	atomic_or_32(&cpus_attached, ci->ci_cpumask);
    521 
    522 #if 0
    523 	if (!pmf_device_register(self, cpu_suspend, cpu_resume))
    524 		aprint_error_dev(self, "couldn't establish power handler\n");
    525 #endif
    526 
    527 #if defined(MULTIPROCESSOR)
    528 	if (mp_verbose) {
    529 		struct lwp *l = ci->ci_data.cpu_idlelwp;
    530 		struct pcb *pcb = lwp_getpcb(l);
    531 
    532 		aprint_verbose_dev(self,
    533 		    "idle lwp at %p, idle sp at 0x%p\n",
    534 		    l,
    535 #ifdef i386
    536 		    (void *)pcb->pcb_esp
    537 #else
    538 		    (void *)pcb->pcb_rsp
    539 #endif
    540 		);
    541 
    542 	}
    543 #endif
    544 }
    545 
    546 /*
    547  * Initialize the processor appropriately.
    548  */
    549 
    550 void
    551 cpu_init(struct cpu_info *ci)
    552 {
    553 
    554 	/*
    555 	 * On a P6 or above, enable global TLB caching if the
    556 	 * hardware supports it.
    557 	 */
    558 	if (cpu_feature[0] & CPUID_PGE)
    559 		lcr4(rcr4() | CR4_PGE);	/* enable global TLB caching */
    560 
    561 #ifdef XXXMTRR
    562 	/*
    563 	 * On a P6 or above, initialize MTRR's if the hardware supports them.
    564 	 */
    565 	if (cpu_feature[0] & CPUID_MTRR) {
    566 		if ((ci->ci_flags & CPUF_AP) == 0)
    567 			i686_mtrr_init_first();
    568 		mtrr_init_cpu(ci);
    569 	}
    570 #endif
    571 	/*
     572 	 * If we have FXSAVE/FXRSTOR, use them.
    573 	 */
    574 	if (cpu_feature[0] & CPUID_FXSR) {
    575 		lcr4(rcr4() | CR4_OSFXSR);
    576 
    577 		/*
    578 		 * If we have SSE/SSE2, enable XMM exceptions.
    579 		 */
    580 		if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2))
    581 			lcr4(rcr4() | CR4_OSXMMEXCPT);
    582 	}
    583 
    584 	atomic_or_32(&cpus_running, ci->ci_cpumask);
    585 	atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
    586 }
    587 
    588 
    589 #ifdef MULTIPROCESSOR
    590 void
    591 cpu_boot_secondary_processors(void)
    592 {
    593 	struct cpu_info *ci;
    594 	u_long i;
    595 
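         	/* Release every attached AP that hatched; skip the primary CPU itself. */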
    596 	for (i = 0; i < maxcpus; i++) {
    597 		ci = cpu_lookup(i);
    598 		if (ci == NULL)
    599 			continue;
    600 		if (ci->ci_data.cpu_idlelwp == NULL)
    601 			continue;
    602 		if ((ci->ci_flags & CPUF_PRESENT) == 0)
    603 			continue;
    604 		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
    605 			continue;
    606 		cpu_boot_secondary(ci);
    607 	}
    608 
    609 	x86_mp_online = true;
    610 }
    611 
    612 static void
    613 cpu_init_idle_lwp(struct cpu_info *ci)
    614 {
    615 	struct lwp *l = ci->ci_data.cpu_idlelwp;
    616 	struct pcb *pcb = lwp_getpcb(l);
    617 
    618 	pcb->pcb_cr0 = rcr0();
    619 }
    620 
    621 void
    622 cpu_init_idle_lwps(void)
    623 {
    624 	struct cpu_info *ci;
    625 	u_long i;
    626 
    627 	for (i = 0; i < maxcpus; i++) {
    628 		ci = cpu_lookup(i);
    629 		if (ci == NULL)
    630 			continue;
    631 		if (ci->ci_data.cpu_idlelwp == NULL)
    632 			continue;
    633 		if ((ci->ci_flags & CPUF_PRESENT) == 0)
    634 			continue;
    635 		cpu_init_idle_lwp(ci);
    636 	}
    637 }
    638 
     639 static void
    640 cpu_start_secondary(struct cpu_info *ci)
    641 {
    642 	int i;
    643 	struct pmap *kpm = pmap_kernel();
    644 	extern uint32_t mp_pdirpa;
    645 
    646 	mp_pdirpa = kpm->pm_pdirpa; /* XXX move elsewhere, not per CPU. */
    647 
    648 	atomic_or_32(&ci->ci_flags, CPUF_AP);
    649 
    650 	aprint_debug_dev(ci->ci_dev, "starting\n");
    651 
    652 	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
    653 	if (CPU_STARTUP(ci, mp_trampoline_paddr) != 0)
    654 		return;
    655 
    656 	/*
    657 	 * wait for it to become ready
    658 	 */
    659 	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
    660 #ifdef MPDEBUG
    661 		extern int cpu_trace[3];
    662 		static int otrace[3];
    663 		if (memcmp(otrace, cpu_trace, sizeof(otrace)) != 0) {
    664 			aprint_debug_dev(ci->ci_dev, "trace %02x %02x %02x\n",
    665 				cpu_trace[0], cpu_trace[1], cpu_trace[2]);
    666 			memcpy(otrace, cpu_trace, sizeof(otrace));
    667 		}
    668 #endif
    669 		delay(10);
    670 	}
    671 	if ((ci->ci_flags & CPUF_PRESENT) == 0) {
    672 		aprint_error_dev(ci->ci_dev, "failed to become ready\n");
    673 #if defined(MPDEBUG) && defined(DDB)
    674 		printf("dropping into debugger; continue from here to resume boot\n");
    675 		Debugger();
    676 #endif
    677 	}
    678 
    679 	CPU_START_CLEANUP(ci);
    680 }
    681 
     682 static void
    683 cpu_boot_secondary(struct cpu_info *ci)
    684 {
    685 	int i;
    686 
    687 	atomic_or_32(&ci->ci_flags, CPUF_GO);
    688 	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
    689 		delay(10);
    690 	}
    691 	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
    692 		aprint_error_dev(ci->ci_dev, "CPU failed to start\n");
    693 #if defined(MPDEBUG) && defined(DDB)
    694 		printf("dropping into debugger; continue from here to resume boot\n");
    695 		Debugger();
    696 #endif
    697 	}
    698 }
    699 
    700 /*
     701  * The CPU ends up here when it's ready to run.
    702  * This is called from code in mptramp.s; at this point, we are running
    703  * in the idle pcb/idle stack of the new CPU.  When this function returns,
    704  * this processor will enter the idle loop and start looking for work.
    705  *
    706  * XXX should share some of this with init386 in machdep.c
    707  */
    708 void
    709 cpu_hatch(void *v)
    710 {
    711 	struct cpu_info *ci = (struct cpu_info *)v;
    712 	struct pcb *pcb;
    713 	int s, i;
    714 
    715 	cpu_probe(ci);
    716 
    717 	cpu_feature[0] &= ~CPUID_FEAT_BLACKLIST;
    718 	cpu_feature[2] &= ~CPUID_FEAT_EXT_BLACKLIST;
    719 
     720 	cpu_init_msrs(ci, true);
    721 
    722 	KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0);
    723 	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);
    724 	while ((ci->ci_flags & CPUF_GO) == 0) {
    725 		/* Don't use delay, boot CPU may be patching the text. */
    726 		for (i = 10000; i != 0; i--)
    727 			x86_pause();
    728 	}
    729 
    730 	/* Because the text may have been patched in x86_patch(). */
    731 	wbinvd();
    732 	x86_flush();
    733 
    734 	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);
    735 
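         	/* Switch to the kernel page tables and record them in the PCB. */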
    736 	pcb = lwp_getpcb(curlwp);
    737 	lcr3(pmap_kernel()->pm_pdirpa);
    738 	pcb->pcb_cr3 = pmap_kernel()->pm_pdirpa;
    739 	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
    740 	lcr0(pcb->pcb_cr0);
    741 
    742 	cpu_init_idt();
    743 	gdt_init_cpu(ci);
    744 	lapic_enable();
    745 	lapic_set_lvt();
    746 	lapic_initclocks();
    747 
    748 #ifdef i386
    749 	npxinit(ci);
    750 #else
    751 	fpuinit(ci);
    752 #endif
    753 
    754 	lldt(GSEL(GLDT_SEL, SEL_KPL));
    755 	ltr(ci->ci_tss_sel);
    756 
    757 	cpu_init(ci);
    758 	cpu_get_tsc_freq(ci);
    759 
    760 	s = splhigh();
    761 #ifdef i386
    762 	lapic_tpr = 0;
    763 #else
    764 	lcr8(0);
    765 #endif
    766 	x86_enable_intr();
    767 	splx(s);
    768 #if 0
    769 	x86_errata();
    770 #endif
    771 
    772 	aprint_debug_dev(ci->ci_dev, "CPU %ld running\n",
    773 		(long)ci->ci_cpuid);
    774 }
    775 
    776 #if defined(DDB)
    777 
    778 #include <ddb/db_output.h>
    779 #include <machine/db_machdep.h>
    780 
    781 /*
    782  * Dump CPU information from ddb.
    783  */
    784 void
    785 cpu_debug_dump(void)
    786 {
    787 	struct cpu_info *ci;
    788 	CPU_INFO_ITERATOR cii;
    789 
    790 	db_printf("addr		dev	id	flags	ipis	curlwp 		fpcurlwp\n");
    791 	for (CPU_INFO_FOREACH(cii, ci)) {
    792 		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
    793 		    ci,
    794 		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
    795 		    (long)ci->ci_cpuid,
    796 		    ci->ci_flags, ci->ci_ipis,
    797 		    ci->ci_curlwp,
    798 		    ci->ci_fpcurlwp);
    799 	}
    800 }
    801 #endif /* DDB */
    802 
    803 static void
    804 cpu_copy_trampoline(void)
    805 {
    806 	/*
    807 	 * Copy boot code.
    808 	 */
    809 	extern u_char cpu_spinup_trampoline[];
    810 	extern u_char cpu_spinup_trampoline_end[];
    811 
    812 	vaddr_t mp_trampoline_vaddr;
    813 
    814 	mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
    815 		UVM_KMF_VAONLY);
    816 
    817 	pmap_kenter_pa(mp_trampoline_vaddr, mp_trampoline_paddr,
    818 		VM_PROT_READ | VM_PROT_WRITE, 0);
    819 	pmap_update(pmap_kernel());
    820 	memcpy((void *)mp_trampoline_vaddr,
    821 		cpu_spinup_trampoline,
    822 		cpu_spinup_trampoline_end - cpu_spinup_trampoline);
    823 
    824 	pmap_kremove(mp_trampoline_vaddr, PAGE_SIZE);
    825 	pmap_update(pmap_kernel());
    826 	uvm_km_free(kernel_map, mp_trampoline_vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
    827 }
    828 
    829 #endif /* MULTIPROCESSOR */
    830 
    831 #ifdef i386
    832 #if 0
    833 static void
    834 tss_init(struct i386tss *tss, void *stack, void *func)
    835 {
    836 	memset(tss, 0, sizeof *tss);
    837 	tss->tss_esp0 = tss->tss_esp = (int)((char *)stack + USPACE - 16);
    838 	tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
    839 	tss->__tss_cs = GSEL(GCODE_SEL, SEL_KPL);
    840 	tss->tss_fs = GSEL(GCPU_SEL, SEL_KPL);
    841 	tss->tss_gs = tss->__tss_es = tss->__tss_ds =
    842 	    tss->__tss_ss = GSEL(GDATA_SEL, SEL_KPL);
    843 	tss->tss_cr3 = pmap_kernel()->pm_pdirpa;
    844 	tss->tss_esp = (int)((char *)stack + USPACE - 16);
    845 	tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
    846 	tss->__tss_eflags = PSL_MBO | PSL_NT;   /* XXX not needed? */
    847 	tss->__tss_eip = (int)func;
    848 }
    849 #endif
    850 
    851 /* XXX */
    852 #define IDTVEC(name)	__CONCAT(X, name)
    853 typedef void (vector)(void);
    854 extern vector IDTVEC(tss_trap08);
    855 #ifdef DDB
    856 extern vector Xintrddbipi;
    857 extern int ddb_vec;
    858 #endif
    859 
    860 static void
    861 cpu_set_tss_gates(struct cpu_info *ci)
    862 {
    863 #if 0
    864 	struct segment_descriptor sd;
    865 
    866 	ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
    867 	    UVM_KMF_WIRED);
    868 	tss_init(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
    869 	    IDTVEC(tss_trap08));
    870 	setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
    871 	    SDT_SYS386TSS, SEL_KPL, 0, 0);
    872 	ci->ci_gdt[GTRAPTSS_SEL].sd = sd;
    873 	setgate(&idt[8], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
    874 	    GSEL(GTRAPTSS_SEL, SEL_KPL));
    875 #endif
    876 
    877 #if defined(DDB) && defined(MULTIPROCESSOR)
    878 	/*
    879 	 * Set up separate handler for the DDB IPI, so that it doesn't
    880 	 * stomp on a possibly corrupted stack.
    881 	 *
    882 	 * XXX overwriting the gate set in db_machine_init.
    883 	 * Should rearrange the code so that it's set only once.
    884 	 */
    885 	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
    886 	    UVM_KMF_WIRED);
    887 	tss_init(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack,
    888 	    Xintrddbipi);
    889 
    890 	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
    891 	    SDT_SYS386TSS, SEL_KPL, 0, 0);
    892 	ci->ci_gdt[GIPITSS_SEL].sd = sd;
    893 
    894 	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
    895 	    GSEL(GIPITSS_SEL, SEL_KPL));
    896 #endif
    897 }
    898 #else
    899 static void
    900 cpu_set_tss_gates(struct cpu_info *ci)
    901 {
    902 
    903 }
    904 #endif	/* i386 */
    905 
    906 int
    907 mp_cpu_start(struct cpu_info *ci, paddr_t target)
    908 {
    909 #if 0
    910 #if NLAPIC > 0
    911 	int error;
    912 #endif
    913 	unsigned short dwordptr[2];
    914 
    915 	/*
    916 	 * Bootstrap code must be addressable in real mode
    917 	 * and it must be page aligned.
    918 	 */
    919 	KASSERT(target < 0x10000 && target % PAGE_SIZE == 0);
    920 
    921 	/*
    922 	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
    923 	 */
    924 
    925 	outb(IO_RTC, NVRAM_RESET);
    926 	outb(IO_RTC+1, NVRAM_RESET_JUMP);
    927 
    928 	/*
    929 	 * "and the warm reset vector (DWORD based at 40:67) to point
    930 	 * to the AP startup code ..."
    931 	 */
    932 
    933 	dwordptr[0] = 0;
    934 	dwordptr[1] = target >> 4;
    935 
    936 	pmap_kenter_pa (0, 0, VM_PROT_READ|VM_PROT_WRITE, 0);
    937 	memcpy ((uint8_t *) 0x467, dwordptr, 4);
    938 	pmap_kremove (0, PAGE_SIZE);
    939 
    940 #if NLAPIC > 0
    941 	/*
    942 	 * ... prior to executing the following sequence:"
    943 	 */
    944 
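         	/* INIT/STARTUP IPI sequence from the Intel MP specification. */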
    945 	if (ci->ci_flags & CPUF_AP) {
    946 		if ((error = x86_ipi_init(ci->ci_cpuid)) != 0)
    947 			return error;
    948 
    949 		delay(10000);
    950 
     951 		if (cpu_feature[0] & CPUID_APIC) {
    952 			error = x86_ipi_init(ci->ci_cpuid);
    953 			if (error != 0) {
    954 				aprint_error_dev(ci->ci_dev, "%s: IPI not taken (1)\n",
    955 						__func__);
    956 				return error;
    957 			}
    958 
    959 			delay(10000);
    960 
    961 			error = x86_ipi(target / PAGE_SIZE, ci->ci_cpuid,
    962 					LAPIC_DLMODE_STARTUP);
    963 			if (error != 0) {
    964 				aprint_error_dev(ci->ci_dev, "%s: IPI not taken (2)\n",
    965 						__func__);
    966 				return error;
    967 			}
    968 			delay(200);
    969 
    970 			error = x86_ipi(target / PAGE_SIZE, ci->ci_cpuid,
    971 					LAPIC_DLMODE_STARTUP);
    972 			if (error != 0) {
     973 				aprint_error_dev(ci->ci_dev, "%s: IPI not taken (3)\n",
    974 						__func__);
    975 				return error;
    976 			}
    977 			delay(200);
    978 		}
    979 	}
    980 #endif
    981 #endif /* 0 */
    982 	return 0;
    983 }
    984 
    985 void
    986 mp_cpu_start_cleanup(struct cpu_info *ci)
    987 {
    988 #if 0
    989 	/*
    990 	 * Ensure the NVRAM reset byte contains something vaguely sane.
    991 	 */
    992 
    993 	outb(IO_RTC, NVRAM_RESET);
    994 	outb(IO_RTC+1, NVRAM_RESET_RST);
    995 #endif
    996 }
    997 
    998 void
    999 cpu_init_msrs(struct cpu_info *ci, bool full)
   1000 {
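         	/*
         	 * Xen PV guests set the FS/GS segment bases through the
         	 * set_segment_base hypercall rather than the usual MSRs;
         	 * NX is enabled via EFER below when the CPU advertises it.
         	 */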
   1001 #ifdef __x86_64__
   1002 	if (full) {
   1003 		HYPERVISOR_set_segment_base (SEGBASE_FS, 0);
   1004 		HYPERVISOR_set_segment_base (SEGBASE_GS_KERNEL, (uint64_t) ci);
   1005 		HYPERVISOR_set_segment_base (SEGBASE_GS_USER, 0);
   1006 	}
   1007 #endif	/* __x86_64__ */
   1008 
   1009 	if (cpu_feature[2] & CPUID_NOX)
   1010 		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
   1011 }
   1012 
   1013 void
   1014 cpu_offline_md(void)
   1015 {
    1016 	int s;
    1017 
    1018 	s = splhigh();
    1019 #ifdef __i386__
    1020 	npxsave_cpu(true);
    1021 #else
    1022 	fpusave_cpu(true);
    1023 #endif
    1024 	splx(s);
   1025 }
   1026 
   1027 #if 0
   1028 /* XXX joerg restructure and restart CPUs individually */
   1029 static bool
   1030 cpu_suspend(device_t dv, const pmf_qual_t *qual)
   1031 {
   1032 	struct cpu_softc *sc = device_private(dv);
   1033 	struct cpu_info *ci = sc->sc_info;
   1034 	int err;
   1035 
   1036 	if (ci->ci_flags & CPUF_PRIMARY)
   1037 		return true;
   1038 	if (ci->ci_data.cpu_idlelwp == NULL)
   1039 		return true;
   1040 	if ((ci->ci_flags & CPUF_PRESENT) == 0)
   1041 		return true;
   1042 
   1043 	sc->sc_wasonline = !(ci->ci_schedstate.spc_flags & SPCF_OFFLINE);
   1044 
   1045 	if (sc->sc_wasonline) {
   1046 		mutex_enter(&cpu_lock);
   1047 		err = cpu_setstate(ci, false);
   1048 		mutex_exit(&cpu_lock);
   1049 
   1050 		if (err)
   1051 			return false;
   1052 	}
   1053 
   1054 	return true;
   1055 }
   1056 
   1057 static bool
   1058 cpu_resume(device_t dv, const pmf_qual_t *qual)
   1059 {
   1060 	struct cpu_softc *sc = device_private(dv);
   1061 	struct cpu_info *ci = sc->sc_info;
   1062 	int err = 0;
   1063 
   1064 	if (ci->ci_flags & CPUF_PRIMARY)
   1065 		return true;
   1066 	if (ci->ci_data.cpu_idlelwp == NULL)
   1067 		return true;
   1068 	if ((ci->ci_flags & CPUF_PRESENT) == 0)
   1069 		return true;
   1070 
   1071 	if (sc->sc_wasonline) {
   1072 		mutex_enter(&cpu_lock);
   1073 		err = cpu_setstate(ci, true);
   1074 		mutex_exit(&cpu_lock);
   1075 	}
   1076 
   1077 	return err == 0;
   1078 }
   1079 #endif
   1080 
   1081 void
   1082 cpu_get_tsc_freq(struct cpu_info *ci)
   1083 {
   1084 	const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time;
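         	/*
         	 * Xen's tsc_to_system_mul is a 32.32 fixed-point factor that
         	 * converts (shifted) TSC ticks to nanoseconds, so the TSC
         	 * frequency is (10^9 << 32) / mul, corrected by tsc_shift.
         	 */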
   1085 	delay(1000000);
   1086 	uint64_t freq = 1000000000ULL << 32;
   1087 	freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
    1088 	if (tinfo->tsc_shift < 0)
   1089 		freq = freq << -tinfo->tsc_shift;
   1090 	else
   1091 		freq = freq >> tinfo->tsc_shift;
   1092 	ci->ci_data.cpu_cc_freq = freq;
   1093 }
   1094 
   1095 void
   1096 x86_cpu_idle_xen(void)
   1097 {
   1098 	struct cpu_info *ci = curcpu();
   1099 
   1100 	KASSERT(ci->ci_ilevel == IPL_NONE);
   1101 
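         	/*
         	 * Disable interrupts before the final ci_want_resched check so
         	 * that a wakeup cannot slip in between the check and blocking.
         	 */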
   1102 	x86_disable_intr();
   1103 	if (!__predict_false(ci->ci_want_resched)) {
   1104 		idle_block();
   1105 	} else {
   1106 		x86_enable_intr();
   1107 	}
   1108 }
   1109