/*	$NetBSD: cpu.c,v 1.92 2012/06/06 22:22:41 rmind Exp $	*/
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by RedBack Networks Inc.
 *
 * Author: Bill Sommerfeld
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Stefan Grefen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.92 2012/06/06 22:22:41 rmind Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
#include "opt_mtrr.h"
#include "opt_xen.h"

#include "lapic.h"
#include "ioapic.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/atomic.h>
#include <sys/reboot.h>
#include <sys/idle.h>

#include <uvm/uvm.h>

#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/gdt.h>
#include <machine/mtrr.h>
#include <machine/pio.h>

#ifdef i386
#include <machine/npx.h>
#else
#include <machine/fpu.h>
#endif

#include <xen/xen.h>
#include <xen/xen-public/vcpu.h>
#include <xen/vcpuvar.h>

#if NLAPIC > 0
#include <machine/apicvar.h>
#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#endif

#include <dev/ic/mc146818reg.h>
#include <dev/isa/isareg.h>

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);
static void	cpu_defer(device_t);
static int	cpu_rescan(device_t, const char *, const int *);
static void	cpu_childdetached(device_t, device_t);
static int	vcpu_match(device_t, cfdata_t, void *);
static void	vcpu_attach(device_t, device_t, void *);
static void	cpu_attach_common(device_t, device_t, void *);
void		cpu_offline_md(void);

struct cpu_softc {
	device_t sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
	bool sc_wasonline;
};

int mp_cpu_start(struct cpu_info *, vaddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
				      mp_cpu_start_cleanup };

CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached);

CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc),
    vcpu_match, vcpu_attach, NULL, NULL);

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
#ifdef TRAPLOG
#include <machine/tlog.h>
struct tlog tlog_primary;
#endif
struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
	.ci_curldt = -1,
#ifdef TRAPLOG
	.ci_tlog = &tlog_primary,
#endif
};
struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &phycpu_info_primary,
};

struct cpu_info *cpu_info_list = &cpu_info_primary;
struct cpu_info *phycpu_info_list = &phycpu_info_primary;

uint32_t cpu_feature[5]; /* X86 CPUID feature bits
			  *	[0] basic features %edx
			  *	[1] basic features %ecx
			  *	[2] extended features %edx
			  *	[3] extended features %ecx
			  *	[4] VIA padlock features
			  */
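
/*
 * Illustrative use of cpu_feature[] (a sketch, not new functionality):
 * callers test these words against the CPUID_* masks from
 * <machine/specialreg.h>, as cpu_init() below does:
 *
 *	if (cpu_feature[0] & CPUID_FXSR)
 *		lcr4(rcr4() | CR4_OSFXSR);
 */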

bool x86_mp_online;
paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;

#if defined(MULTIPROCESSOR)
void		cpu_hatch(void *);
static void	cpu_boot_secondary(struct cpu_info *ci);
static void	cpu_start_secondary(struct cpu_info *ci);
#endif	/* MULTIPROCESSOR */

static int
cpu_match(device_t parent, cfdata_t match, void *aux)
{

	return 1;
}

static void
cpu_attach(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	static int nphycpu = 0;

	sc->sc_dev = self;

	/*
	 * If we're the first attached CPU, use the statically
	 * allocated primary cpu_info; otherwise allocate a new
	 * cpu_info and link it onto the list.
	 */
	aprint_naive("\n");
	aprint_normal("\n");
	if (nphycpu > 0) {
		struct cpu_info *tmp;
		ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		ci->ci_curldt = -1;

		tmp = phycpu_info_list;
		while (tmp->ci_next)
			tmp = tmp->ci_next;

		tmp->ci_next = ci;
	} else {
		ci = &phycpu_info_primary;
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_acpiid = caa->cpu_id;
	ci->ci_cpuid = caa->cpu_number;
	ci->ci_vcpu = NULL;
	ci->ci_index = nphycpu++;

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	(void)config_defer(self, cpu_defer);
}

static void
cpu_defer(device_t self)
{
	cpu_rescan(self, NULL, NULL);
}

static int
cpu_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct cpu_softc *sc = device_private(self);
	struct cpufeature_attach_args cfaa;
	struct cpu_info *ci = sc->sc_info;

	memset(&cfaa, 0, sizeof(cfaa));
	cfaa.ci = ci;

	if (ifattr_match(ifattr, "cpufeaturebus")) {
		if (ci->ci_frequency == NULL) {
			cfaa.name = "frequency";
			ci->ci_frequency = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}
	}

	return 0;
}

static void
cpu_childdetached(device_t self, device_t child)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_info;

	if (ci->ci_frequency == child)
		ci->ci_frequency = NULL;
}

static int
vcpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct vcpu_attach_args *vcaa = aux;
	struct vcpu_runstate_info vcr;
	int error;

	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0) {
		error = HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info,
		    vcaa->vcaa_caa.cpu_number, &vcr);
		switch (error) {
		case 0:
			return 1;
		case -ENOENT:
			return 0;
		default:
			panic("unknown hypervisor error %d returned "
			    "on vcpu runstate probe", error);
		}
	}

	return 0;
}

static void
vcpu_attach(device_t parent, device_t self, void *aux)
{
	struct vcpu_attach_args *vcaa = aux;

	KASSERT(vcaa->vcaa_caa.cpu_func == NULL);
	vcaa->vcaa_caa.cpu_func = &mp_cpu_funcs;
	cpu_attach_common(parent, self, &vcaa->vcaa_caa);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

static int
vcpu_is_up(struct cpu_info *ci)
{
	KASSERT(ci != NULL);
	return HYPERVISOR_vcpu_op(VCPUOP_is_up, ci->ci_cpuid, NULL);
}

static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch (cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
	}

	/*
	 * Knowing the size of the largest cache on this CPU, potentially
	 * re-color our pages.
	 */
	aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
	uvm_page_recolor(ncolors);
	pmap_tlb_cpu_init(ci);
}
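
/*
 * Worked example for the color computation above (illustrative only):
 * with 4KB pages, a 512KB 8-way set-associative L2 gives
 * atop(512KB) = 128 page-sized chunks and 128 / 8 = 16 colors,
 * while a fully associative cache always contributes a single color.
 */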

static void
cpu_attach_common(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	int cpunum = caa->cpu_number;
	static bool again = false;

	sc->sc_dev = self;

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		aprint_naive(": Application Processor\n");
		ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		memset(ci, 0, sizeof(*ci));
#ifdef TRAPLOG
		ci->ci_tlog_base = kmem_zalloc(sizeof(struct tlog), KM_SLEEP);
#endif
	} else {
		aprint_naive(": %s Processor\n",
		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
		ci = &cpu_info_primary;
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_cpuid = cpunum;

	KASSERT(HYPERVISOR_shared_info != NULL);
	KASSERT(cpunum < XEN_LEGACY_MAX_VCPUS);
	ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum];

	KASSERT(ci->ci_func == 0);
	ci->ci_func = caa->cpu_func;

	/* Must be called before mi_cpu_attach(). */
	cpu_vm_init(ci);

	if (caa->cpu_role == CPU_ROLE_AP) {
		int error;

		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_normal("\n");
			aprint_error_dev(self,
			    "mi_cpu_attach failed with %d\n", error);
			return;
		}
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	KASSERT(ci->ci_cpuid == ci->ci_index);
	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/*
	 * The boot processor may not be attached first, but the
	 * initialization below must be done before the other
	 * processors can be booted.
	 */
	if (!again) {
		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
		/* Basic init. */
		cpu_intr_init(ci);
		cpu_get_tsc_freq(ci);
		cpu_init(ci);
		pmap_cpu_init_late(ci);

		/*
		 * Every processor needs to init its own IPI h/w
		 * (similar to the lapic).
		 */
		xen_ipi_init();

		/* Make sure DELAY() is initialized. */
		DELAY(1);
		again = true;
	}

	/* further PCB init done later. */

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		atomic_or_32(&ci->ci_flags, CPUF_SP);
		cpu_identify(ci);
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_BP:
		atomic_or_32(&ci->ci_flags, CPUF_BSP);
		cpu_identify(ci);
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_AP:
		atomic_or_32(&ci->ci_flags, CPUF_AP);

		/* Report on an AP. */
#if defined(MULTIPROCESSOR)
		/* interrupt handler stack */
		cpu_intr_init(ci);

		/* Setup per-cpu memory for gdt */
		gdt_alloc_cpu(ci);

		pmap_cpu_init_late(ci);
		cpu_start_secondary(ci);

		if (ci->ci_flags & CPUF_PRESENT) {
			struct cpu_info *tmp;

			cpu_identify(ci);
			tmp = cpu_info_list;
			while (tmp->ci_next)
				tmp = tmp->ci_next;

			tmp->ci_next = ci;
		}
#else
		aprint_error(": not started\n");
#endif
		break;

	default:
		aprint_normal("\n");
		panic("unknown processor type");
	}

#ifdef MPVERBOSE
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;
		struct pcb *pcb = lwp_getpcb(l);

		aprint_verbose_dev(self,
		    "idle lwp at %p, idle sp at %p\n",
		    l,
#ifdef i386
		    (void *)pcb->pcb_esp
#else /* i386 */
		    (void *)pcb->pcb_rsp
#endif /* i386 */
		);
	}
#endif /* MPVERBOSE */
}

/*
 * Initialize the processor appropriately.
 */

void
cpu_init(struct cpu_info *ci)
{

	/*
	 * If we have FXSAVE/FXRSTOR, use them.
	 */
	if (cpu_feature[0] & CPUID_FXSR) {
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2))
			lcr4(rcr4() | CR4_OSXMMEXCPT);
	}

#ifdef __x86_64__
	/* No user PGD mapped for this CPU yet */
	ci->ci_xen_current_user_pgd = 0;
#endif
#if defined(__x86_64__) || defined(PAE)
	mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM);
#endif

	atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
}

#ifdef MULTIPROCESSOR

void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
	}

	x86_mp_online = true;
}

static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = lwp_getpcb(l);

	pcb->pcb_cr0 = rcr0();
}

void
cpu_init_idle_lwps(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

static void
cpu_start_secondary(struct cpu_info *ci)
{
	int i;

	aprint_debug_dev(ci->ci_dev, "starting\n");

	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;

	if (CPU_STARTUP(ci, (vaddr_t) cpu_hatch) != 0) {
		return;
	}

	/*
	 * wait for it to become ready
	 */
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
		delay(10);
	}
	if ((ci->ci_flags & CPUF_PRESENT) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to become ready\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}

	CPU_START_CLEANUP(ci);
}

static void
cpu_boot_secondary(struct cpu_info *ci)
{
	int i;

	atomic_or_32(&ci->ci_flags, CPUF_GO);
	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
		delay(10);
	}
	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		aprint_error_dev(ci->ci_dev, "CPU failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}
}

/*
 * APs end up here immediately after initialisation and VCPUOP_up in
 * mp_cpu_start().
 * At this point, we are running in the idle pcb/idle stack of the new
 * CPU.  This function jumps to the idle loop and starts looking for
 * work.
 */
extern void x86_64_tls_switch(struct lwp *);

void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	struct pcb *pcb;
	int s, i;

	/* Setup TLS and kernel GS/FS */
	cpu_init_msrs(ci, true);
	cpu_init_idt();
	gdt_init_cpu(ci);

	cpu_probe(ci);

	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);

	while ((ci->ci_flags & CPUF_GO) == 0) {
		/* Don't use delay, boot CPU may be patching the text. */
		for (i = 10000; i != 0; i--)
			x86_pause();
	}

	/* Because the text may have been patched in x86_patch(). */
	x86_flush();
	tlbflushg();

	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);

	pcb = lwp_getpcb(curlwp);
	pcb->pcb_cr3 = pmap_pdirpa(pmap_kernel(), 0);
	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);

	xen_ipi_init();

	xen_initclocks();

#ifdef __x86_64__
	fpuinit(ci);
#endif

	lldt(GSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);
	cpu_get_tsc_freq(ci);

	s = splhigh();
	x86_enable_intr();
	splx(s);

	aprint_debug_dev(ci->ci_dev, "running\n");

	cpu_switchto(NULL, ci->ci_data.cpu_idlelwp, true);

	idle_loop(NULL);
	KASSERT(false);
}

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curlwp 		fpcurlwp\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
		    (long)ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif /* DDB */

#endif /* MULTIPROCESSOR */

extern void hypervisor_callback(void);
extern void failsafe_callback(void);
#ifdef __x86_64__
typedef void (vector)(void);
extern vector Xsyscall, Xsyscall32;
#endif

/*
 * Set up the "trampoline".  On Xen, we set up nearly all of the CPU
 * context outside of a trampoline, so we prototype and call the target
 * entry point like so:
 * void targetip(struct cpu_info *);
 */
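
/*
 * (Descriptive note: in this file the target passed in is cpu_hatch();
 * mp_cpu_start() below casts its vaddr_t argument to the prototype
 * above before handing it to the context-initialisation helpers.)
 */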

static void
gdt_prepframes(paddr_t *frames, vaddr_t base, uint32_t entries)
{
	int i;

	for (i = 0; i < roundup(entries, PAGE_SIZE) >> PAGE_SHIFT; i++) {
		frames[i] = ((paddr_t) xpmap_ptetomach(
				(pt_entry_t *) (base + (i << PAGE_SHIFT))))
			>> PAGE_SHIFT;

		/* Mark Read-only */
		pmap_pte_clearbits(kvtopte(base + (i << PAGE_SHIFT)),
		    PG_RW);
	}
}
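
/*
 * A note on the conversion above (descriptive, based on the code as
 * written): for each page backing the GDT, kvtopte()/xpmap_ptetomach()
 * turn the kernel VA into the machine frame number Xen expects in
 * gdt_frames[], and clearing PG_RW makes the page read-only, since
 * Xen will not accept a descriptor-table page that the guest can
 * still write.
 */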

#ifdef __x86_64__
extern char *ldtstore;

static void
xen_init_amd64_vcpuctxt(struct cpu_info *ci,
			struct vcpu_guest_context *initctx,
			void targetrip(struct cpu_info *))
{
	/* page frames to point at GDT */
	extern int gdt_size;
	paddr_t frames[16];
	psize_t gdt_ents;

	struct lwp *l;
	struct pcb *pcb;

	volatile struct vcpu_info *vci;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(initctx != NULL);
	KASSERT(targetrip != NULL);

	memset(initctx, 0, sizeof *initctx);

	gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT;
	KASSERT(gdt_ents <= 16);

	gdt_prepframes(frames, (vaddr_t) ci->ci_gdt, gdt_ents);

	/* Initialise the vcpu context: We use idle_loop()'s pcb context. */

	l = ci->ci_data.cpu_idlelwp;

	KASSERT(l != NULL);
	pcb = lwp_getpcb(l);
	KASSERT(pcb != NULL);

	/* resume with interrupts off */
	vci = ci->ci_vcpu;
	vci->evtchn_upcall_mask = 1;
	xen_mb();

	/* resume in kernel-mode */
	initctx->flags = VGCF_in_kernel | VGCF_online;

	/*
	 * Stack and entry points:
	 * We arrange for the stack frame for cpu_hatch() to
	 * appear as a callee frame of lwp_trampoline(). Being a
	 * leaf frame prevents trampling on any of the MD stack setup
	 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop()
	 */
	initctx->user_regs.rdi = (uint64_t) ci; /* targetrip(ci); */
	initctx->user_regs.rip = (vaddr_t) targetrip;

	initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);

	initctx->user_regs.rflags = pcb->pcb_flags;
	initctx->user_regs.rsp = pcb->pcb_rsp;

	/* Data segments */
	initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);

	/* GDT */
	memcpy(initctx->gdt_frames, frames, sizeof frames);
	initctx->gdt_ents = gdt_ents;

	/* LDT */
	initctx->ldt_base = (unsigned long) ldtstore;
	initctx->ldt_ents = LDT_SIZE >> 3;

	/* Kernel context state */
	initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->kernel_sp = pcb->pcb_rsp0;
	initctx->ctrlreg[0] = pcb->pcb_cr0;
	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
	initctx->ctrlreg[2] = (vaddr_t) targetrip;
	/*
	 * Use pmap_kernel() L4 PD directly, until we setup the
	 * per-cpu L4 PD in pmap_cpu_init_late()
	 */
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_kpm_pdirpa)));
	initctx->ctrlreg[4] = CR4_PAE | CR4_OSFXSR | CR4_OSXMMEXCPT;

	/* Xen callbacks */
	initctx->event_callback_eip = (unsigned long) hypervisor_callback;
	initctx->failsafe_callback_eip = (unsigned long) failsafe_callback;
	initctx->syscall_callback_eip = (unsigned long) Xsyscall;
}
#else /* i386 */
extern union descriptor *ldt;
extern void Xsyscall(void);

static void
xen_init_i386_vcpuctxt(struct cpu_info *ci,
			struct vcpu_guest_context *initctx,
			void targeteip(struct cpu_info *))
{
	/* page frames to point at GDT */
	extern int gdt_size;
	paddr_t frames[16];
	psize_t gdt_ents;

	struct lwp *l;
	struct pcb *pcb;

	volatile struct vcpu_info *vci;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(initctx != NULL);
	KASSERT(targeteip != NULL);

	memset(initctx, 0, sizeof *initctx);

	gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT;
	KASSERT(gdt_ents <= 16);

	gdt_prepframes(frames, (vaddr_t) ci->ci_gdt, gdt_ents);

	/*
	 * Initialise the vcpu context:
	 * We use this cpu's idle_loop() pcb context.
	 */

	l = ci->ci_data.cpu_idlelwp;

	KASSERT(l != NULL);
	pcb = lwp_getpcb(l);
	KASSERT(pcb != NULL);

	/* resume with interrupts off */
	vci = ci->ci_vcpu;
	vci->evtchn_upcall_mask = 1;
	xen_mb();

	/* resume in kernel-mode */
	initctx->flags = VGCF_in_kernel | VGCF_online;

	/*
	 * Stack frame setup for cpu_hatch():
	 * We arrange for the stack frame for cpu_hatch() to
	 * appear as a callee frame of lwp_trampoline(). Being a
	 * leaf frame prevents trampling on any of the MD stack setup
	 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop()
	 */

	initctx->user_regs.esp = pcb->pcb_esp - 4; /* leave a word for arg1 */
	{ /* targeteip(ci); */
		uint32_t *arg = (uint32_t *) initctx->user_regs.esp;
		arg[1] = (uint32_t) ci; /* arg1 */
	}
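
	/*
	 * Resulting stack layout (an illustrative sketch of the code
	 * just above):
	 *
	 *	esp+4 (== old pcb_esp):	ci	<- first argument
	 *	esp+0:			unused	<- return-address slot
	 *
	 * i.e. the frame looks as though targeteip(ci) had just been
	 * call'ed, so the entry point finds its argument where the
	 * i386 calling convention expects it.
	 */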

	initctx->user_regs.eip = (vaddr_t) targeteip;
	initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	initctx->user_regs.eflags |= pcb->pcb_iopl;

	/* Data segments */
	initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.fs = GSEL(GDATA_SEL, SEL_KPL);

	/* GDT */
	memcpy(initctx->gdt_frames, frames, sizeof frames);
	initctx->gdt_ents = gdt_ents;

	/* LDT */
	initctx->ldt_base = (unsigned long) ldt;
	initctx->ldt_ents = NLDT;

	/* Kernel context state */
	initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->kernel_sp = pcb->pcb_esp0;
	initctx->ctrlreg[0] = pcb->pcb_cr0;
	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
	initctx->ctrlreg[2] = (vaddr_t) targeteip;
#ifdef PAE
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_pae_l3_pdirpa)));
#else /* PAE */
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(pcb->pcb_cr3)));
#endif /* PAE */
	initctx->ctrlreg[4] = /* CR4_PAE | */ CR4_OSFXSR | CR4_OSXMMEXCPT;

	/* Xen callbacks */
	initctx->event_callback_eip = (unsigned long) hypervisor_callback;
	initctx->event_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	initctx->failsafe_callback_eip = (unsigned long) failsafe_callback;
	initctx->failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
}
#endif /* __x86_64__ */

int
mp_cpu_start(struct cpu_info *ci, vaddr_t target)
{
	int hyperror;
	struct vcpu_guest_context vcpuctx;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(ci->ci_flags & CPUF_AP);

#ifdef __x86_64__
	xen_init_amd64_vcpuctxt(ci, &vcpuctx,
	    (void (*)(struct cpu_info *))target);
#else  /* i386 */
	xen_init_i386_vcpuctxt(ci, &vcpuctx,
	    (void (*)(struct cpu_info *))target);
#endif /* __x86_64__ */

	/* Initialise the given vcpu to execute cpu_hatch(ci); */
	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_initialise,
	    ci->ci_cpuid, &vcpuctx))) {
		aprint_error(": context initialisation failed. errno = %d\n",
		    hyperror);
		return hyperror;
	}

	/* Start it up */

	/* First bring it down */
	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL))) {
		aprint_error(": VCPUOP_down hypervisor command failed. "
		    "errno = %d\n", hyperror);
		return hyperror;
	}

	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_up, ci->ci_cpuid, NULL))) {
		aprint_error(": VCPUOP_up hypervisor command failed. "
		    "errno = %d\n", hyperror);
		return hyperror;
	}

	if (!vcpu_is_up(ci)) {
		aprint_error(": did not come up\n");
		return -1;
	}

	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
	if (vcpu_is_up(ci)) {
		aprint_debug_dev(ci->ci_dev, "is started.\n");
	} else {
		aprint_error_dev(ci->ci_dev, "did not start up.\n");
	}
}

void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
#ifdef __x86_64__
	if (full) {
		HYPERVISOR_set_segment_base(SEGBASE_FS, 0);
		HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (uint64_t) ci);
		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 0);
	}
#endif	/* __x86_64__ */

	if (cpu_feature[2] & CPUID_NOX)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}

void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
#ifdef __i386__
	npxsave_cpu(true);
#else
	fpusave_cpu(true);
#endif
	splx(s);
}

void
cpu_get_tsc_freq(struct cpu_info *ci)
{
	const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time;
	uint32_t vcpu_tversion;
	uint64_t freq;

	vcpu_tversion = tinfo->version;
	while (tinfo->version == vcpu_tversion)
		;	/* Wait for a time update.  XXX: timeout? */

	freq = 1000000000ULL << 32;
	freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
	if (tinfo->tsc_shift < 0)
		freq = freq << -tinfo->tsc_shift;
	else
		freq = freq >> tinfo->tsc_shift;
	ci->ci_data.cpu_cc_freq = freq;
}
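
/*
 * Derivation for the computation above (a sketch based on the Xen
 * time ABI as used here): tsc_to_system_mul converts shifted TSC
 * ticks to nanoseconds as ns = (ticks * mul) >> 32, so the tick rate
 * is freq = (10^9 << 32) / mul, corrected by applying tsc_shift in
 * the opposite direction.  E.g. mul = 2^32 with tsc_shift = 0 means
 * 1 ns per tick, i.e. a 1 GHz TSC.
 */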

void
x86_cpu_idle_xen(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

	x86_disable_intr();
	if (__predict_false(ci->ci_want_resched)) {
		x86_enable_intr();
	} else {
		idle_block();
	}
}
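
/*
 * Note on the ordering above: interrupts are disabled before the
 * ci_want_resched check so that a wakeup cannot slip in between the
 * check and the block; idle_block() is then expected to re-enable
 * event delivery when it yields the vCPU.
 */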

/*
 * Loads pmap for the current CPU.
 */
void
cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
{
	KASSERT(pmap != pmap_kernel());

#if defined(__x86_64__) || defined(PAE)
	struct cpu_info *ci = curcpu();
	cpuid_t cid = cpu_index(ci);

	mutex_enter(&ci->ci_kpm_mtx);
	/* make new pmap visible to pmap_kpm_sync_xcall() */
	kcpuset_atomic_set(pmap->pm_xen_ptp_cpus, cid);
#endif
#ifdef i386
#ifdef PAE
	{
		int i;
		paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);

		/* don't update the kernel L3 slot */
		for (i = 0; i < PDP_SIZE - 1; i++) {
			xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
			    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
		}
		tlbflush();
	}
#else /* PAE */
	lcr3(pmap_pdirpa(pmap, 0));
#endif /* PAE */
#endif /* i386 */

#ifdef __x86_64__
	{
		int i;
		pd_entry_t *new_pgd;
		paddr_t l4_pd_ma;

		l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);

		/*
		 * Map user space address in kernel space and load
		 * user cr3
		 */
		new_pgd = pmap->pm_pdir;
		KASSERT(pmap == ci->ci_pmap);

		/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
		for (i = 0; i < PDIR_SLOT_PTE; i++) {
			KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
			if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
				xpq_queue_pte_update(
				    l4_pd_ma + i * sizeof(pd_entry_t),
				    new_pgd[i]);
			}
		}

		xen_set_user_pgd(pmap_pdirpa(pmap, 0));
		ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);

		tlbflush();
	}
#endif /* __x86_64__ */
#if defined(__x86_64__) || defined(PAE)
	/* old pmap no longer visible to pmap_kpm_sync_xcall() */
	if (oldpmap != pmap_kernel()) {
		kcpuset_atomic_clear(oldpmap->pm_xen_ptp_cpus, cid);
	}
	mutex_exit(&ci->ci_kpm_mtx);
#endif
}

/*
 * pmap_cpu_init_late: perform late per-CPU initialization.
 *
 * Short note about per-CPU PDIR pages:
 * Both the PAE and __x86_64__ architectures have per-CPU PDIR
 * tables.  This works around Xen's pagetable setup constraints for
 * PAE (multiple L3[3] entries cannot point to the same L2 - Xen
 * will refuse to pin a table set up this way) and allows different
 * CPUs to map in different user pmaps on __x86_64__ (see
 * cpu_load_pmap()).
 *
 * What this means for us is that the PDIR of pmap_kernel() is
 * considered to be a canonical "SHADOW" PDIR with the following
 * properties:
 * - Its recursive mapping points to itself
 * - per-cpu recursive mappings point to themselves on __x86_64__
 * - per-cpu L4 pages' kernel entries are expected to be in sync with
 *   the shadow
 */
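
/*
 * An illustrative sketch (not authoritative) of the relationship the
 * comment above describes, on __x86_64__:
 *
 *	pmap_kernel()->pm_pdir		canonical SHADOW L4
 *	    |   kernel entries kept in sync
 *	    v
 *	ci->ci_kpm_pdir (one per CPU)	per-cpu L4, with its own
 *					recursive PDIR_SLOT_PTE entry
 *					and, once a user pmap is loaded,
 *					that pmap's user-range entries
 *					(see cpu_load_pmap() above).
 */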

void
pmap_cpu_init_late(struct cpu_info *ci)
{
#if defined(PAE) || defined(__x86_64__)
	/*
	 * The BP already has its own PD page, allocated during
	 * early MD startup.
	 */

#if defined(__x86_64__)
	/* Setup per-cpu normal_pdes */
	int i;
	extern pd_entry_t * const normal_pdes[];

	for (i = 0; i < PTP_LEVELS - 1; i++) {
		ci->ci_normal_pdes[i] = normal_pdes[i];
	}
#endif /* __x86_64__ */

	if (ci == &cpu_info_primary)
		return;

	KASSERT(ci != NULL);

#if defined(PAE)
	cpu_alloc_l3_page(ci);
	KASSERT(ci->ci_pae_l3_pdirpa != 0);

	/* Initialise L2 entries 0 - 2: Point them to pmap_kernel() */
	int i;
	for (i = 0; i < PDP_SIZE - 1; i++) {
		ci->ci_pae_l3_pdir[i] =
		    xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PG_V;
	}
#endif /* PAE */

	ci->ci_kpm_pdir = (pd_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);

	if (ci->ci_kpm_pdir == NULL) {
		panic("%s: failed to allocate L4 per-cpu PD for CPU %d",
		    __func__, cpu_index(ci));
	}
	ci->ci_kpm_pdirpa = vtophys((vaddr_t) ci->ci_kpm_pdir);
	KASSERT(ci->ci_kpm_pdirpa != 0);

#if defined(__x86_64__)
	/*
	 * Copy over the pmap_kernel() shadow L4 entries
	 */
	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir, PAGE_SIZE);

	/* Recursive kernel mapping */
	ci->ci_kpm_pdir[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;
#elif defined(PAE)
	/* Copy over the pmap_kernel() shadow L2 entries that map the kernel */
	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN,
	    nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t));
#endif /* __x86_64__ else PAE */

	/* Xen wants R/O */
	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_kpm_pdir,
	    (vaddr_t)ci->ci_kpm_pdir + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());

#if defined(PAE)
	/*
	 * Initialise L3 entry 3.  This mapping is shared across all
	 * pmaps and is static, i.e. loading a new pmap will not
	 * update this entry.
	 */
	ci->ci_pae_l3_pdir[3] =
	    xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;

	/* Mark L3 R/O (Xen wants this) */
	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_pae_l3_pdir,
	    (vaddr_t)ci->ci_pae_l3_pdir + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());

	xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa));
#elif defined(__x86_64__)
	xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
#endif /* PAE , __x86_64__ */
#endif /* defined(PAE) || defined(__x86_64__) */
}

/*
 * Notify all other cpus to halt.
 */
void
cpu_broadcast_halt(void)
{
	xen_broadcast_ipi(XEN_IPI_HALT);
}

/*
 * Send a dummy ipi to a cpu.
 */
void
cpu_kick(struct cpu_info *ci)
{
	(void)xen_send_ipi(ci, XEN_IPI_KICK);
}