/*	$NetBSD: cpu.c,v 1.83 2012/02/22 18:29:31 bouyer Exp $	*/
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by RedBack Networks Inc.
 *
 * Author: Bill Sommerfeld
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Stefan Grefen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.83 2012/02/22 18:29:31 bouyer Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
#include "opt_mtrr.h"
#include "opt_xen.h"

#include "lapic.h"
#include "ioapic.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/atomic.h>
#include <sys/reboot.h>
#include <sys/idle.h>

#include <uvm/uvm.h>

#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/gdt.h>
#include <machine/mtrr.h>
#include <machine/pio.h>

#ifdef i386
#include <machine/npx.h>
#else
#include <machine/fpu.h>
#endif

#include <xen/xen.h>
#include <xen/xen-public/vcpu.h>
#include <xen/vcpuvar.h>

#if NLAPIC > 0
#include <machine/apicvar.h>
#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#endif

#include <dev/ic/mc146818reg.h>
#include <dev/isa/isareg.h>

#if MAXCPUS > 32
#error cpu_info contains 32bit bitmasks
#endif
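
/*
 * Note: the CPU masks used below (ci_cpumask, cpus_attached,
 * cpus_running, phycpus_attached, ...) are formed as
 * (1 << cpu_index(ci)) in a uint32_t, hence the 32-CPU ceiling
 * enforced above.
 */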

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);
static void	cpu_defer(device_t);
static int	cpu_rescan(device_t, const char *, const int *);
static void	cpu_childdetached(device_t, device_t);
static int	vcpu_match(device_t, cfdata_t, void *);
static void	vcpu_attach(device_t, device_t, void *);
static void	cpu_attach_common(device_t, device_t, void *);
void		cpu_offline_md(void);

struct cpu_softc {
	device_t sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
	bool sc_wasonline;
};

int mp_cpu_start(struct cpu_info *, vaddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
				      mp_cpu_start_cleanup };

CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached);

CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc),
    vcpu_match, vcpu_attach, NULL, NULL);

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
#ifdef TRAPLOG
#include <machine/tlog.h>
struct tlog tlog_primary;
#endif
struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
	.ci_curldt = -1,
	.ci_cpumask = 1,
#ifdef TRAPLOG
	.ci_tlog = &tlog_primary,
#endif
};
struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &phycpu_info_primary,
};

struct cpu_info *cpu_info_list = &cpu_info_primary;
struct cpu_info *phycpu_info_list = &phycpu_info_primary;

uint32_t cpus_attached = 1;
uint32_t cpus_running = 1;

uint32_t phycpus_attached = 0;
uint32_t phycpus_running = 0;

uint32_t cpu_feature[5]; /* X86 CPUID feature bits
			  *	[0] basic features %edx
			  *	[1] basic features %ecx
			  *	[2] extended features %edx
			  *	[3] extended features %ecx
			  *	[4] VIA padlock features
			  */

bool x86_mp_online;
paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;

#if defined(MULTIPROCESSOR)
void		cpu_hatch(void *);
static void	cpu_boot_secondary(struct cpu_info *ci);
static void	cpu_start_secondary(struct cpu_info *ci);
#endif	/* MULTIPROCESSOR */

static int
cpu_match(device_t parent, cfdata_t match, void *aux)
{

	return 1;
}

static void
cpu_attach(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	static int nphycpu = 0;

	sc->sc_dev = self;

	if (phycpus_attached == ~0) {
		aprint_error(": increase MAXCPUS\n");
		return;
	}

	/*
	 * If we're the first attached (physical) CPU, use the
	 * statically allocated primary cpu_info; otherwise allocate
	 * a new one.
	 */
	aprint_naive("\n");
	aprint_normal("\n");
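	/*
	 * The allocation below is padded by CACHE_LINE_SIZE - 1 bytes
	 * so that the cpu_info itself can be rounded up to a
	 * cache-line boundary with roundup2().  Only the aligned
	 * pointer is kept; the block is never freed, which is fine
	 * since cpu_info structures live for the lifetime of the
	 * system.
	 */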
	if (nphycpu > 0) {
		struct cpu_info *tmp;
		ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		ci->ci_curldt = -1;

		tmp = phycpu_info_list;
		while (tmp->ci_next)
			tmp = tmp->ci_next;

		tmp->ci_next = ci;
	} else {
		ci = &phycpu_info_primary;
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_acpiid = caa->cpu_id;
	ci->ci_cpuid = caa->cpu_number;
	ci->ci_vcpu = NULL;
	ci->ci_index = nphycpu++;
	ci->ci_cpumask = (1 << cpu_index(ci));

	atomic_or_32(&phycpus_attached, ci->ci_cpumask);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	(void)config_defer(self, cpu_defer);
}

static void
cpu_defer(device_t self)
{
	cpu_rescan(self, NULL, NULL);
}

static int
cpu_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct cpu_softc *sc = device_private(self);
	struct cpufeature_attach_args cfaa;
	struct cpu_info *ci = sc->sc_info;

	memset(&cfaa, 0, sizeof(cfaa));
	cfaa.ci = ci;

	if (ifattr_match(ifattr, "cpufeaturebus")) {
		if (ci->ci_frequency == NULL) {
			cfaa.name = "frequency";
			ci->ci_frequency = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}
	}

	return 0;
}

static void
cpu_childdetached(device_t self, device_t child)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_info;

	if (ci->ci_frequency == child)
		ci->ci_frequency = NULL;
}

static int
vcpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct vcpu_attach_args *vcaa = aux;
	struct vcpu_runstate_info vcr;
	int error;

	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0) {
		error = HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info,
					   vcaa->vcaa_caa.cpu_number,
					   &vcr);
		switch (error) {
		case 0:
			return 1;
		case -ENOENT:
			return 0;
		default:
			panic("Unknown hypervisor error %d returned on vcpu runstate probe\n", error);
		}
	}

	return 0;
}

static void
vcpu_attach(device_t parent, device_t self, void *aux)
{
	struct vcpu_attach_args *vcaa = aux;

	KASSERT(vcaa->vcaa_caa.cpu_func == NULL);
	vcaa->vcaa_caa.cpu_func = &mp_cpu_funcs;
	cpu_attach_common(parent, self, &vcaa->vcaa_caa);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

static int
vcpu_is_up(struct cpu_info *ci)
{
	KASSERT(ci != NULL);
	return HYPERVISOR_vcpu_op(VCPUOP_is_up, ci->ci_cpuid, NULL);
}

static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

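	/*
	 * The number of page colors a cache contributes is the size of
	 * one way in pages, i.e. (total size / associativity) / page
	 * size; a fully associative cache (0xff) needs only one color.
	 * For example, a 512KB 8-way L2 with 4KB pages yields
	 * 512K / 8 / 4K = 16 colors.
	 */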
	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch (cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
	}

	/*
	 * Knowing the size of the largest cache on this CPU, potentially
	 * re-color our pages.
	 */
	aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
	uvm_page_recolor(ncolors);
}

static void
cpu_attach_common(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	int cpunum = caa->cpu_number;
	static bool again = false;

	sc->sc_dev = self;

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		aprint_naive(": Application Processor\n");
		ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		memset(ci, 0, sizeof(*ci));
#ifdef TRAPLOG
		ci->ci_tlog_base = kmem_zalloc(sizeof(struct tlog), KM_SLEEP);
#endif
	} else {
		aprint_naive(": %s Processor\n",
		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
		ci = &cpu_info_primary;
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_cpuid = cpunum;

	KASSERT(HYPERVISOR_shared_info != NULL);
	ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum];

	KASSERT(ci->ci_func == 0);
	ci->ci_func = caa->cpu_func;

	/* Must be called before mi_cpu_attach(). */
	cpu_vm_init(ci);

	if (caa->cpu_role == CPU_ROLE_AP) {
		int error;

		error = mi_cpu_attach(ci);

		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
		if (error != 0) {
			aprint_normal("\n");
			aprint_error_dev(self,
			    "mi_cpu_attach failed with %d\n", error);
			return;
		}

	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	ci->ci_cpumask = (1 << cpu_index(ci));
	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/*
	 * The boot processor may not be attached first, but whichever
	 * CPU attaches first must run the one-time initialization
	 * below before any other processor can be booted.
	 */
	if (!again) {
		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
		/* Basic init. */
		cpu_intr_init(ci);
		cpu_get_tsc_freq(ci);
		cpu_init(ci);
		pmap_cpu_init_late(ci);

		/* Every processor needs to init its own ipi h/w (similar to lapic) */
		xen_ipi_init();
		/* XXX: clock_init() */

		/* Make sure DELAY() is initialized. */
		DELAY(1);
		again = true;
	}

	/* further PCB init done later. */

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		atomic_or_32(&ci->ci_flags, CPUF_SP);
		cpu_identify(ci);
#if 0
		x86_errata();
#endif
		x86_cpu_idle_init();

		break;

	case CPU_ROLE_BP:
		atomic_or_32(&ci->ci_flags, CPUF_BSP);
		cpu_identify(ci);
#if 0
		x86_errata();
#endif
		x86_cpu_idle_init();

		break;

	case CPU_ROLE_AP:
		atomic_or_32(&ci->ci_flags, CPUF_AP);

		/*
		 * report on an AP
		 */

#if defined(MULTIPROCESSOR)
		/* interrupt handler stack */
		cpu_intr_init(ci);

		/* Setup per-cpu memory for gdt */
		gdt_alloc_cpu(ci);

		pmap_cpu_init_late(ci);
		cpu_start_secondary(ci);

		if (ci->ci_flags & CPUF_PRESENT) {
			struct cpu_info *tmp;

			cpu_identify(ci);
			tmp = cpu_info_list;
			while (tmp->ci_next)
				tmp = tmp->ci_next;

			tmp->ci_next = ci;
		}
#else
		aprint_error(": not started\n");
#endif
		break;

	default:
		aprint_normal("\n");
		panic("unknown processor type??\n");
	}

	atomic_or_32(&cpus_attached, ci->ci_cpumask);

#ifdef MPVERBOSE
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;
		struct pcb *pcb = lwp_getpcb(l);

		aprint_verbose_dev(self,
		    "idle lwp at %p, idle sp at %p\n",
		    l,
#ifdef i386
		    (void *)pcb->pcb_esp
#else /* i386 */
		    (void *)pcb->pcb_rsp
#endif /* i386 */
		);
	}
#endif /* MPVERBOSE */
}

/*
 * Initialize the processor appropriately.
 */

void
cpu_init(struct cpu_info *ci)
{

	/*
	 * On a P6 or above, enable global TLB caching if the
	 * hardware supports it.
	 */
	if (cpu_feature[0] & CPUID_PGE)
		lcr4(rcr4() | CR4_PGE);	/* enable global TLB caching */

#ifdef XXXMTRR
	/*
	 * On a P6 or above, initialize MTRR's if the hardware supports them.
	 */
	if (cpu_feature[0] & CPUID_MTRR) {
		if ((ci->ci_flags & CPUF_AP) == 0)
			i686_mtrr_init_first();
		mtrr_init_cpu(ci);
	}
#endif
	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature[0] & CPUID_FXSR) {
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2))
			lcr4(rcr4() | CR4_OSXMMEXCPT);
	}

#ifdef __x86_64__
	/* No user PGD mapped for this CPU yet */
	ci->ci_xen_current_user_pgd = 0;
#endif
#if defined(__x86_64__) || defined(PAE)
	mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM);
#endif

	atomic_or_32(&cpus_running, ci->ci_cpumask);
	atomic_or_32(&ci->ci_flags, CPUF_RUNNING);

	/* XXX: register vcpu_register_runstate_memory_area, and figure out how to make sure this VCPU is running ? */
}

#ifdef MULTIPROCESSOR

void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
	}

	x86_mp_online = true;
}

static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = lwp_getpcb(l);

	pcb->pcb_cr0 = rcr0();
}

void
cpu_init_idle_lwps(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

static void
cpu_start_secondary(struct cpu_info *ci)
{
	int i;

	aprint_debug_dev(ci->ci_dev, "starting\n");

	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;

	if (CPU_STARTUP(ci, (vaddr_t) cpu_hatch) != 0) {
		return;
	}

	/*
	 * wait for it to become ready
	 */
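	/* 100000 iterations of delay(10) is a timeout of roughly one second. */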
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
		delay(10);
	}
	if ((ci->ci_flags & CPUF_PRESENT) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to become ready\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}

	CPU_START_CLEANUP(ci);
}

static void
cpu_boot_secondary(struct cpu_info *ci)
{
	int i;

	atomic_or_32(&ci->ci_flags, CPUF_GO);
	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
		delay(10);
	}
	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		aprint_error_dev(ci->ci_dev, "CPU failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}
}

/*
 * APs end up here immediately after initialisation and VCPUOP_up in
 * mp_cpu_start().
 * At this point, we are running in the idle pcb/idle stack of the new
 * CPU.  This function jumps to the idle loop and starts looking for
 * work.
 */
extern void x86_64_tls_switch(struct lwp *);

void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	struct pcb *pcb;
	int s, i;

	/* Setup TLS and kernel GS/FS */
	cpu_init_msrs(ci, true);
	cpu_init_idt();
	gdt_init_cpu(ci);

	cpu_probe(ci);

	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);

	while ((ci->ci_flags & CPUF_GO) == 0) {
		/* Don't use delay, boot CPU may be patching the text. */
		for (i = 10000; i != 0; i--)
			x86_pause();
	}

	/* Because the text may have been patched in x86_patch(). */
	x86_flush();
	tlbflushg();

	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);

	pcb = lwp_getpcb(curlwp);
	pcb->pcb_cr3 = pmap_pdirpa(pmap_kernel(), 0); /* XXX: consider using pmap_load() ? */
	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);

	xen_ipi_init();

	xen_initclocks();

	/* XXX: lapic_initclocks(); */

#ifdef __x86_64__
	fpuinit(ci);
#endif

	lldt(GSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);
	cpu_get_tsc_freq(ci);

	s = splhigh();
	x86_enable_intr();
	splx(s);
#if 0
	x86_errata();
#endif

	aprint_debug_dev(ci->ci_dev, "running\n");

	cpu_switchto(NULL, ci->ci_data.cpu_idlelwp, true);

	panic("switch to idle_loop context returned!\n");
	/* NOTREACHED */
}

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curlwp 		fpcurlwp\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
		    (long)ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif /* DDB */

#endif /* MULTIPROCESSOR */

extern void hypervisor_callback(void);
extern void failsafe_callback(void);
#ifdef __x86_64__
typedef void (vector)(void);
extern vector Xsyscall, Xsyscall32;
#endif

/*
 * Set up the "trampoline".  On Xen, we set up nearly all of the cpu
 * context outside a trampoline, so we prototype and call targetip
 * like so: void targetip(struct cpu_info *);
 */

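/*
 * Convert the virtual pages backing a GDT into the machine frame
 * numbers expected in vcpu_guest_context.gdt_frames, write-protecting
 * each page along the way: Xen refuses to use GDT frames that remain
 * writable by the guest.
 */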
static void
gdt_prepframes(paddr_t *frames, vaddr_t base, uint32_t npages)
{
	int i;

	/*
	 * npages is already a page count (the callers round gdt_size up
	 * to whole pages), so iterate over it directly.
	 */
	for (i = 0; i < npages; i++) {
		frames[i] = ((paddr_t) xpmap_ptetomach(
				(pt_entry_t *) (base + (i << PAGE_SHIFT))))
			>> PAGE_SHIFT;

		/* Mark Read-only */
		pmap_pte_clearbits(kvtopte(base + (i << PAGE_SHIFT)),
		    PG_RW);
	}
}

#ifdef __x86_64__
extern char *ldtstore; /* XXX: Xen MP todo */

static void
xen_init_amd64_vcpuctxt(struct cpu_info *ci,
			struct vcpu_guest_context *initctx,
			void targetrip(struct cpu_info *))
{
	/* page frames to point at GDT */
	extern int gdt_size;
	paddr_t frames[16];
	psize_t gdt_ents;

	struct lwp *l;
	struct pcb *pcb;

	volatile struct vcpu_info *vci;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(initctx != NULL);
	KASSERT(targetrip != NULL);

	memset(initctx, 0, sizeof *initctx);

	gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT; /* XXX: re-investigate roundup(gdt_size... ) for gdt_ents. */
	KASSERT(gdt_ents <= 16);

	gdt_prepframes(frames, (vaddr_t) ci->ci_gdt, gdt_ents);

	/* XXX: The stuff in here is amd64 specific. move to mptramp.[Sc] ? */

	/* Initialise the vcpu context: We use idle_loop()'s pcb context. */

	l = ci->ci_data.cpu_idlelwp;

	KASSERT(l != NULL);
	pcb = lwp_getpcb(l);
	KASSERT(pcb != NULL);

	/* resume with interrupts off */
	vci = ci->ci_vcpu;
	vci->evtchn_upcall_mask = 1;
	xen_mb();

	/* resume in kernel-mode */
	initctx->flags = VGCF_in_kernel | VGCF_online;

	/* Stack and entry points:
	 * We arrange for the stack frame for cpu_hatch() to
	 * appear as a callee frame of lwp_trampoline(). Being a
	 * leaf frame prevents trampling on any of the MD stack setup
	 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop()
	 */

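	/*
	 * In the SysV amd64 calling convention the first argument is
	 * passed in %rdi, so loading ci here makes entry at the rip
	 * below behave as the call targetrip(ci).
	 */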
	initctx->user_regs.rdi = (uint64_t) ci; /* targetrip(ci); */
	initctx->user_regs.rip = (vaddr_t) targetrip;

	initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);

	initctx->user_regs.rflags = pcb->pcb_flags;
	initctx->user_regs.rsp = pcb->pcb_rsp;

	/* Data segments */
	initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);

	/* GDT */
	memcpy(initctx->gdt_frames, frames, sizeof frames);
	initctx->gdt_ents = gdt_ents;

	/* LDT */
	initctx->ldt_base = (unsigned long) ldtstore;
	initctx->ldt_ents = LDT_SIZE >> 3;

	/* Kernel context state */
	initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->kernel_sp = pcb->pcb_rsp0;
	initctx->ctrlreg[0] = pcb->pcb_cr0;
	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
	initctx->ctrlreg[2] = pcb->pcb_cr2; /* XXX: */
	/*
	 * Use pmap_kernel() L4 PD directly, until we set up the
	 * per-cpu L4 PD in pmap_cpu_init_late()
	 */
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_kpm_pdirpa)));
	initctx->ctrlreg[4] = CR4_PAE | CR4_OSFXSR | CR4_OSXMMEXCPT;

	/* Xen callbacks */
	initctx->event_callback_eip = (unsigned long) hypervisor_callback;
	initctx->failsafe_callback_eip = (unsigned long) failsafe_callback;
	initctx->syscall_callback_eip = (unsigned long) Xsyscall;

	return;
}
#else /* i386 */
extern union descriptor *ldt;
extern void Xsyscall(void);

static void
xen_init_i386_vcpuctxt(struct cpu_info *ci,
			struct vcpu_guest_context *initctx,
			void targeteip(struct cpu_info *))
{
	/* page frames to point at GDT */
	extern int gdt_size;
	paddr_t frames[16];
	psize_t gdt_ents;

	struct lwp *l;
	struct pcb *pcb;

	volatile struct vcpu_info *vci;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(initctx != NULL);
	KASSERT(targeteip != NULL);

	memset(initctx, 0, sizeof *initctx);

	gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT; /* XXX: re-investigate roundup(gdt_size... ) for gdt_ents. */
	KASSERT(gdt_ents <= 16);

	gdt_prepframes(frames, (vaddr_t) ci->ci_gdt, gdt_ents);

	/*
	 * Initialise the vcpu context:
	 * We use this cpu's idle_loop() pcb context.
	 */

	l = ci->ci_data.cpu_idlelwp;

	KASSERT(l != NULL);
	pcb = lwp_getpcb(l);
	KASSERT(pcb != NULL);

	/* resume with interrupts off */
	vci = ci->ci_vcpu;
	vci->evtchn_upcall_mask = 1;
	xen_mb();

	/* resume in kernel-mode */
	initctx->flags = VGCF_in_kernel | VGCF_online;

	/* Stack frame setup for cpu_hatch():
	 * We arrange for the stack frame for cpu_hatch() to
	 * appear as a callee frame of lwp_trampoline(). Being a
	 * leaf frame prevents trampling on any of the MD stack setup
	 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop()
	 */

	initctx->user_regs.esp = pcb->pcb_esp - 4; /* leave room for arg1 */
	{ /* targeteip(ci); */
		uint32_t *arg = (uint32_t *) initctx->user_regs.esp;
		arg[1] = (uint32_t) ci; /* arg1 */
	}
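	/*
	 * In the fake frame built above, arg[0] (at %esp) is the unused
	 * return-address slot, and arg[1] (at %esp + 4) is where the
	 * i386 calling convention looks for the first argument, so
	 * targeteip sees ci as its argument.
	 */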

	initctx->user_regs.eip = (vaddr_t) targeteip;
	initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	initctx->user_regs.eflags |= pcb->pcb_iopl;

	/* Data segments */
	initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.fs = GSEL(GDATA_SEL, SEL_KPL);

	/* GDT */
	memcpy(initctx->gdt_frames, frames, sizeof frames);
	initctx->gdt_ents = gdt_ents;

	/* LDT */
	initctx->ldt_base = (unsigned long) ldt;
	initctx->ldt_ents = NLDT;

	/* Kernel context state */
	initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->kernel_sp = pcb->pcb_esp0;
	initctx->ctrlreg[0] = pcb->pcb_cr0;
	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
	initctx->ctrlreg[2] = pcb->pcb_cr2; /* XXX: */
#ifdef PAE
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_pae_l3_pdirpa)));
#else /* PAE */
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(pcb->pcb_cr3)));
#endif /* PAE */
	initctx->ctrlreg[4] = /* CR4_PAE |  */CR4_OSFXSR | CR4_OSXMMEXCPT;

	/* Xen callbacks */
	initctx->event_callback_eip = (unsigned long) hypervisor_callback;
	initctx->event_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	initctx->failsafe_callback_eip = (unsigned long) failsafe_callback;
	initctx->failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL);

	return;
}
#endif /* __x86_64__ */

int
mp_cpu_start(struct cpu_info *ci, vaddr_t target)
{
	int hyperror;
	struct vcpu_guest_context vcpuctx;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(ci->ci_flags & CPUF_AP);

#ifdef __x86_64__
	xen_init_amd64_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target);
#else  /* i386 */
	xen_init_i386_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target);
#endif /* __x86_64__ */

	/* Initialise the given vcpu to execute cpu_hatch(ci); */
	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_initialise, ci->ci_cpuid, &vcpuctx))) {
		aprint_error(": context initialisation failed. errno = %d\n", hyperror);
		return hyperror;
	}

	/* Start it up */

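	/*
	 * The vcpu is cycled down and then up so that it starts from a
	 * clean down -> up transition, presumably even if the
	 * hypervisor still considered it up from an earlier attempt.
	 */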
	/* First bring it down */
	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL))) {
		aprint_error(": VCPUOP_down hypervisor command failed. errno = %d\n", hyperror);
		return hyperror;
	}

	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_up, ci->ci_cpuid, NULL))) {
		aprint_error(": VCPUOP_up hypervisor command failed. errno = %d\n", hyperror);
		return hyperror;
	}

	if (!vcpu_is_up(ci)) {
		aprint_error(": did not come up\n");
		return -1;
	}

	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
#if 0
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
#endif
	if (vcpu_is_up(ci)) {
		aprint_debug_dev(ci->ci_dev, "is started.\n");
	} else {
		aprint_error_dev(ci->ci_dev, "did not start up.\n");
	}
}

/* curcpu() uses %fs; this shim is used until cpu_init_msrs(), below. */
static struct cpu_info *
cpu_primary(void)
{
	return &cpu_info_primary;
}
/* XXX: rename to something more generic. users other than xpq exist */
struct cpu_info	* (*xpq_cpu)(void) = cpu_primary;

void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
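	/*
	 * On amd64, curcpu() instead goes through the kernel %gs base;
	 * once SEGBASE_GS_KERNEL points at ci below, the real
	 * x86_curcpu can replace the cpu_primary shim in xpq_cpu.
	 */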
#ifdef __x86_64__
	if (full) {
		HYPERVISOR_set_segment_base(SEGBASE_FS, 0);
		HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (uint64_t) ci);
		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 0);
		xpq_cpu = x86_curcpu;
	}
#endif	/* __x86_64__ */

	if (cpu_feature[2] & CPUID_NOX)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}

void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
#ifdef __i386__
	npxsave_cpu(true);
#else
	fpusave_cpu(true);
#endif
	splx(s);
}

void
cpu_get_tsc_freq(struct cpu_info *ci)
{
	uint32_t vcpu_tversion;
	const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time;

	vcpu_tversion = tinfo->version;
	while (tinfo->version == vcpu_tversion)
		continue;	/* Wait for a time update. XXX: timeout ? */

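	/*
	 * Xen converts TSC deltas to nanoseconds as
	 *	ns = ((tsc << tsc_shift) * tsc_to_system_mul) >> 32
	 * (with a right shift when tsc_shift is negative), so the TSC
	 * frequency in Hz is (10^9 << 32) / tsc_to_system_mul, adjusted
	 * by tsc_shift in the opposite direction, as computed below.
	 */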
	uint64_t freq = 1000000000ULL << 32;
	freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
	if (tinfo->tsc_shift < 0)
		freq = freq << -tinfo->tsc_shift;
	else
		freq = freq >> tinfo->tsc_shift;
	ci->ci_data.cpu_cc_freq = freq;
}

void
x86_cpu_idle_xen(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

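	/*
	 * Interrupts are disabled before the ci_want_resched test so
	 * that a wakeup cannot slip in between the test and the block;
	 * idle_block() is expected to re-enable them (via the
	 * hypervisor) once it is safe to sleep.
	 */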
	x86_disable_intr();
	if (!__predict_false(ci->ci_want_resched)) {
		idle_block();
	} else {
		x86_enable_intr();
	}
}

/*
 * Loads pmap for the current CPU.
 */
void
cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
{
#if defined(__x86_64__) || defined(PAE)
	struct cpu_info *ci = curcpu();
	uint32_t cpumask = ci->ci_cpumask;

	mutex_enter(&ci->ci_kpm_mtx);
	/* make new pmap visible to pmap_kpm_sync_xcall() */
	atomic_or_32(&pmap->pm_xen_ptp_cpus, cpumask);
#endif
#ifdef i386
#ifdef PAE
	{
		int i;
		paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);

		/* don't update the kernel L3 slot */
		for (i = 0; i < PDP_SIZE - 1; i++) {
			xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
			    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
		}
		tlbflush();
	}
#else /* PAE */
	lcr3(pmap_pdirpa(pmap, 0));
#endif /* PAE */
#endif /* i386 */

#ifdef __x86_64__
	{
		int i;
		pd_entry_t *new_pgd;
		paddr_t l4_pd_ma;

		l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);

		/*
		 * Map user space address in kernel space and load
		 * user cr3
		 */
		new_pgd = pmap->pm_pdir;
		KASSERT(pmap == ci->ci_pmap);

		/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
		for (i = 0; i < PDIR_SLOT_PTE; i++) {
			KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
			if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
				xpq_queue_pte_update(
				    l4_pd_ma + i * sizeof(pd_entry_t),
				    new_pgd[i]);
			}
		}

		if (__predict_true(pmap != pmap_kernel())) {
			xen_set_user_pgd(pmap_pdirpa(pmap, 0));
			ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
		} else {
			xpq_queue_pt_switch(l4_pd_ma);
			ci->ci_xen_current_user_pgd = 0;
		}

		tlbflush();
	}
#endif /* __x86_64__ */
#if defined(__x86_64__) || defined(PAE)
	/* old pmap no longer visible to pmap_kpm_sync_xcall() */
	atomic_and_32(&oldpmap->pm_xen_ptp_cpus, ~cpumask);
	mutex_exit(&ci->ci_kpm_mtx);
#endif
}

/*
 * pmap_cpu_init_late: perform late per-CPU initialization.
 *
 * Short note about per-cpu PDIR pages:
 * Both the PAE and __x86_64__ architectures have per-cpu PDIR
 * tables.  This works around Xen's pagetable setup constraint for
 * PAE (multiple L3[3] entries may not point to the same L2 page;
 * Xen will refuse to pin a table set up that way) and lets multiple
 * cpus map in different user pmaps on __x86_64__ (see: cpu_load_pmap()).
 *
 * What this means for us is that the PDIR of pmap_kernel() is
 * considered to be a canonical "SHADOW" PDIR with the following
 * properties:
 * - Its recursive mapping points to itself
 * - per-cpu recursive mappings point to themselves on __x86_64__
 * - per-cpu L4 pages' kernel entries are expected to be in sync with
 *   the shadow
 */

void
pmap_cpu_init_late(struct cpu_info *ci)
{
#if defined(PAE) || defined(__x86_64__)
	/*
	 * The BP already has its own PD page, allocated during early
	 * MD startup.
	 */

	KASSERT(ci != NULL);

#if defined(__x86_64__)
	/* Setup per-cpu normal_pdes */
	int i;
	extern pd_entry_t * const normal_pdes[];

	for (i = 0; i < PTP_LEVELS - 1; i++) {
		ci->ci_normal_pdes[i] = normal_pdes[i];
	}
#endif /* __x86_64__ */

	if (ci == &cpu_info_primary)
		return;

#if defined(PAE)
	cpu_alloc_l3_page(ci);
	KASSERT(ci->ci_pae_l3_pdirpa != 0);

	/* Initialise L3 entries 0 - 2: point them at pmap_kernel()'s L2 pages */
	int i;
	for (i = 0; i < PDP_SIZE - 1; i++) {
		ci->ci_pae_l3_pdir[i] =
		    xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PG_V;
	}
#endif /* PAE */

	ci->ci_kpm_pdir = (pd_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);

	if (ci->ci_kpm_pdir == NULL) {
		panic("%s: failed to allocate L4 per-cpu PD for CPU %d\n",
		      __func__, cpu_index(ci));
	}
	ci->ci_kpm_pdirpa = vtophys((vaddr_t) ci->ci_kpm_pdir);
	KASSERT(ci->ci_kpm_pdirpa != 0);

#if defined(__x86_64__)
	/*
	 * Copy over the pmap_kernel() shadow L4 entries
	 */
	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir, PAGE_SIZE);

	/* Recursive kernel mapping */
	ci->ci_kpm_pdir[PDIR_SLOT_PTE] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;
#elif defined(PAE)
	/* Copy over the pmap_kernel() shadow L2 entries that map the kernel */
	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN,
	    nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t));
#endif /* __x86_64__ else PAE */

	/* Xen wants R/O */
	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_kpm_pdir,
	    (vaddr_t)ci->ci_kpm_pdir + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());

#if defined(PAE)
	/*
	 * Initialise L3 entry 3.  This mapping is shared across all
	 * pmaps and is static, ie; loading a new pmap will not update
	 * this entry.
	 */
	ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;

	/* Mark L3 R/O (Xen wants this) */
	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_pae_l3_pdir,
	    (vaddr_t)ci->ci_pae_l3_pdir + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());

	xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa));
#elif defined(__x86_64__)
	xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
#endif /* PAE , __x86_64__ */
#endif /* defined(PAE) || defined(__x86_64__) */
}

/*
 * Notify all other cpus to halt.
 */

void
cpu_broadcast_halt(void)
{
	xen_broadcast_ipi(XEN_IPI_HALT);
}

/*
 * Send a dummy ipi to a cpu.
 */

void
cpu_kick(struct cpu_info *ci)
{
	(void)xen_send_ipi(ci, XEN_IPI_KICK);
}