cpu.c revision 1.121
/*	$NetBSD: cpu.c,v 1.121 2018/06/23 09:51:34 maxv Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by RedBack Networks Inc.
 *
 * Author: Bill Sommerfeld
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Stefan Grefen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.121 2018/06/23 09:51:34 maxv Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
#include "opt_mtrr.h"
#include "opt_xen.h"

#include "lapic.h"
#include "ioapic.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/atomic.h>
#include <sys/reboot.h>
#include <sys/idle.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/gdt.h>
#include <machine/mtrr.h>
#include <machine/pio.h>

#include <x86/fpu.h>

#include <xen/xen.h>
#include <xen/xen-public/vcpu.h>
#include <xen/vcpuvar.h>

#if NLAPIC > 0
#include <machine/apicvar.h>
#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#endif

#include <dev/ic/mc146818reg.h>
#include <dev/isa/isareg.h>

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);
static void	cpu_defer(device_t);
static int	cpu_rescan(device_t, const char *, const int *);
static void	cpu_childdetached(device_t, device_t);
static int	vcpu_match(device_t, cfdata_t, void *);
static void	vcpu_attach(device_t, device_t, void *);
static void	cpu_attach_common(device_t, device_t, void *);
void		cpu_offline_md(void);

struct cpu_softc {
	device_t sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
	bool sc_wasonline;
};

int mp_cpu_start(struct cpu_info *, vaddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
				      mp_cpu_start_cleanup };

CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached);

CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc),
    vcpu_match, vcpu_attach, NULL, NULL);

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
	.ci_curldt = -1,
};
struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = {
	.ci_dev = 0,
	.ci_self = &phycpu_info_primary,
};

struct cpu_info *cpu_info_list = &cpu_info_primary;
struct cpu_info *phycpu_info_list = &phycpu_info_primary;

uint32_t cpu_feature[7] __read_mostly; /* X86 CPUID feature bits
			  *	[0] basic features %edx
			  *	[1] basic features %ecx
			  *	[2] extended features %edx
			  *	[3] extended features %ecx
			  *	[4] VIA padlock features
			  *	[5] structured extended features cpuid.7:%ebx
			  *	[6] structured extended features cpuid.7:%ecx
			  */
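
/*
 * Illustrative note (editor's addition, not part of the driver logic):
 * consumers test these words by masking with the CPUID_* constants from
 * <machine/specialreg.h>; e.g. the FXSR check in cpu_init() below reads
 * (cpu_feature[0] & CPUID_FXSR), i.e. one bit of the basic-features
 * %edx word.
 */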

bool x86_mp_online;
paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;

#if defined(MULTIPROCESSOR)
void		cpu_hatch(void *);
static void	cpu_boot_secondary(struct cpu_info *ci);
static void	cpu_start_secondary(struct cpu_info *ci);
#endif	/* MULTIPROCESSOR */

static int
cpu_match(device_t parent, cfdata_t match, void *aux)
{

	return 1;
}

static void
cpu_attach(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	static int nphycpu = 0;

	sc->sc_dev = self;

	/*
	 * If we're the first attached (physical) CPU, use the statically
	 * allocated primary cpu_info; otherwise allocate a new one.
	 */
	aprint_naive("\n");
	aprint_normal("\n");
	if (nphycpu > 0) {
		struct cpu_info *tmp;
		ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		ci->ci_curldt = -1;

		tmp = phycpu_info_list;
		while (tmp->ci_next)
			tmp = tmp->ci_next;

		tmp->ci_next = ci;
	} else {
		ci = &phycpu_info_primary;
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_acpiid = caa->cpu_id;
	ci->ci_cpuid = caa->cpu_number;
	ci->ci_vcpu = NULL;
	ci->ci_index = nphycpu++;

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	(void)config_defer(self, cpu_defer);
}

static void
cpu_defer(device_t self)
{
	cpu_rescan(self, NULL, NULL);
}

static int
cpu_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct cpu_softc *sc = device_private(self);
	struct cpufeature_attach_args cfaa;
	struct cpu_info *ci = sc->sc_info;

	memset(&cfaa, 0, sizeof(cfaa));
	cfaa.ci = ci;

	if (ifattr_match(ifattr, "cpufeaturebus")) {

		if (ci->ci_frequency == NULL) {
			cfaa.name = "frequency";
			ci->ci_frequency = config_found_ia(self,
			    "cpufeaturebus", &cfaa, NULL);
		}
	}

	return 0;
}

static void
cpu_childdetached(device_t self, device_t child)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_info;

	if (ci->ci_frequency == child)
		ci->ci_frequency = NULL;
}

static int
vcpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct vcpu_attach_args *vcaa = aux;
	struct vcpu_runstate_info vcr;
	int error;

	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0) {
		error = HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info,
		    vcaa->vcaa_caa.cpu_number, &vcr);
		switch (error) {
		case 0:
			return 1;
		case -ENOENT:
			return 0;
		default:
			panic("Unknown hypervisor error %d returned on "
			    "vcpu runstate probe", error);
		}
	}

	return 0;
}

static void
vcpu_attach(device_t parent, device_t self, void *aux)
{
	struct vcpu_attach_args *vcaa = aux;

	KASSERT(vcaa->vcaa_caa.cpu_func == NULL);
	vcaa->vcaa_caa.cpu_func = &mp_cpu_funcs;
	cpu_attach_common(parent, self, &vcaa->vcaa_caa);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

static int
vcpu_is_up(struct cpu_info *ci)
{
	KASSERT(ci != NULL);
	return HYPERVISOR_vcpu_op(VCPUOP_is_up, ci->ci_cpuid, NULL);
}

static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch (cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
	}

	/*
	 * Knowing the size of the largest cache on this CPU, potentially
	 * re-color our pages.
	 */
	aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
	uvm_page_recolor(ncolors);
	pmap_tlb_cpu_init(ci);
#ifndef __HAVE_DIRECT_MAP
	pmap_vpage_cpu_init(ci);
#endif
}
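
/*
 * Worked example for the color computation above (hypothetical cache
 * geometry, not a statement about any particular CPU): with 4KB pages,
 * a 512KB 8-way set-associative L2 gives atop(512KB) = 128 page-sized
 * chunks and 128 / 8 = 16 colors, while a 32KB 8-way L1 gives
 * 8 / 8 = 1 color.  ncolors is the maximum over all caches probed,
 * here 16.
 */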

static void
cpu_attach_common(device_t parent, device_t self, void *aux)
{
	struct cpu_softc *sc = device_private(self);
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	uintptr_t ptr;
	int cpunum = caa->cpu_number;
	static bool again = false;

	sc->sc_dev = self;

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		aprint_naive(": Application Processor\n");
		ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
		    KM_SLEEP);
		ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
		memset(ci, 0, sizeof(*ci));
		cpu_init_tss(ci);
	} else {
		aprint_naive(": %s Processor\n",
		    caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
		ci = &cpu_info_primary;
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_cpuid = cpunum;

	KASSERT(HYPERVISOR_shared_info != NULL);
	KASSERT(cpunum < XEN_LEGACY_MAX_VCPUS);
	ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum];

	KASSERT(ci->ci_func == 0);
	ci->ci_func = caa->cpu_func;
	aprint_normal("\n");

	/* Must be called before mi_cpu_attach(). */
	cpu_vm_init(ci);

	if (caa->cpu_role == CPU_ROLE_AP) {
		int error;

		error = mi_cpu_attach(ci);

		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
		if (error != 0) {
			aprint_error_dev(self,
			    "mi_cpu_attach failed with %d\n", error);
			return;
		}

	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	KASSERT(ci->ci_cpuid == ci->ci_index);
#ifdef __x86_64__
	/* No user PGD mapped for this CPU yet */
	ci->ci_xen_current_user_pgd = 0;
#endif
#if defined(__x86_64__) || defined(PAE)
	mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM);
#endif
	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/*
	 * Boot processor may not be attached first, but the below
	 * must be done to allow booting other processors.
	 */
	if (!again) {
		atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
		/* Basic init. */
		cpu_intr_init(ci);
		cpu_get_tsc_freq(ci);
		cpu_init(ci);
		pmap_cpu_init_late(ci);

		/* Every processor needs to init its own ipi h/w (similar to lapic) */
		xen_ipi_init();

		/* Make sure DELAY() is initialized. */
		DELAY(1);
		again = true;
	}

	/* further PCB init done later. */

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		atomic_or_32(&ci->ci_flags, CPUF_SP);
		cpu_identify(ci);
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_BP:
		atomic_or_32(&ci->ci_flags, CPUF_BSP);
		cpu_identify(ci);
		x86_cpu_idle_init();
		break;

	case CPU_ROLE_AP:
		atomic_or_32(&ci->ci_flags, CPUF_AP);

		/*
		 * report on an AP
		 */

#if defined(MULTIPROCESSOR)
		/* interrupt handler stack */
		cpu_intr_init(ci);

		/* Setup per-cpu memory for gdt */
		gdt_alloc_cpu(ci);

		pmap_cpu_init_late(ci);
		cpu_start_secondary(ci);

		if (ci->ci_flags & CPUF_PRESENT) {
			struct cpu_info *tmp;

			cpu_identify(ci);
			tmp = cpu_info_list;
			while (tmp->ci_next)
				tmp = tmp->ci_next;

			tmp->ci_next = ci;
		}
#else
		aprint_error_dev(ci->ci_dev, "not started\n");
#endif
		break;

	default:
		panic("unknown processor type");
	}

#ifdef MPVERBOSE
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;
		struct pcb *pcb = lwp_getpcb(l);

		aprint_verbose_dev(self,
		    "idle lwp at %p, idle sp at 0x%p\n",
		    l,
#ifdef i386
		    (void *)pcb->pcb_esp
#else
		    (void *)pcb->pcb_rsp
#endif
		);
	}
#endif /* MPVERBOSE */
}

/*
 * Initialize the processor appropriately.
 */

void
cpu_init(struct cpu_info *ci)
{

	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature[0] & CPUID_FXSR) {
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2))
			lcr4(rcr4() | CR4_OSXMMEXCPT);
	}

	if (x86_fpu_save >= FPU_SAVE_FXSAVE) {
		fpuinit_mxcsr_mask();
	}

	atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
}


#ifdef MULTIPROCESSOR

void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
	}

	x86_mp_online = true;
}

static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = lwp_getpcb(l);

	pcb->pcb_cr0 = rcr0();
}

void
cpu_init_idle_lwps(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < maxcpus; i++) {
		ci = cpu_lookup(i);
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

static void
cpu_start_secondary(struct cpu_info *ci)
{
	int i;

	aprint_debug_dev(ci->ci_dev, "starting\n");

	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;

	if (CPU_STARTUP(ci, (vaddr_t)cpu_hatch) != 0) {
		return;
	}

	/*
	 * wait for it to become ready
	 */
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
		delay(10);
	}
	if ((ci->ci_flags & CPUF_PRESENT) == 0) {
		aprint_error_dev(ci->ci_dev, "failed to become ready\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}

	CPU_START_CLEANUP(ci);
}

static void
cpu_boot_secondary(struct cpu_info *ci)
{
	int i;

	atomic_or_32(&ci->ci_flags, CPUF_GO);
	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
		delay(10);
	}
	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		aprint_error_dev(ci->ci_dev, "CPU failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}
}
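
/*
 * Startup handshake, for reference (summarizing the two functions above
 * and cpu_hatch() below): the boot CPU sets up the vcpu context and
 * issues VCPUOP_up, then spins until the AP sets CPUF_PRESENT; the boot
 * CPU then sets CPUF_GO, and the AP acknowledges by setting CPUF_RUNNING
 * in cpu_init() before entering idle_loop().
 */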

/*
 * APs end up here immediately after initialisation and VCPUOP_up in
 * mp_cpu_start().
 * At this point, we are running in the idle pcb/idle stack of the new
 * CPU.  This function jumps to the idle loop and starts looking for
 * work.
 */
extern void x86_64_tls_switch(struct lwp *);
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	struct pcb *pcb;
	int s, i;

	/* Setup TLS and kernel GS/FS */
	cpu_init_msrs(ci, true);
	cpu_init_idt();
	gdt_init_cpu(ci);

	cpu_probe(ci);

	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);

	while ((ci->ci_flags & CPUF_GO) == 0) {
		/* Don't use delay, boot CPU may be patching the text. */
		for (i = 10000; i != 0; i--)
			x86_pause();
	}

	/* Because the text may have been patched in x86_patch(). */
	x86_flush();
	tlbflushg();

	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);

	pcb = lwp_getpcb(curlwp);
	pcb->pcb_cr3 = pmap_pdirpa(pmap_kernel(), 0);
	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);

	xen_ipi_init();

	xen_initclocks();

#ifdef __x86_64__
	fpuinit(ci);
#endif

	lldt(GSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);
	cpu_get_tsc_freq(ci);

	s = splhigh();
	x86_enable_intr();
	splx(s);

	aprint_debug_dev(ci->ci_dev, "running\n");

	cpu_switchto(NULL, ci->ci_data.cpu_idlelwp, true);

	idle_loop(NULL);
	KASSERT(false);
}

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curlwp 		fpcurlwp\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
		    (long)ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif /* DDB */

#endif /* MULTIPROCESSOR */

extern void hypervisor_callback(void);
extern void failsafe_callback(void);
#ifdef __x86_64__
typedef void (vector)(void);
extern vector Xsyscall, Xsyscall32;
#endif

/*
 * Setup the "trampoline". On Xen, we setup nearly all cpu context
 * outside a trampoline, so we prototype and call targetip like so:
 * void targetip(struct cpu_info *);
 */

static void
gdt_prepframes(paddr_t *frames, vaddr_t base, uint32_t entries)
{
	int i;

	for (i = 0; i < entries; i++) {
		frames[i] = ((paddr_t)xpmap_ptetomach(
		    (pt_entry_t *)(base + (i << PAGE_SHIFT)))) >> PAGE_SHIFT;

		/* Mark Read-only */
		pmap_pte_clearbits(kvtopte(base + (i << PAGE_SHIFT)),
		    PG_RW);
	}
}
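
/*
 * Note on gdt_prepframes(): Xen takes descriptor-table pages as machine
 * frame numbers (MFNs) rather than pseudo-physical ones, which is why
 * each page's PTE goes through xpmap_ptetomach() and is then shifted
 * down by PAGE_SHIFT.  PG_RW is cleared because Xen requires the frames
 * backing a guest GDT to be read-only; it will reject frames the guest
 * could still modify behind its back.
 */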

#ifdef __x86_64__
extern char *ldtstore;

static void
xen_init_amd64_vcpuctxt(struct cpu_info *ci, struct vcpu_guest_context *initctx,
    void targetrip(struct cpu_info *))
{
	/* page frames to point at GDT */
	extern int gdt_size;
	paddr_t frames[16];
	psize_t gdt_ents;

	struct lwp *l;
	struct pcb *pcb;

	volatile struct vcpu_info *vci;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(initctx != NULL);
	KASSERT(targetrip != NULL);

	memset(initctx, 0, sizeof(*initctx));

	gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT;
	KASSERT(gdt_ents <= 16);

	gdt_prepframes(frames, (vaddr_t)ci->ci_gdt, gdt_ents);

	/* Initialise the vcpu context: We use idle_loop()'s pcb context. */

	l = ci->ci_data.cpu_idlelwp;

	KASSERT(l != NULL);
	pcb = lwp_getpcb(l);
	KASSERT(pcb != NULL);

	/* resume with interrupts off */
	vci = ci->ci_vcpu;
	vci->evtchn_upcall_mask = 1;
	xen_mb();

	/* resume in kernel-mode */
	initctx->flags = VGCF_in_kernel | VGCF_online;

	/*
	 * Stack and entry points:
	 * We arrange for the stack frame for cpu_hatch() to
	 * appear as a callee frame of lwp_trampoline(). Being a
	 * leaf frame prevents trampling on any of the MD stack setup
	 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop()
	 */

	initctx->user_regs.rdi = (uint64_t)ci; /* targetrip(ci); */
	initctx->user_regs.rip = (vaddr_t)targetrip;

	initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);

	initctx->user_regs.rflags = pcb->pcb_flags;
	initctx->user_regs.rsp = pcb->pcb_rsp;

	/* Data segments */
	initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);

	/* GDT */
	memcpy(initctx->gdt_frames, frames, sizeof(frames));
	initctx->gdt_ents = gdt_ents;

	/* LDT */
	initctx->ldt_base = (unsigned long)ldtstore;
	initctx->ldt_ents = LDT_SIZE >> 3;

	/* Kernel context state */
	initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->kernel_sp = pcb->pcb_rsp0;
	initctx->ctrlreg[0] = pcb->pcb_cr0;
	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
	initctx->ctrlreg[2] = (vaddr_t)targetrip;
	/*
	 * Use pmap_kernel() L4 PD directly, until we setup the
	 * per-cpu L4 PD in pmap_cpu_init_late()
	 */
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_kpm_pdirpa)));
	initctx->ctrlreg[4] = CR4_PAE | CR4_OSFXSR | CR4_OSXMMEXCPT;

	/* Xen callbacks */
	initctx->event_callback_eip = (unsigned long)hypervisor_callback;
	initctx->failsafe_callback_eip = (unsigned long)failsafe_callback;
	initctx->syscall_callback_eip = (unsigned long)Xsyscall;
}
#else /* i386 */
extern union descriptor *ldtstore;
extern void Xsyscall(void);

static void
xen_init_i386_vcpuctxt(struct cpu_info *ci, struct vcpu_guest_context *initctx,
    void targeteip(struct cpu_info *))
{
	/* page frames to point at GDT */
	extern int gdt_size;
	paddr_t frames[16];
	psize_t gdt_ents;

	struct lwp *l;
	struct pcb *pcb;

	volatile struct vcpu_info *vci;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(initctx != NULL);
	KASSERT(targeteip != NULL);

	memset(initctx, 0, sizeof(*initctx));

	gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT;
	KASSERT(gdt_ents <= 16);

	gdt_prepframes(frames, (vaddr_t)ci->ci_gdt, gdt_ents);

	/*
	 * Initialise the vcpu context:
	 * We use this cpu's idle_loop() pcb context.
	 */

	l = ci->ci_data.cpu_idlelwp;

	KASSERT(l != NULL);
	pcb = lwp_getpcb(l);
	KASSERT(pcb != NULL);

	/* resume with interrupts off */
	vci = ci->ci_vcpu;
	vci->evtchn_upcall_mask = 1;
	xen_mb();

	/* resume in kernel-mode */
	initctx->flags = VGCF_in_kernel | VGCF_online;

	/*
	 * Stack frame setup for cpu_hatch():
	 * We arrange for the stack frame for cpu_hatch() to
	 * appear as a callee frame of lwp_trampoline(). Being a
	 * leaf frame prevents trampling on any of the MD stack setup
	 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop()
	 */

	initctx->user_regs.esp = pcb->pcb_esp - 4; /* Leave word for arg1 */
	{
		/* targeteip(ci); */
		uint32_t *arg = (uint32_t *)initctx->user_regs.esp;
		arg[1] = (uint32_t)ci; /* arg1 */
	}

	initctx->user_regs.eip = (vaddr_t)targeteip;
	initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	initctx->user_regs.eflags |= pcb->pcb_iopl;

	/* Data segments */
	initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	initctx->user_regs.fs = GSEL(GDATA_SEL, SEL_KPL);

	/* GDT */
	memcpy(initctx->gdt_frames, frames, sizeof(frames));
	initctx->gdt_ents = gdt_ents;

	/* LDT */
	initctx->ldt_base = (unsigned long)ldtstore;
	initctx->ldt_ents = NLDT;

	/* Kernel context state */
	initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	initctx->kernel_sp = pcb->pcb_esp0;
	initctx->ctrlreg[0] = pcb->pcb_cr0;
	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
	initctx->ctrlreg[2] = (vaddr_t)targeteip;
#ifdef PAE
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_pae_l3_pdirpa)));
#else
	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(pcb->pcb_cr3)));
#endif
	initctx->ctrlreg[4] = /* CR4_PAE | */CR4_OSFXSR | CR4_OSXMMEXCPT;

	/* Xen callbacks */
	initctx->event_callback_eip = (unsigned long)hypervisor_callback;
	initctx->event_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	initctx->failsafe_callback_eip = (unsigned long)failsafe_callback;
	initctx->failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
}
#endif /* __x86_64__ */

int
mp_cpu_start(struct cpu_info *ci, vaddr_t target)
{
	int hyperror;
	struct vcpu_guest_context vcpuctx;

	KASSERT(ci != NULL);
	KASSERT(ci != &cpu_info_primary);
	KASSERT(ci->ci_flags & CPUF_AP);

#ifdef __x86_64__
	xen_init_amd64_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target);
#else
	xen_init_i386_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target);
#endif

	/* Initialise the given vcpu to execute cpu_hatch(ci); */
	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_initialise, ci->ci_cpuid,
	    &vcpuctx))) {
		aprint_error(": context initialisation failed. errno = %d\n",
		    hyperror);
		return hyperror;
	}

	/* Start it up */

	/* First bring it down */
	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL))) {
		aprint_error(": VCPUOP_down hypervisor command failed. "
		    "errno = %d\n", hyperror);
		return hyperror;
	}

	if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_up, ci->ci_cpuid, NULL))) {
		aprint_error(": VCPUOP_up hypervisor command failed. "
		    "errno = %d\n", hyperror);
		return hyperror;
	}

	if (!vcpu_is_up(ci)) {
		aprint_error(": did not come up\n");
		return -1;
	}

	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
	if (vcpu_is_up(ci)) {
		aprint_debug_dev(ci->ci_dev, "is started.\n");
	} else {
		aprint_error_dev(ci->ci_dev, "did not start up.\n");
	}
}

void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
#ifdef __x86_64__
	if (full) {
		HYPERVISOR_set_segment_base(SEGBASE_FS, 0);
		HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (uint64_t)ci);
		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 0);
	}
#endif

	if (cpu_feature[2] & CPUID_NOX)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}
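
/*
 * Note: pointing SEGBASE_GS_KERNEL at the cpu_info above is what makes
 * the %gs-relative curcpu()/curlwp accessors work on amd64.  Setting
 * EFER_NXE, when the CPUID_NOX extended-feature bit is present, makes
 * no-execute (PG_NX) mappings actually fault instead of being silently
 * executable.
 */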

void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
	fpusave_cpu(true);
	splx(s);
}

void
cpu_get_tsc_freq(struct cpu_info *ci)
{
	uint32_t vcpu_tversion;
	const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time;

	vcpu_tversion = tinfo->version;
	while (tinfo->version == vcpu_tversion) {
		/* Wait for a time update. XXX: timeout ? */
	}

	uint64_t freq = 1000000000ULL << 32;
	freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
	if (tinfo->tsc_shift < 0)
		freq = freq << -tinfo->tsc_shift;
	else
		freq = freq >> tinfo->tsc_shift;
	ci->ci_data.cpu_cc_freq = freq;
}
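
/*
 * Worked example of the fixed-point conversion above, with made-up
 * values.  Xen defines the scaling so that
 *	ns = ((tsc_ticks << tsc_shift) * tsc_to_system_mul) >> 32
 * hence freq = (10^9 << 32) / tsc_to_system_mul, shifted by tsc_shift.
 * For tsc_to_system_mul = 2^31 and tsc_shift = 0 this yields
 * (10^9 << 32) / 2^31 = 2 * 10^9, i.e. a 2 GHz TSC.
 */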

void
x86_cpu_idle_xen(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

	x86_disable_intr();
	if (!__predict_false(ci->ci_want_resched)) {
		idle_block();
	} else {
		x86_enable_intr();
	}
}

/*
 * Loads pmap for the current CPU.
 */
void
cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
{
	KASSERT(pmap != pmap_kernel());

#if defined(__x86_64__) || defined(PAE)
	struct cpu_info *ci = curcpu();
	cpuid_t cid = cpu_index(ci);

	mutex_enter(&ci->ci_kpm_mtx);
	/* make new pmap visible to xen_kpm_sync() */
	kcpuset_atomic_set(pmap->pm_xen_ptp_cpus, cid);
#endif

#ifdef i386
#ifdef PAE
	{
		int i;
		paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);

		/* don't update the kernel L3 slot */
		for (i = 0; i < PDP_SIZE - 1; i++) {
			xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
			    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
		}
		tlbflush();
	}
#else /* PAE */
	lcr3(pmap_pdirpa(pmap, 0));
#endif /* PAE */
#endif /* i386 */

#ifdef __x86_64__
	{
		int i;
		pd_entry_t *new_pgd;
		paddr_t l4_pd_ma;

		l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);

		/*
		 * Map user space address in kernel space and load
		 * user cr3
		 */
		new_pgd = pmap->pm_pdir;
		KASSERT(pmap == ci->ci_pmap);

		/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
		for (i = 0; i < PDIR_SLOT_PTE; i++) {
			KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
			if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
				xpq_queue_pte_update(
				    l4_pd_ma + i * sizeof(pd_entry_t),
				    new_pgd[i]);
			}
		}

		xen_set_user_pgd(pmap_pdirpa(pmap, 0));
		ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);

		tlbflush();
	}
#endif /* __x86_64__ */

#if defined(__x86_64__) || defined(PAE)
	/* old pmap no longer visible to xen_kpm_sync() */
	if (oldpmap != pmap_kernel()) {
		kcpuset_atomic_clear(oldpmap->pm_xen_ptp_cpus, cid);
	}
	mutex_exit(&ci->ci_kpm_mtx);
#endif
}

/*
 * pmap_cpu_init_late: perform late per-CPU initialization.
 *
 * Short note about percpu PDIR pages. Both the PAE and __x86_64__
 * architectures have per-cpu PDIR tables, for two different reasons:
 *  - on PAE, this is to get around Xen's pagetable setup constraints
 *    (multiple L3[3]s cannot point to the same L2 - Xen will refuse to
 *    pin a table set up this way).
 *  - on __x86_64__, this is for multiple CPUs to map in different user
 *    pmaps (see cpu_load_pmap()).
 *
 * What this means for us is that the PDIR of the pmap_kernel() is
 * considered to be a canonical "SHADOW" PDIR with the following
 * properties:
 *  - its recursive mapping points to itself
 *  - per-cpu recursive mappings point to themselves on __x86_64__
 *  - per-cpu L4 pages' kernel entries are expected to be in sync with
 *    the shadow
 */

void
pmap_cpu_init_late(struct cpu_info *ci)
{
#if defined(PAE) || defined(__x86_64__)
	/*
	 * The BP has already its own PD page allocated during early
	 * MD startup.
	 */

#if defined(__x86_64__)
	/* Setup per-cpu normal_pdes */
	int i;
	extern pd_entry_t * const normal_pdes[];

	for (i = 0; i < PTP_LEVELS - 1; i++) {
		ci->ci_normal_pdes[i] = normal_pdes[i];
	}
#endif /* __x86_64__ */

	if (ci == &cpu_info_primary)
		return;

	KASSERT(ci != NULL);

#if defined(PAE)
	cpu_alloc_l3_page(ci);
	KASSERT(ci->ci_pae_l3_pdirpa != 0);

	/* Initialise L2 entries 0 - 2: Point them to pmap_kernel() */
	int i;

	for (i = 0; i < PDP_SIZE - 1; i++) {
		ci->ci_pae_l3_pdir[i] =
		    xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PG_V;
	}
#endif /* PAE */

	ci->ci_kpm_pdir = (pd_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);

	if (ci->ci_kpm_pdir == NULL) {
		panic("%s: failed to allocate L4 per-cpu PD for CPU %d",
		    __func__, cpu_index(ci));
	}
	ci->ci_kpm_pdirpa = vtophys((vaddr_t)ci->ci_kpm_pdir);
	KASSERT(ci->ci_kpm_pdirpa != 0);

#if defined(__x86_64__)
	extern pt_entry_t xpmap_pg_nx;

	/* Copy over the pmap_kernel() shadow L4 entries */
	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir, PAGE_SIZE);

	/* Recursive kernel mapping */
	ci->ci_kpm_pdir[PDIR_SLOT_PTE] = xpmap_ptom_masked(ci->ci_kpm_pdirpa)
	    | PG_V | xpmap_pg_nx;
#elif defined(PAE)
	/* Copy over the pmap_kernel() shadow L2 entries */
	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN,
	    nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t));
#endif

	/* Xen wants a RO pdir. */
	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_kpm_pdir,
	    (vaddr_t)ci->ci_kpm_pdir + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());
#if defined(PAE)
	/*
	 * Initialize L3 entry 3. This mapping is shared across all pmaps
	 * and is static, ie: loading a new pmap will not update this entry.
	 */
	ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_V;

	/* Xen wants a RO L3. */
	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_pae_l3_pdir,
	    (vaddr_t)ci->ci_pae_l3_pdir + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());

	xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa));

#elif defined(__x86_64__)
	xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
#endif /* PAE , __x86_64__ */
#endif /* defined(PAE) || defined(__x86_64__) */
}
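
/*
 * Sketch of the resulting invariant on amd64, as a hypothetical
 * assertion loop (illustrative only, kept out of the build; assumes the
 * usual NPDPG and PDIR_SLOT_KERN definitions): after pmap_cpu_init_late()
 * the kernel slots of a secondary CPU's per-cpu L4 mirror the
 * pmap_kernel() shadow, and xen_kpm_sync() keeps them that way.
 */
#if 0
	for (int slot = PDIR_SLOT_KERN; slot < NPDPG; slot++)
		KASSERT(ci->ci_kpm_pdir[slot] ==
		    pmap_kernel()->pm_pdir[slot]);
#endif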

/*
 * Notify all other cpus to halt.
 */

void
cpu_broadcast_halt(void)
{
	xen_broadcast_ipi(XEN_IPI_HALT);
}

/*
 * Send a dummy ipi to a cpu.
 */

void
cpu_kick(struct cpu_info *ci)
{
	(void)xen_send_ipi(ci, XEN_IPI_KICK);
}