/*	cpu.c,v 1.6.2.2 2008/01/09 01:50:13 matt Exp	*/
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by RedBack Networks Inc.
 *
 * Author: Bill Sommerfeld
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Stefan Grefen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "cpu.c,v 1.6.2.2 2008/01/09 01:50:13 matt Exp");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
#include "opt_mtrr.h"
#include "opt_xen.h"

#include "lapic.h"
#include "ioapic.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/gdt.h>
#include <machine/mtrr.h>
#include <machine/pio.h>

#ifdef XEN3
#include <xen/vcpuvar.h>
#endif

#if NLAPIC > 0
#include <machine/apicvar.h>
#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#endif

#if NIOAPIC > 0
#include <machine/i82093var.h>
#endif

#include <dev/ic/mc146818reg.h>
#include <dev/isa/isareg.h>
int     cpu_match(struct device *, struct cfdata *, void *);
void    cpu_attach(struct device *, struct device *, void *);
#ifdef XEN3
int     vcpu_match(struct device *, struct cfdata *, void *);
void    vcpu_attach(struct device *, struct device *, void *);
#endif
void    cpu_attach_common(struct device *, struct device *, void *);
void	cpu_offline_md(void);

struct cpu_softc {
	struct device sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
};

int mp_cpu_start(struct cpu_info *, paddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
				      mp_cpu_start_cleanup };

CFATTACH_DECL(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL);
#ifdef XEN3
CFATTACH_DECL(vcpu, sizeof(struct cpu_softc),
    vcpu_match, vcpu_attach, NULL, NULL);
#endif

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
#ifdef TRAPLOG
#include <machine/tlog.h>
struct tlog tlog_primary;
#endif
struct cpu_info cpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
#ifdef TRAPLOG
	.ci_tlog = &tlog_primary,
#endif

};
struct cpu_info phycpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &phycpu_info_primary,
};

struct cpu_info *cpu_info_list = &cpu_info_primary;

static void	cpu_set_tss_gates(struct cpu_info *ci);

u_int32_t cpus_attached = 0;

struct cpu_info *phycpu_info[X86_MAXPROCS] = { &phycpu_info_primary };

#ifdef MULTIPROCESSOR
/*
 * Array of CPU info structures.  Must be statically-allocated because
 * curproc, etc. are used early.
 */
struct cpu_info *cpu_info[X86_MAXPROCS] = { &cpu_info_primary };

u_int32_t cpus_running = 0;

void	cpu_hatch(void *);
static void	cpu_boot_secondary(struct cpu_info *ci);
static void	cpu_start_secondary(struct cpu_info *ci);
static void	cpu_copy_trampoline(void);

/*
 * Runs once per boot once multiprocessor goo has been detected and
 * the local APIC on the boot processor has been mapped.
 *
 * Called from lapic_boot_init() (from mpbios_scan()).
 */
void
cpu_init_first()
{
	int cpunum = lapic_cpu_number();

	if (cpunum != 0) {
		cpu_info[0] = NULL;
		cpu_info[cpunum] = &cpu_info_primary;
	}

	cpu_copy_trampoline();
}
#endif

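/*
 * Autoconfiguration glue: cpu(4) matches unconditionally; the real
 * per-CPU setup happens in cpu_attach()/cpu_attach_common() below.
 */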
int
cpu_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{

	return 1;
}

void
cpu_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef XEN3
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	int cpunum = caa->cpu_number;

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
		if (phycpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		phycpu_info[cpunum] = ci;
	} else {
		ci = &phycpu_info_primary;
		if (cpunum != 0) {
			phycpu_info[0] = NULL;
			phycpu_info[cpunum] = ci;
		}
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
	ci->ci_cpuid = ci->ci_apicid;

	printf(": ");
	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		break;

	case CPU_ROLE_BP:
		printf("(boot processor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("(application processor)\n");
		break;

	default:
		panic("unknown processor type??");
	}
	return;
#else
	cpu_attach_common(parent, self, aux);
#endif
}

#ifdef XEN3
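/*
 * On Xen 3, vcpu(4) devices represent the hypervisor's virtual CPUs:
 * match on the name passed in the attach arguments and reuse the
 * common attach path with the embedded cpu_attach_args.
 */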
int
vcpu_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct vcpu_attach_args *vcaa = aux;

	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0)
		return 1;
	return 0;
}

void
vcpu_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct vcpu_attach_args *vcaa = aux;

	cpu_attach_common(parent, self, &vcaa->vcaa_caa);
}
#endif

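/*
 * Derive the number of page colors from the largest cache reported by
 * this CPU (total size / page size / associativity, or one color for a
 * fully associative cache) and ask UVM to re-color its free pages if
 * that exceeds the current setting.
 */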
static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch(cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
	}

	/*
	 * Knowing the size of the largest cache on this CPU, re-color
	 * our pages.
	 */
	if (ncolors <= uvmexp.ncolors)
		return;
	printf("%s: %d page colors\n", ci->ci_dev->dv_xname, ncolors);
	uvm_page_recolor(ncolors);
}

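/*
 * Common attach path shared by cpu(4) and vcpu(4): pick or allocate the
 * cpu_info, wire it to the device and the kernel pmap, and then finish
 * initializing the boot CPU or spin up a secondary, depending on the
 * role reported in the attach arguments.
 */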
void
cpu_attach_common(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = caa->cpu_number;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		if (cpunum != lapic_cpu_number()) {
			panic("%s: running CPU is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(), cpunum);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = ci->ci_apicid;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_cpumask = (1 << ci->ci_cpuid);
	ci->ci_func = caa->cpu_func;

	if (caa->cpu_role == CPU_ROLE_AP) {
#if defined(MULTIPROCESSOR)
		int error;

		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_normal("\n");
			aprint_error("%s: mi_cpu_attach failed with %d\n",
			    sc->sc_dev.dv_xname, error);
			return;
		}
#endif
	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		cpu_set_tss_gates(ci);
		cpu_start_secondary(ci);
		if (ci->ci_flags & CPUF_PRESENT) {
			identifycpu(ci);
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??");
	}
	cpu_vm_init(ci);

	cpus_attached |= (1 << ci->ci_cpuid);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;

		aprint_verbose("%s: idle lwp at %p, idle sp at 0x%x\n",
		    sc->sc_dev.dv_xname, l, l->l_addr->u_pcb.pcb_esp);
	}
#endif
}

/*
 * Initialize the processor appropriately.
 */

void
cpu_init(ci)
	struct cpu_info *ci;
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	/*
	 * On a P6 or above, enable global TLB caching if the
	 * hardware supports it.
	 */
	if (cpu_feature & CPUID_PGE)
		lcr4(rcr4() | CR4_PGE);	/* enable global TLB caching */

#ifdef XXXMTRR
	/*
	 * On a P6 or above, initialize MTRR's if the hardware supports them.
	 */
	if (cpu_feature & CPUID_MTRR) {
		if ((ci->ci_flags & CPUF_AP) == 0)
			i686_mtrr_init_first();
		mtrr_init_cpu(ci);
	}
#endif
	/*
	 * If we have FXSAVE/FXRSTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2))
			lcr4(rcr4() | CR4_OSXMMEXCPT);
	}

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	cpus_running |= 1 << ci->ci_cpuid;
#endif
}


#ifdef MULTIPROCESSOR
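/*
 * Called late in boot to release every attached, present AP (skipping
 * the boot/primary CPU) into its idle loop.
 */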
void
cpu_boot_secondary_processors()
{
	struct cpu_info *ci;
	u_long i;

	for (i=0; i < X86_MAXPROCS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
	}
}

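/*
 * Record the boot-time CR0 value in a CPU's idle lwp PCB so the idle
 * lwp starts with a known control-register state; cpu_init_idle_lwps()
 * below applies this to every present CPU.
 */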
static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = &l->l_addr->u_pcb;

	pcb->pcb_cr0 = rcr0();
}

void
cpu_init_idle_lwps()
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < X86_MAXPROCS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

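/*
 * Start an AP: publish the kernel page directory for the trampoline,
 * mark the CPU as an AP, fire its cpu_functions start hook, and wait
 * (with a timeout) for cpu_hatch() to announce CPUF_PRESENT.
 */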
void
cpu_start_secondary (ci)
	struct cpu_info *ci;
{
	int i;
	struct pmap *kpm = pmap_kernel();
	extern u_int32_t mp_pdirpa;

	mp_pdirpa = kpm->pm_pdirpa; /* XXX move elsewhere, not per CPU. */

	ci->ci_flags |= CPUF_AP;

	printf("%s: starting\n", ci->ci_dev->dv_xname);

	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
	CPU_STARTUP(ci);

	/*
	 * wait for it to become ready
	 */
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i>0;i--) {
		delay(10);
	}
	if (! (ci->ci_flags & CPUF_PRESENT)) {
		printf("%s: failed to become ready\n", ci->ci_dev->dv_xname);
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}

	CPU_START_CLEANUP(ci);
}

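/*
 * Let a previously started AP proceed past its CPUF_GO gate and wait
 * (with a timeout) for it to report CPUF_RUNNING.
 */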
void
cpu_boot_secondary(ci)
	struct cpu_info *ci;
{
	int i;

	ci->ci_flags |= CPUF_GO; /* XXX atomic */

	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i>0;i--) {
		delay(10);
	}
	if (! (ci->ci_flags & CPUF_RUNNING)) {
		printf("CPU failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}
}

/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;
#ifdef __x86_64__
	cpu_init_msrs(ci, true);
#endif

	cpu_probe_features(ci);
	cpu_feature &= ci->ci_feature_flags;
	/* not on Xen... */
	cpu_feature &= ~(CPUID_PGE|CPUID_PSE|CPUID_MTRR|CPUID_FXSR|CPUID_NOX);

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_initclocks();

	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	lcr0(ci->ci_data.cpu_idlelwp->l_addr->u_pcb.pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	npxinit(ci);

	lldt(GSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);

	s = splhigh();
	lapic_tpr = 0;
	enable_intr();

	printf("%s: CPU %ld running\n", ci->ci_dev->dv_xname, ci->ci_cpuid);
	if (ci->ci_feature_flags & CPUID_TSC)
		cc_microset(ci);
	splx(s);
}

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curproc		fpcurproc\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : ci->ci_dev->dv_xname,
		    ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif

static void
cpu_copy_trampoline()
{
	/*
	 * Copy boot code.
	 */
	extern u_char cpu_spinup_trampoline[];
	extern u_char cpu_spinup_trampoline_end[];
	pmap_kenter_pa((vaddr_t)MP_TRAMPOLINE,	/* virtual */
	    (paddr_t)MP_TRAMPOLINE,	/* physical */
	    VM_PROT_ALL);		/* protection */
	memcpy((void *)MP_TRAMPOLINE,
	    cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end-cpu_spinup_trampoline);
}

#endif


/* XXX */
#define IDTVEC(name)	__CONCAT(X, name)
typedef void (vector)(void);
extern vector IDTVEC(tss_trap08);
#ifdef DDB
extern vector Xintrddbipi;
extern int ddb_vec;
#endif

static void
cpu_set_tss_gates(struct cpu_info *ci)
{
#if defined(DDB) && defined(MULTIPROCESSOR)
	/*
	 * Set up separate handler for the DDB IPI, so that it doesn't
	 * stomp on a possibly corrupted stack.
	 *
	 * XXX overwriting the gate set in db_machine_init.
	 * Should rearrange the code so that it's set only once.
	 */
	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
	    UVM_KMF_WIRED);
	tss_init(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack,
	    Xintrddbipi);

	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GIPITSS_SEL].sd = sd;

	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}

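/*
 * Standard Intel MP startup sequence: set the CMOS shutdown code and
 * warm-reset vector, then send INIT/STARTUP IPIs to the AP.  The body
 * is disabled (#if 0) in this Xen port, where the hypervisor starts
 * the virtual CPUs; the function remains to satisfy mp_cpu_funcs.
 */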
int
mp_cpu_start(struct cpu_info *ci, paddr_t target)
{
#if 0
#if NLAPIC > 0
	int error;
#endif
	unsigned short dwordptr[2];

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = target >> 4;

	pmap_kenter_pa (0, 0, VM_PROT_READ|VM_PROT_WRITE);
	memcpy ((u_int8_t *) 0x467, dwordptr, 4);
	pmap_kremove (0, PAGE_SIZE);

#if NLAPIC > 0
	/*
	 * ... prior to executing the following sequence:"
	 */

	if (ci->ci_flags & CPUF_AP) {
		if ((error = x86_ipi_init(ci->ci_apicid)) != 0)
			return error;

		delay(10000);

		if (cpu_feature & CPUID_APIC) {

			if ((error = x86_ipi(target/PAGE_SIZE,
					     ci->ci_apicid,
					     LAPIC_DLMODE_STARTUP)) != 0)
				return error;
			delay(200);

			if ((error = x86_ipi(target/PAGE_SIZE,
					     ci->ci_apicid,
					     LAPIC_DLMODE_STARTUP)) != 0)
				return error;
			delay(200);
		}
	}
#endif
#endif /* 0 */
	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
#if 0
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
#endif
}

#ifdef __x86_64__

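/*
 * Set up the per-CPU segment base registers.  Under Xen/amd64 this is
 * done with HYPERVISOR_set_segment_base() rather than by writing the
 * FS/GS base MSRs directly; the kernel GS base points at our cpu_info.
 */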
void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
	if (full) {
		HYPERVISOR_set_segment_base (SEGBASE_FS, 0);
		HYPERVISOR_set_segment_base (SEGBASE_GS_KERNEL, (u_int64_t) ci);
		HYPERVISOR_set_segment_base (SEGBASE_GS_USER, 0);
	}
}
#endif	/* __x86_64__ */

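/*
 * Determine the TSC frequency.  On Xen 3 the per-vCPU time info gives
 * a scale such that
 *
 *	ns = ((tsc_delta << tsc_shift) * tsc_to_system_mul) >> 32
 *
 * so the TSC frequency in Hz is
 *
 *	freq = ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
 *
 * (shifted left instead when tsc_shift is negative), which is what the
 * XEN3 branch below computes.
 */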
void
cpu_get_tsc_freq(struct cpu_info *ci)
{
#ifdef XEN3
	const volatile vcpu_time_info_t *tinfo =
		   &HYPERVISOR_shared_info->vcpu_info[0].time;
	delay(1000000);
	uint64_t freq = 1000000000ULL << 32;
	freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
	if (tinfo->tsc_shift < 0)
		freq = freq << -tinfo->tsc_shift;
	else
		freq = freq >> tinfo->tsc_shift;
	ci->ci_tsc_freq = freq;
#else
	/* XXX this needs to read the shared_info of the CPU being probed.. */
	ci->ci_tsc_freq = HYPERVISOR_shared_info->cpu_freq;
#endif /* XEN3 */
}

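/*
 * Machine-dependent part of taking a CPU offline: save any FPU state
 * still held on this CPU (npxsave_cpu() on i386, fpusave_cpu() on
 * amd64) with interrupts blocked.
 */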
void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
#ifdef __i386__
	npxsave_cpu(true);
#else
	fpusave_cpu(true);
#endif
	splx(s);
}