      1 /*	$NetBSD: cpu.c,v 1.140 2021/04/05 22:36:27 nakayama Exp $ */
      2 
      3 /*
      4  * Copyright (c) 1996
      5  *	The President and Fellows of Harvard College. All rights reserved.
      6  * Copyright (c) 1992, 1993
      7  *	The Regents of the University of California.  All rights reserved.
      8  *
      9  * This software was developed by the Computer Systems Engineering group
     10  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     11  * contributed to Berkeley.
     12  *
     13  * All advertising materials mentioning features or use of this software
     14  * must display the following acknowledgement:
     15  *	This product includes software developed by Harvard University.
     16  *	This product includes software developed by the University of
     17  *	California, Lawrence Berkeley Laboratory.
     18  *
     19  * Redistribution and use in source and binary forms, with or without
     20  * modification, are permitted provided that the following conditions
     21  * are met:
     22  *
     23  * 1. Redistributions of source code must retain the above copyright
     24  *    notice, this list of conditions and the following disclaimer.
     25  * 2. Redistributions in binary form must reproduce the above copyright
     26  *    notice, this list of conditions and the following disclaimer in the
     27  *    documentation and/or other materials provided with the distribution.
     28  * 3. All advertising materials mentioning features or use of this software
     29  *    must display the following acknowledgement:
     30  *	This product includes software developed by Aaron Brown and
     31  *	Harvard University.
     32  *	This product includes software developed by the University of
     33  *	California, Berkeley and its contributors.
     34  * 4. Neither the name of the University nor the names of its contributors
     35  *    may be used to endorse or promote products derived from this software
     36  *    without specific prior written permission.
     37  *
     38  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     39  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     40  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     41  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     42  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     43  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     44  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     45  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     46  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     47  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     48  * SUCH DAMAGE.
     49  *
     50  *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
     51  *
     52  */
     53 
     54 #include <sys/cdefs.h>
     55 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.140 2021/04/05 22:36:27 nakayama Exp $");
     56 
     57 #include "opt_multiprocessor.h"
     58 
     59 #include <sys/param.h>
     60 #include <sys/systm.h>
     61 #include <sys/device.h>
     62 #include <sys/kernel.h>
     63 #include <sys/reboot.h>
     64 #include <sys/cpu.h>
     65 #include <sys/sysctl.h>
     66 #include <sys/kmem.h>
     67 
     68 #include <uvm/uvm.h>
     69 
     70 #include <machine/autoconf.h>
     71 #include <machine/cpu.h>
     72 #include <machine/reg.h>
     73 #include <machine/trap.h>
     74 #include <machine/pmap.h>
     75 #include <machine/sparc64.h>
     76 #include <machine/openfirm.h>
     77 #include <machine/hypervisor.h>
     78 #include <machine/mdesc.h>
     79 
     80 #include <sparc64/sparc64/cache.h>
     81 
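         /*
          * Geometry of the sun4v cpu and device mondo (interrupt) queues:
          * each queue is configured with SUN4V_MONDO_QUEUE_SIZE entries of
          * SUN4V_QUEUE_ENTRY_SIZE bytes (see the hv_cpu_qconf() calls in
          * cpu_attach() below).
          */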
     82 #define SUN4V_MONDO_QUEUE_SIZE	32
     83 #define SUN4V_QUEUE_ENTRY_SIZE	64
     84 
     85 int ecache_min_line_size;
     86 
     87 /* Linked list of all CPUs in system. */
     88 #if defined(MULTIPROCESSOR)
     89 int sparc_ncpus = 0;
     90 #endif
     91 struct cpu_info *cpus = NULL;
     92 
      93 volatile sparc64_cpuset_t cpus_active;	/* set of active cpus */
     94 struct cpu_bootargs *cpu_args;	/* allocated very early in pmap_bootstrap. */
     95 struct pool_cache *fpstate_cache;
     96 
     97 static struct cpu_info *alloc_cpuinfo(u_int);
     98 static void cpu_idle_sun4v(void);
     99 
    100 /* The following are used externally (sysctl_hw). */
    101 char	machine[] = MACHINE;		/* from <machine/param.h> */
    102 char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
    103 
     104 /* These are used in locore.s, and are the maximums across all CPUs */
    105 int	dcache_line_size;
    106 int	dcache_size;
    107 int	icache_line_size;
    108 int	icache_size;
    109 
    110 #ifdef MULTIPROCESSOR
    111 static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES;
    112 #endif
    113 
    114 static void cpu_reset_fpustate(void);
    115 
    116 volatile int sync_tick = 0;
    117 
    118 /* The CPU configuration driver. */
    119 void cpu_attach(device_t, device_t, void *);
    120 int cpu_match(device_t, cfdata_t, void *);
    121 
    122 CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL);
    123 
    124 static int
    125 cpuid_from_node(u_int cpu_node)
    126 {
    127 	/*
     128 	 * Determine the cpuid by examining the node's properties
    129 	 * in the following order:
    130 	 *  upa-portid
    131 	 *  portid
    132 	 *  cpuid
    133 	 *  reg (sun4v only)
    134 	 */
    135 
    136 	int id;
    137 
    138 	id = prom_getpropint(cpu_node, "upa-portid", -1);
    139 	if (id == -1)
    140 		id = prom_getpropint(cpu_node, "portid", -1);
    141 	if (id == -1)
    142 		id = prom_getpropint(cpu_node, "cpuid", -1);
     143 	if (CPU_ISSUN4V) {
     144 		int reg[4];
     145 		int *regp = reg;
     146 		int len = 4;
     147 		int rc = prom_getprop(cpu_node, "reg", sizeof(int),
     148 		    &len, &regp);
     149 		if (rc != 0)
     150 			panic("No reg property found");
     151 		/* sun4v: the cpuid is in the low bits of the first "reg" cell */
     152 		id = reg[0] & 0x0fffffff;
     153 	}
    154 	if (id == -1)
    155 		panic("failed to determine cpuid");
    156 
    157 	return id;
    158 }
    159 
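         /*
          * Look up a cache property in the sun4v machine description (MD):
          * walk the MD looking for "cache" nodes of the requested type
          * ("instn", "data", or NULL for any) and level, and return the
          * value of the named property, or 0 if no matching node is found.
          */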
    160 static int
    161 cpu_cache_info_sun4v(const char *type, int level, const char *prop)
    162 {
    163 	int idx = 0;
    164 	uint64_t val = 0;
    165 	idx = mdesc_find_node_by_idx(idx, "cache");
    166 	while (idx != -1 && val == 0) {
    167 		const char *name = mdesc_name_by_idx(idx);
    168 		if (strcmp("cache", name) == 0) {
    169 			const char *p;
    170 			size_t len = 0;
    171 			p = mdesc_get_prop_data(idx, "type", &len);
    172 			if (p == NULL)
     173 			panic("No type found");
    174 			if (len == 0)
    175 				panic("Len is zero");
    176 			if (type == NULL || strcmp(p, type) == 0) {
    177 				uint64_t l;
    178 				l = mdesc_get_prop_val(idx, "level");
    179 				if (l == level)
    180 					val = mdesc_get_prop_val(idx, prop);
    181 			}
    182 		}
    183 		if (val == 0)
    184 			idx = mdesc_next_node(idx);
    185 	}
    186 	return val;
    187 }
    188 
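         /*
          * Per-level cache geometry accessors: on sun4v the information
          * comes from the machine description, otherwise it is read from
          * the OpenPROM node, with fallback defaults where a property is
          * missing.
          */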
    189 static int
    190 cpu_icache_size(int node)
    191 {
    192 	if (CPU_ISSUN4V)
    193 		return cpu_cache_info_sun4v("instn", 1, "size");
    194 	else
    195 		return prom_getpropint(node, "icache-size", 0);
    196 }
    197 
    198 static int
    199 cpu_icache_line_size(int node)
    200 {
    201 	if (CPU_ISSUN4V)
    202 		return cpu_cache_info_sun4v("instn", 1, "line-size");
    203 	else
    204 		return prom_getpropint(node, "icache-line-size", 0);
    205 }
    206 
    207 static int
    208 cpu_icache_nlines(int node)
    209 {
    210 	if (CPU_ISSUN4V)
    211 		return 0;
    212 	else
    213 		return prom_getpropint(node, "icache-nlines", 64);
    214 }
    215 
    216 static int
    217 cpu_icache_associativity(int node)
    218 {
    219 	if (CPU_ISSUN4V) {
    220 		int val;
    221 		val = cpu_cache_info_sun4v("instn", 1, "associativity");
    222 		if (val == 0)
    223 			val = 1;
    224 		return val;
    225 	} else
    226 		return prom_getpropint(node, "icache-associativity", 1);
    227 }
    228 
    229 static int
    230 cpu_dcache_size(int node)
    231 {
    232 	if (CPU_ISSUN4V)
    233 		return cpu_cache_info_sun4v("data", 1, "size");
    234 	else
    235 		return prom_getpropint(node, "dcache-size", 0);
    236 }
    237 
    238 static int
    239 cpu_dcache_line_size(int node)
    240 {
    241 	if (CPU_ISSUN4V)
    242 		return cpu_cache_info_sun4v("data", 1, "line-size");
    243 	else
    244 		return prom_getpropint(node, "dcache-line-size", 0);
    245 }
    246 
    247 static int
    248 cpu_dcache_nlines(int node)
    249 {
    250 	if (CPU_ISSUN4V)
    251 		return 0;
    252 	else
    253 		return prom_getpropint(node, "dcache-nlines", 128);
    254 }
    255 
    256 static int
    257 cpu_dcache_associativity(int node)
    258 {
    259 	if (CPU_ISSUN4V) {
    260 		int val;
    261 		val = cpu_cache_info_sun4v("data", 1, "associativity");
    262 		if (val == 0)
    263 			val = 1;
    264 		return val;
    265 	} else
    266 		return prom_getpropint(node, "dcache-associativity", 1);
    267 }
    268 
    269 int
    270 cpu_ecache_size(int node)
    271 {
    272 	if (CPU_ISSUN4V)
    273 		return cpu_cache_info_sun4v(NULL, 2, "size");
    274 	else
    275 		return prom_getpropint(node, "ecache-size", 0);
    276 }
    277 
    278 static int
    279 cpu_ecache_line_size(int node)
    280 {
    281 	if (CPU_ISSUN4V)
    282 		return cpu_cache_info_sun4v(NULL, 2, "line-size");
    283 	else
    284 		return prom_getpropint(node, "ecache-line-size", 0);
    285 }
    286 
    287 static int
    288 cpu_ecache_nlines(int node)
    289 {
    290 	if (CPU_ISSUN4V)
    291 		return 0;
    292 	else
    293 		return prom_getpropint(node, "ecache-nlines", 32768);
    294 }
    295 
    296 int
    297 cpu_ecache_associativity(int node)
    298 {
    299 	if (CPU_ISSUN4V) {
    300 		int val;
    301 		val = cpu_cache_info_sun4v(NULL, 2, "associativity");
    302 		if (val == 0)
    303 			val = 1;
    304 		return val;
    305 	} else
    306 		return prom_getpropint(node, "ecache-associativity", 1);
    307 }
    308 
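         /*
          * Find or allocate the cpu_info for the CPU described by cpu_node.
          * If a cpu_info with a matching cpuid already exists it is simply
          * returned; otherwise an 8-page per-CPU area (interrupt stack, idle
          * stack/pcb and the cpu_info itself, laid out as for the boot CPU)
          * is mapped from the physical memory reserved at cpu0paddr and the
          * new cpu_info is linked onto the cpus list.
          */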
    309 struct cpu_info *
    310 alloc_cpuinfo(u_int cpu_node)
    311 {
    312 	paddr_t pa0, pa;
    313 	vaddr_t va, va0;
    314 	vsize_t sz = 8 * PAGE_SIZE;
    315 	int cpuid;
    316 	struct cpu_info *cpi, *ci;
    317 	extern paddr_t cpu0paddr;
    318 
    319 	/*
    320 	 * Check for matching cpuid in the cpus list.
    321 	 */
    322 	cpuid = cpuid_from_node(cpu_node);
    323 
    324 	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
    325 		if (cpi->ci_cpuid == cpuid)
    326 			return cpi;
    327 
     328 	/* Allocate an aligned chunk of KVA for the per-CPU area. */
    329 	va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY);
    330 	if (!va)
    331 		panic("alloc_cpuinfo: no virtual space");
    332 	va0 = va;
    333 
    334 	pa0 = cpu0paddr;
    335 	cpu0paddr += sz;
    336 
    337 	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
    338 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
    339 
    340 	pmap_update(pmap_kernel());
    341 
    342 	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);
    343 
    344 	memset((void *)va0, 0, sz);
    345 
    346 	/*
    347 	 * Initialize cpuinfo structure.
    348 	 *
    349 	 * Arrange pcb, idle stack and interrupt stack in the same
    350 	 * way as is done for the boot CPU in pmap.c.
    351 	 */
    352 	cpi->ci_next = NULL;
    353 	cpi->ci_curlwp = NULL;
    354 	cpi->ci_cpuid = cpuid;
    355 	cpi->ci_fplwp = NULL;
    356 	cpi->ci_eintstack = NULL;
    357 	cpi->ci_spinup = NULL;
    358 	cpi->ci_paddr = pa0;
    359 	cpi->ci_self = cpi;
    360 	if (CPU_ISSUN4V)
    361 		cpi->ci_mmufsa = pa0;
    362 	cpi->ci_node = cpu_node;
    363 	cpi->ci_idepth = -1;
    364 	memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending));
    365 
    366 	/*
     367 	 * Finally, link the new cpu_info onto the end of the cpus list.
    368 	 */
    369 	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
    370 		;
    371 #ifdef MULTIPROCESSOR
    372 	ci->ci_next = cpi;
    373 #endif
    374 	return (cpi);
    375 }
    376 
    377 int
    378 cpu_match(device_t parent, cfdata_t cf, void *aux)
    379 {
    380 	struct mainbus_attach_args *ma = aux;
    381 
    382 	if (strcmp(cf->cf_name, ma->ma_name) != 0)
    383 		return 0;
    384 
    385 	/*
     386 	 * If we are only going to attach a single cpu, make sure it is
     387 	 * the one we are running on right now.
    388 	 */
    389 	if (cpuid_from_node(ma->ma_node) != cpu_myid()) {
    390 #ifdef MULTIPROCESSOR
    391 		if (boothowto & RB_MD1)
    392 #endif
    393 			return 0;
    394 	}
    395 
    396 	return 1;
    397 }
    398 
    399 static void
    400 cpu_reset_fpustate(void)
    401 {
    402 	struct fpstate64 *fpstate;
    403 	struct fpstate64 fps[2];
    404 
    405 	/* This needs to be 64-byte aligned */
    406 	fpstate = ALIGNFPSTATE(&fps[1]);
    407 
    408 	/*
    409 	 * Get the FSR and clear any exceptions.  If we do not unload
    410 	 * the queue here and it is left over from a previous crash, we
    411 	 * will panic in the first loadfpstate(), due to a sequence error,
    412 	 * so we need to dump the whole state anyway.
    413 	 */
    414 	fpstate->fs_fsr = 7 << FSR_VER_SHIFT;	/* 7 is reserved for "none" */
    415 	savefpstate(fpstate);
    416 }
    417 
     418 /* Set up the hw.cpuN.* sysctl nodes for this cpu. */
    419 static void
    420 cpu_setup_sysctl(struct cpu_info *ci, device_t dev)
    421 {
    422 	const struct sysctlnode *cpunode = NULL;
    423 
    424 	sysctl_createv(NULL, 0, NULL, &cpunode,
    425 		       CTLFLAG_PERMANENT,
    426 		       CTLTYPE_NODE, device_xname(dev), NULL,
    427 		       NULL, 0, NULL, 0,
    428 		       CTL_HW,
    429 		       CTL_CREATE, CTL_EOL);
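         	/*
         	 * The node is created under CTL_HW using the device name, so
         	 * the children created below appear as e.g. hw.cpu0.name,
         	 * hw.cpu0.id, hw.cpu0.clock_frequency and hw.cpu0.cacheinfo.
         	 */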
    430 
    431 	if (cpunode == NULL)
    432 		return;
    433 
    434 #define SETUPS(name, member)					\
    435 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
    436 		       CTLFLAG_PERMANENT,			\
    437 		       CTLTYPE_STRING, name, NULL,		\
    438 		       NULL, 0, member, 0,			\
    439 		       CTL_CREATE, CTL_EOL);
    440 
    441 	SETUPS("name", __UNCONST(ci->ci_name))
    442 #undef SETUPS
    443 
    444 #define SETUPI(name, member)					\
    445 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
    446 		       CTLFLAG_PERMANENT,			\
    447 		       CTLTYPE_INT, name, NULL,			\
    448 		       NULL, 0, member, 0,			\
    449 		       CTL_CREATE, CTL_EOL);
    450 
    451 	SETUPI("id", &ci->ci_cpuid);
    452 #undef SETUPI
    453 
    454 #define SETUPQ(name, member)					\
    455 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
    456 		       CTLFLAG_PERMANENT,			\
    457 		       CTLTYPE_QUAD, name, NULL,			\
    458 		       NULL, 0, member, 0,			\
    459 		       CTL_CREATE, CTL_EOL);
    460 
    461 	SETUPQ("clock_frequency", &ci->ci_cpu_clockrate[0])
    462 	SETUPQ("ver", &ci->ci_ver)
     463 #undef SETUPQ
    464 
     465 	sysctl_createv(NULL, 0, &cpunode, NULL,
     466 		       CTLFLAG_PERMANENT,
     467 		       CTLTYPE_STRUCT, "cacheinfo", NULL,
     468 		       NULL, 0, &ci->ci_cacheinfo, sizeof(ci->ci_cacheinfo),
     469 		       CTL_CREATE, CTL_EOL);
    470 
    471 }
    472 
    473 /*
    474  * Attach the CPU.
    475  * Discover interesting goop about the virtual address cache
    476  * (slightly funny place to do it, but this is where it is to be found).
    477  */
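         /*
          * This allocates the per-CPU cpu_info, attaches event counters,
          * reads the CPU and system tick frequencies from the firmware,
          * probes the I$/D$/E$ geometry, creates the hw.cpuN sysctl nodes
          * and, on sun4v, configures the per-CPU mondo queues used for
          * interrupt delivery.
          */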
    478 void
    479 cpu_attach(device_t parent, device_t dev, void *aux)
    480 {
    481 	int node;
    482 	uint64_t clk, sclk = 0;
    483 	struct mainbus_attach_args *ma = aux;
    484 	struct cpu_info *ci;
    485 	const char *sep;
    486 	register int i, l;
    487 	int bigcache, cachesize;
    488 	char buf[100];
    489 	int 	totalsize = 0;
    490 	int 	linesize, dcachesize, icachesize;
    491 
    492 	/* tell them what we have */
    493 	node = ma->ma_node;
    494 
    495 	/*
    496 	 * Allocate cpu_info structure if needed.
    497 	 */
    498 	ci = alloc_cpuinfo((u_int)node);
    499 
    500 	/*
     501 	 * Only do this on the boot cpu.  Other cpus call
     502 	 * cpu_reset_fpustate() from cpu_hatch() before they
     503 	 * enter the idle loop.
     504 	 * For the other cpus we instead call mi_cpu_attach()
     505 	 * and finish setting up cpcb here.
    506 	 */
    507 	if (CPU_IS_PRIMARY(ci)) {
    508 		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
    509 					SPARC64_BLOCK_SIZE, 0, 0, "fpstate",
    510 					NULL, IPL_NONE, NULL, NULL, NULL);
    511 		cpu_reset_fpustate();
    512 	}
    513 #ifdef MULTIPROCESSOR
    514 	else {
    515 		mi_cpu_attach(ci);
    516 		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
    517 	}
    518 	for (i = 0; i < IPI_EVCNT_NUM; ++i)
    519 		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
    520 				     NULL, device_xname(dev), ipi_evcnt_names[i]);
    521 #endif
    522 	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
    523 			     device_xname(dev), "timer");
    524 	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);
    525 
    526 	clk = prom_getpropuint64(node, "clock-frequency64", 0);
    527 	if (clk == 0)
     528 		clk = prom_getpropint(node, "clock-frequency", 0);
    529 	if (clk == 0) {
    530 		/*
    531 		 * Try to find it in the OpenPROM root...
    532 		 */
    533 		clk = prom_getpropint(findroot(), "clock-frequency", 0);
    534 	}
    535 	if (clk) {
     536 		/* Tell the OS what frequency we run at: [0] is Hz, [1] is MHz */
    537 		ci->ci_cpu_clockrate[0] = clk;
    538 		ci->ci_cpu_clockrate[1] = clk / 1000000;
    539 	}
    540 
    541 	sclk = prom_getpropint(findroot(), "stick-frequency", 0);
    542 
    543 	ci->ci_system_clockrate[0] = sclk;
    544 	ci->ci_system_clockrate[1] = sclk / 1000000;
    545 
    546 	ci->ci_name = kmem_strdupsize(prom_getpropstring(node, "name"), NULL,
    547 				      KM_SLEEP);
    548 	snprintf(buf, sizeof buf, "%s @ %s MHz", ci->ci_name, clockfreq(clk));
    549 	cpu_setmodel("%s (%s)", machine_model, buf);
    550 
    551 	aprint_normal(": %s, CPU id %d\n", buf, ci->ci_cpuid);
    552 	aprint_naive("\n");
    553 	if (CPU_ISSUN4U || CPU_ISSUN4US) {
    554 		ci->ci_ver = getver();
    555 		aprint_normal_dev(dev, "manuf %x, impl %x, mask %x\n",
    556 		    (u_int)GETVER_CPU_MANUF(),
    557 		    (u_int)GETVER_CPU_IMPL(),
    558 		    (u_int)GETVER_CPU_MASK());
    559 	}
    560 #ifdef NUMA
    561 	if (CPU_IS_USIIIi()) {
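         		/*
         		 * UltraSPARC IIIi has an on-chip memory controller;
         		 * the memory behind each CPU is taken to start at
         		 * (cpuid << 36), so register one 64GB NUMA bucket
         		 * per CPU.
         		 */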
    562 		uint64_t start = ci->ci_cpuid;
    563 		start <<= 36;
    564 		ci->ci_numa_id = ci->ci_cpuid;
    565 		printf("NUMA bucket %d %016lx\n", ci->ci_cpuid, start);
    566 		uvm_page_numa_load(start, 0x1000000000, ci->ci_cpuid);
    567 	}
    568 #endif
    569 	if (ci->ci_system_clockrate[0] != 0) {
    570 		aprint_normal_dev(dev, "system tick frequency %s MHz\n",
    571 		    clockfreq(ci->ci_system_clockrate[0]));
    572 	}
    573 	aprint_normal_dev(dev, "");
    574 
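         	/*
         	 * Probe the I$, D$ and E$ geometry.  For each cache: record
         	 * the system-wide maximum size and line size (used by locore),
         	 * check that the line size is a power of two, reconstruct the
         	 * total size from nlines * associativity if the size property
         	 * is missing, and remember the largest per-way size seen for
         	 * the page recoloring below.
         	 */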
    575 	bigcache = 0;
    576 
    577 	icachesize = cpu_icache_size(node);
    578 	if (icachesize > icache_size)
    579 		icache_size = icachesize;
    580 	linesize = l = cpu_icache_line_size(node);
    581 	if (linesize > icache_line_size)
    582 		icache_line_size = linesize;
    583 
    584 	for (i = 0; (1 << i) < l && l; i++)
    585 		/* void */;
    586 	if ((1 << i) != l && l)
    587 		panic("bad icache line size %d", l);
    588 	totalsize = icachesize;
    589 	if (totalsize == 0)
    590 		totalsize = l *
    591 		    cpu_icache_nlines(node) * cpu_icache_associativity(node);
    592 
    593 	cachesize = totalsize / cpu_icache_associativity(node);
    594 	bigcache = cachesize;
    595 
    596 	sep = "";
    597 	if (totalsize > 0) {
    598 		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
    599 		       (long)totalsize/1024,
    600 		       (long)linesize);
    601 		sep = ", ";
    602 	}
    603 	ci->ci_cacheinfo.c_itotalsize = totalsize;
    604 	ci->ci_cacheinfo.c_ilinesize = linesize;
    605 
    606 	dcachesize = cpu_dcache_size(node);
    607 	if (dcachesize > dcache_size)
    608 		dcache_size = dcachesize;
    609 	linesize = l = cpu_dcache_line_size(node);
    610 	if (linesize > dcache_line_size)
    611 		dcache_line_size = linesize;
    612 
    613 	for (i = 0; (1 << i) < l && l; i++)
    614 		/* void */;
    615 	if ((1 << i) != l && l)
    616 		panic("bad dcache line size %d", l);
    617 	totalsize = dcachesize;
    618 	if (totalsize == 0)
    619 		totalsize = l *
    620 		    cpu_dcache_nlines(node) * cpu_dcache_associativity(node);
    621 
    622 	cachesize = totalsize / cpu_dcache_associativity(node);
    623 	if (cachesize > bigcache)
    624 		bigcache = cachesize;
    625 
    626 	if (totalsize > 0) {
    627 		aprint_normal("%s%ldK data (%ld b/l)", sep,
    628 		       (long)totalsize/1024,
    629 		       (long)linesize);
    630 		sep = ", ";
    631 	}
    632 	ci->ci_cacheinfo.c_dtotalsize = totalsize;
    633 	ci->ci_cacheinfo.c_dlinesize = linesize;
    634 
    635 	linesize = l = cpu_ecache_line_size(node);
    636 	for (i = 0; (1 << i) < l && l; i++)
    637 		/* void */;
    638 	if ((1 << i) != l && l)
    639 		panic("bad ecache line size %d", l);
    640 	totalsize = cpu_ecache_size(node);
    641 	if (totalsize == 0)
    642 		totalsize = l *
    643 		    cpu_ecache_nlines(node) * cpu_ecache_associativity(node);
    644 
    645 	cachesize = totalsize / cpu_ecache_associativity(node);
    646 	if (cachesize > bigcache)
    647 		bigcache = cachesize;
    648 
    649 	if (totalsize > 0) {
    650 		aprint_normal("%s%ldK external (%ld b/l)", sep,
    651 		       (long)totalsize/1024,
    652 		       (long)linesize);
    653 	}
    654 	aprint_normal("\n");
    655 	ci->ci_cacheinfo.c_etotalsize = totalsize;
    656 	ci->ci_cacheinfo.c_elinesize = linesize;
    657 
    658 	if (ecache_min_line_size == 0 ||
    659 	    linesize < ecache_min_line_size)
    660 		ecache_min_line_size = linesize;
    661 
    662 	cpu_setup_sysctl(ci, dev);
    663 
    664 	/*
    665 	 * Now that we know the size of the largest cache on this CPU,
    666 	 * re-color our pages.
    667 	 */
    668 	uvm_page_recolor(atop(bigcache)); /* XXX */
    669 
    670 	/*
    671 	 * CPU specific ipi setup
    672 	 * Currently only necessary for SUN4V
    673 	 */
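         	/*
         	 * The queues are carved out of the per-CPU area allocated in
         	 * alloc_cpuinfo(), starting one page after the cpu_info: a 2KB
         	 * cpu mondo queue, a 2KB device mondo queue, a 64-byte outgoing
         	 * mondo message buffer and a 64-byte cpuset scratch area.
         	 */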
    674 	if (CPU_ISSUN4V) {
    675 		paddr_t pa = ci->ci_paddr;
    676 		int err;
    677 
    678 		pa += CPUINFO_VA - INTSTACK;
    679 		pa += PAGE_SIZE;
    680 
    681 		ci->ci_cpumq = pa;
    682 		err = hv_cpu_qconf(CPU_MONDO_QUEUE, ci->ci_cpumq, SUN4V_MONDO_QUEUE_SIZE);
    683 		if (err != H_EOK)
    684 			panic("Unable to set cpu mondo queue: %d", err);
    685 		pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE;
    686 
    687 		ci->ci_devmq = pa;
    688 		err = hv_cpu_qconf(DEVICE_MONDO_QUEUE, ci->ci_devmq, SUN4V_MONDO_QUEUE_SIZE);
    689 		if (err != H_EOK)
    690 			panic("Unable to set device mondo queue: %d", err);
    691 		pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE;
    692 
    693 		ci->ci_mondo = pa;
    694 		pa += 64; /* mondo message is 64 bytes */
    695 
    696 		ci->ci_cpuset = pa;
    697 		pa += 64;
    698 	}
    699 
    700 	/*
    701 	 * cpu_idle setup (currently only necessary for sun4v)
    702 	 */
    703 	if (CPU_ISSUN4V) {
    704 		ci->ci_idlespin = cpu_idle_sun4v;
    705 	}
    706 }
    707 
    708 static void
    709 cpu_idle_sun4v(void)
    710 {
    711 	hv_cpu_yield();
    712 }
    713 
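         /*
          * Return the ID of the CPU we are currently running on: via the
          * hypervisor on sun4v, from a fixed physical address on the
          * Ultra-Enterprise-10000 (Starfire), and otherwise from the bus
          * configuration register appropriate for the CPU implementation
          * (Jupiter bus, Fireplane or UPA).
          */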
    714 int
    715 cpu_myid(void)
    716 {
    717 	char buf[32];
    718 
    719 	if (CPU_ISSUN4V) {
    720 		uint64_t myid;
    721 		hv_cpu_myid(&myid);
    722 		return myid;
    723 	}
    724 	if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
    725 	    strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0)
    726 		return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED);
    727 	switch (GETVER_CPU_IMPL()) {
    728 		case IMPL_OLYMPUS_C:
    729 		case IMPL_JUPITER:
    730 			return CPU_JUPITERID;
    731 		case IMPL_CHEETAH:
    732 		case IMPL_CHEETAH_PLUS:
    733 		case IMPL_JAGUAR:
    734 		case IMPL_PANTHER:
    735 			return CPU_FIREPLANEID;
    736 		default:
    737 			return CPU_UPAID;
    738 	}
    739 }
    740 
    741 #if defined(MULTIPROCESSOR)
    742 vaddr_t cpu_spinup_trampoline;
    743 
    744 /*
    745  * Start secondary processors in motion.
    746  */
    747 void
    748 cpu_boot_secondary_processors(void)
    749 {
    750 	int i, pstate;
    751 	struct cpu_info *ci;
    752 
    753 	sync_tick = 0;
    754 
    755 	sparc64_ipi_init();
    756 
    757 	if (boothowto & RB_MD1) {
    758 		cpus[0].ci_next = NULL;
    759 		sparc_ncpus = ncpu = ncpuonline = 1;
    760 		return;
    761 	}
    762 
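         	/*
         	 * For each secondary CPU: prepare its pmap state, pass its boot
         	 * arguments through cpu_args, start it via the PROM at
         	 * cpu_spinup_trampoline, and wait (up to ~20s) for it to appear
         	 * in cpus_active.  Then flip sync_tick so that the new CPU and
         	 * the boot CPU reset %tick/%stick at (roughly) the same time.
         	 */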
    763 	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
    764 		if (ci->ci_cpuid == cpu_myid())
    765 			continue;
    766 
    767 		cpu_pmap_prepare(ci, false);
    768 		cpu_args->cb_node = ci->ci_node;
    769 		cpu_args->cb_cpuinfo = ci->ci_paddr;
    770 		cpu_args->cb_cputyp = cputyp;
    771 		membar_Sync();
    772 
    773 		/* Disable interrupts and start another CPU. */
    774 		pstate = getpstate();
    775 		setpstate(PSTATE_KERN);
    776 
    777 		int rc = prom_startcpu_by_cpuid(ci->ci_cpuid,
    778 		    (void *)cpu_spinup_trampoline, 0);
    779 		if (rc == -1)
    780 			prom_startcpu(ci->ci_node,
    781 			    (void *)cpu_spinup_trampoline, 0);
    782 
    783 		for (i = 0; i < 2000; i++) {
    784 			membar_Sync();
    785 			if (CPUSET_HAS(cpus_active, ci->ci_index))
    786 				break;
    787 			delay(10000);
    788 		}
    789 
     790 		/* synchronize %tick (to some degree at least) */
    791 		delay(1000);
    792 		sync_tick = 1;
    793 		membar_Sync();
    794 		if (CPU_ISSUN4U || CPU_ISSUN4US)
    795 			settick(0);
    796 		if (ci->ci_system_clockrate[0] != 0)
    797 			if (CPU_ISSUN4U || CPU_ISSUN4US)
    798 				setstick(0);
    799 
    800 		setpstate(pstate);
    801 
    802 		if (!CPUSET_HAS(cpus_active, ci->ci_index))
    803 			printf("cpu%d: startup failed\n", ci->ci_cpuid);
    804 	}
    805 }
    806 
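         /*
          * Entry point for a freshly started secondary CPU, reached from the
          * MP startup trampoline once the CPU is running on its own stack:
          * set up per-CPU pmap state, mark the CPU active, reset the FPU,
          * wait for the boot CPU to flip sync_tick, then start the clock
          * interrupt and lower the interrupt priority level.
          */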
    807 void
    808 cpu_hatch(void)
    809 {
    810 	char *v = (char*)CPUINFO_VA;
    811 	int i;
    812 
     813 	/* XXX - unclear why we flush the icache here, but it should be harmless */
    814 	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
    815 		sparc_flush_icache(v + i);
    816 
    817 	cpu_pmap_init(curcpu());
    818 	CPUSET_ADD(cpus_active, cpu_number());
    819 	cpu_reset_fpustate();
    820 	curlwp = curcpu()->ci_data.cpu_idlelwp;
    821 	membar_Sync();
    822 
    823 	/* wait for the boot CPU to flip the switch */
    824 	while (sync_tick == 0) {
    825 		/* we do nothing here */
    826 	}
    827 	if (CPU_ISSUN4U || CPU_ISSUN4US)
    828 		settick(0);
    829 	if (curcpu()->ci_system_clockrate[0] != 0) {
    830 		if (CPU_ISSUN4U || CPU_ISSUN4US)
    831 			setstick(0);
    832 		stickintr_establish(PIL_CLOCK, stickintr);
    833 	} else {
    834 		tickintr_establish(PIL_CLOCK, tickintr);
    835 	}
    836 	spl0();
    837 }
    838 #endif /* MULTIPROCESSOR */
    839