/*	$NetBSD: cpu.c,v 1.3 2022/02/14 08:12:48 riastradh Exp $	*/

/*	$OpenBSD: cpu.c,v 1.29 2009/02/08 18:33:28 miod Exp $	*/

/*
 * Copyright (c) 1998-2003 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.3 2022/02/14 08:12:48 riastradh Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/atomic.h>
#include <sys/reboot.h>

#include <uvm/uvm.h>

#include <machine/cpufunc.h>
#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/hppa/cpuvar.h>
#include <hppa/hppa/machdep.h>
#include <hppa/dev/cpudevs.h>

#ifdef MULTIPROCESSOR

int hppa_ncpu;

struct cpu_info *cpu_hatch_info;
static volatile int start_secondary_cpu;
#endif

int	cpumatch(device_t, cfdata_t, void *);
void	cpuattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cpu, sizeof(struct cpu_softc),
    cpumatch, cpuattach, NULL, NULL);

int
cpumatch(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* probe any 1.0, 1.1 or 2.0 */
	if (ca->ca_type.iodc_type != HPPA_TYPE_NPROC ||
	    ca->ca_type.iodc_sv_model != HPPA_NPROC_HPPA)
		return 0;

	return 1;
}

void
cpuattach(device_t parent, device_t self, void *aux)
{
	/* machdep.c */
	extern struct pdc_cache pdc_cache;
	extern struct pdc_btlb pdc_btlb;
	extern struct pdc_model pdc_model;
	extern u_int cpu_ticksnum, cpu_ticksdenom;

	struct cpu_softc *sc = device_private(self);
	struct confargs *ca = aux;
	/* PA-RISC architecture level names, indexed by pdc_model.pa_lvl */
	static const char lvls[4][4] = { "0", "1", "1.5", "2" };
	struct hppa_interrupt_register *ir;
	struct cpu_info *ci;
	/* Clock rate in hundredths of a MHz, for the two-decimal print below. */
	u_int mhz = 100 * cpu_ticksnum / cpu_ticksdenom;
	int cpuno = device_unit(self);

#ifdef MULTIPROCESSOR
	struct pglist mlist;
	struct vm_page *m;
	int error;
#endif

	sc->sc_dev = self;

	/* Print the CPU chip name, nickname, and rev. */
	aprint_normal(": %s", hppa_cpu_info->hci_chip_name);
	if (hppa_cpu_info->hci_chip_nickname != NULL)
		aprint_normal(" (%s)", hppa_cpu_info->hci_chip_nickname);
	aprint_normal(" rev %d", cpu_revision);

	/* sanity check against a mis-edited kernel config */
	if (ca->ca_irq != 31) {
		aprint_error_dev(self, "bad irq number %d\n", ca->ca_irq);
		return;
	}

	/* Print the CPU type, spec, level, category, and speed. */
	aprint_normal("\n%s: %s, PA-RISC %s", device_xname(self),
	    hppa_cpu_info->hci_chip_type,
	    hppa_cpu_info->hci_chip_spec);
	aprint_normal(", lev %s, cat %c, ",
	    lvls[pdc_model.pa_lvl], "AB"[pdc_model.mc]);

	aprint_normal("%d", mhz / 100);
	if (mhz % 100 > 9)
		aprint_normal(".%02d", mhz % 100);

	aprint_normal(" MHz clk\n%s: %s", device_xname(self),
	    pdc_model.sh ? "shadows, " : "");

	if (pdc_cache.dc_conf.cc_fsel)
		aprint_normal("%uK cache", pdc_cache.dc_size / 1024);
	else
		aprint_normal("%uK/%uK D/I caches", pdc_cache.dc_size / 1024,
		    pdc_cache.ic_size / 1024);
	if (pdc_cache.dt_conf.tc_sh)
		aprint_normal(", %u shared TLB", pdc_cache.dt_size);
	else
		aprint_normal(", %u/%u D/I TLBs", pdc_cache.dt_size,
		    pdc_cache.it_size);

	if (pdc_btlb.finfo.num_c)
		aprint_normal(", %u shared BTLB", pdc_btlb.finfo.num_c);
	else {
		aprint_normal(", %u/%u D/I BTLBs", pdc_btlb.finfo.num_d,
		    pdc_btlb.finfo.num_i);
	}
	aprint_normal("\n");

	/*
	 * Describe the floating-point support.
	 */
	if (fpu_present)
		aprint_normal("%s: %s floating point, rev %d\n", device_xname(self),
		    hppa_mod_info(HPPA_TYPE_FPU, (fpu_version >> 16) & 0x1f),
		    (fpu_version >> 11) & 0x1f);
	else
		aprint_normal("%s: no floating point\n", device_xname(self));


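	/*
	 * Illustrative only, with invented values: the aprint_normal()
	 * calls above combine into boot messages along the lines of
	 *
	 *	cpu0 at mainbus0 ...: <chip name> (<nickname>) rev 0
	 *	cpu0: <chip type>, PA-RISC <spec>, lev 1, cat A, 240 MHz clk
	 *	cpu0: shadows, 64K/64K D/I caches, 96/96 D/I TLBs, 8 shared BTLB
	 *	cpu0: no floating point
	 */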
	if (cpuno >= HPPA_MAXCPUS) {
		aprint_normal_dev(self, "not started\n");
		return;
	}

	ci = &cpus[cpuno];
	ci->ci_cpuid = cpuno;
	ci->ci_hpa = ca->ca_hpa;

	hppa_intr_initialise(ci);

	ir = &ci->ci_ir;
	hppa_interrupt_register_establish(ci, ir);
	ir->ir_iscpu = true;
	ir->ir_ci = ci;
	ir->ir_name = device_xname(self);

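	/*
	 * Interrupt bits on this CPU's interrupt register: bit 31
	 * carries the clock (interval timer) interrupt, and on
	 * MULTIPROCESSOR kernels bit 30 is used for IPIs.
	 */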
	sc->sc_ihclk = hppa_intr_establish(IPL_CLOCK, clock_intr,
	    NULL /*clockframe*/, &ci->ci_ir, 31);
#ifdef MULTIPROCESSOR
	sc->sc_ihipi = hppa_intr_establish(IPL_HIGH, hppa_ipi_intr,
	    NULL /*clockframe*/, &ci->ci_ir, 30);
#endif

	/*
	 * Reserve some bits for chips that don't like to be moved
	 * around, e.g. lasi and asp.
	 */
	ir->ir_rbits = ((1 << 28) | (1 << 27));
	ir->ir_bits &= ~ir->ir_rbits;

#ifdef MULTIPROCESSOR
	/* Allocate stack for spin up and FPU emulation. */
	TAILQ_INIT(&mlist);
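	/*
	 * One physically contiguous, page-aligned page anywhere in RAM
	 * (low = 0, high = -1), no boundary constraint, a single
	 * segment, and do not wait for memory to become available.
	 */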
	error = uvm_pglistalloc(PAGE_SIZE, 0, -1L, PAGE_SIZE, 0, &mlist, 1, 0);

	if (error) {
		aprint_error(": unable to allocate CPU stack!\n");
		return;
	}
	m = TAILQ_FIRST(&mlist);
	ci->ci_stack = VM_PAGE_TO_PHYS(m);
	ci->ci_softc = sc;

	if (ci->ci_hpa == hppa_mcpuhpa) {
		/* This is the primary (boot) CPU. */
		ci->ci_flags |= CPUF_PRIMARY|CPUF_RUNNING;
	} else {
		int err;

		err = mi_cpu_attach(ci);
		if (err) {
			aprint_error_dev(self,
			    "mi_cpu_attach failed with %d\n", err);
			return;
		}
	}
	hppa_ncpu++;
	hppa_ipi_init(ci);
#endif
	KASSERT(ci->ci_cpl == -1);
}

#ifdef MULTIPROCESSOR
void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	struct iomod *cpu;
	int i, j;

	for (i = 0; i < HPPA_MAXCPUS; i++) {

		ci = &cpus[i];
		if (ci->ci_cpuid == 0)
			continue;

		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;

		if (ci->ci_flags & CPUF_PRIMARY)
			continue;

		/*
		 * Release the specified CPU by triggering an EIR{0}.
		 *
		 * The `load-acquire operation' matching this
		 * store-release is somewhere inside the silicon or
		 * firmware -- the point is that the store to
		 * cpu_hatch_info must happen before writing EIR{0};
		 * there is conceptually some magic inside the silicon
		 * or firmware that effectively does
		 *
		 *	if (atomic_load_acquire(&cpu->io_eir) == 0) {
		 *		hw_cpu_spinup_trampoline();
		 *	}
		 *
		 * so that hw_cpu_spinup_trampoline correctly sees the
		 * value we just stored at cpu_hatch_info.
		 */
		cpu_hatch_info = ci;
		cpu = (struct iomod *)(ci->ci_hpa);
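		/*
		 * io_eir is the external interrupt request register in
		 * the processor's hard physical address (HPA) space;
		 * the store below posts external interrupt 0 to that
		 * CPU.
		 */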
		atomic_store_release(&cpu->io_eir, 0);

		/* Wait for CPU to wake up... */
		j = 0;
		while (!(ci->ci_flags & CPUF_RUNNING) && j++ < 10000)
			delay(1000);
		if (!(ci->ci_flags & CPUF_RUNNING))
			printf("failed to hatch cpu %ld!\n", (long)ci->ci_cpuid);
	}

	/*
	 * Release secondary CPUs.
	 *
	 * Matches load-acquire in cpu_hatch.
	 */
	atomic_store_release(&start_secondary_cpu, 1);
}

void
cpu_hw_init(void)
{
	struct cpu_info *ci = curcpu();

	/* Purge TLB and flush caches. */
	ptlball();
	fcacheall();

	/* Enable address translations. */
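	/*
	 * Per the PA-RISC PSW definitions: PSW_I enables external
	 * interrupts, PSW_Q interruption state collection, PSW_P
	 * protection-ID checking, and PSW_C/PSW_D instruction/data
	 * address translation.  PSW_O (ordered memory references) is
	 * inherited from the primary CPU so every CPU runs with the
	 * same memory-ordering mode.
	 */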
	ci->ci_psw = PSW_I | PSW_Q | PSW_P | PSW_C | PSW_D;
	ci->ci_psw |= (cpus[0].ci_psw & PSW_O);

	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
}

void
cpu_hatch(void)
{
	struct cpu_info *ci = curcpu();

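	/*
	 * Report that we are up: cpu_boot_secondary_processors()
	 * polls ci_flags for CPUF_RUNNING after poking our EIR.
	 */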
	ci->ci_flags |= CPUF_RUNNING;

	/*
	 * Wait for additional CPUs to spin up.
	 *
	 * Matches store-release in cpu_boot_secondary_processors.
	 */
	while (!atomic_load_acquire(&start_secondary_cpu))
		;

	/* Spin for now */
	for (;;)
		;

}
#endif
    317