/*	$NetBSD: rmixl_cpucore.c,v 1.7 2021/08/07 16:18:59 thorpej Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpucore.c,v 1.7 2021/08/07 16:18:59 thorpej Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_cpunodevar.h>
#include <mips/rmi/rmixl_cpucorevar.h>
#include <mips/rmi/rmixl_fmnvar.h>

static int cpucore_rmixl_match(device_t, cfdata_t, void *);
static void cpucore_rmixl_attach(device_t, device_t, void *);
static int cpucore_rmixl_print(void *, const char *);

CFATTACH_DECL_NEW(cpucore_rmixl, sizeof(struct cpucore_softc),
    cpucore_rmixl_match, cpucore_rmixl_attach, NULL, NULL);

static int
cpucore_rmixl_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpunode_attach_args *na = aux;
	int core = cf->cf_loc[CPUNODECF_CORE];

	if (!cpu_rmixl(mips_options.mips_cpu))
		return 0;

	if (strncmp(na->na_name, cf->cf_name, strlen(cf->cf_name)) == 0
#ifndef MULTIPROCESSOR
	    && na->na_core == 0
#endif
	    && (core == CPUNODECF_CORE_DEFAULT || core == na->na_core))
		return 1;

	return 0;
}

static void
cpucore_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct cpucore_softc * const sc = device_private(self);
	struct cpunode_attach_args *na = aux;
	struct cpucore_attach_args ca;
	u_int nthreads;
	struct rmixl_config *rcp = &rmixl_configuration;

	sc->sc_dev = self;
	sc->sc_core = na->na_core;
	KASSERT(sc->sc_hatched == false);

#if 0
#ifdef MULTIPROCESSOR
	/*
	 * Create the TLB structure needed - one per core and core0 uses the
	 * default one for the system.
	 */
	if (sc->sc_core == 0) {
		sc->sc_tlbinfo = &pmap_tlb0_info;
	} else {
		const vaddr_t va = (vaddr_t)&sc->sc_tlbinfo0;
		paddr_t pa;

		if (!pmap_extract(pmap_kernel(), va, &pa))
			panic("%s: pmap_extract fail, va %#"PRIxVADDR, __func__, va);
#ifdef _LP64
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
			MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
			MIPS_PHYS_TO_KSEG0(pa);
#endif
		pmap_tlb_info_init(sc->sc_tlbinfo);
	}
#endif
#endif

	aprint_normal("\n");
	aprint_normal_dev(self, "%lu.%02luMHz (hz cycles = %lu, "
	    "delay divisor = %lu)\n",
	    curcpu()->ci_cpu_freq / 1000000,
	    (curcpu()->ci_cpu_freq % 1000000) / 10000,
	    curcpu()->ci_cycles_per_hz, curcpu()->ci_divisor_delay);

	aprint_normal("%s: ", device_xname(self));
	cpu_identify(self);

	nthreads = MIPS_CIDFL_RMI_NTHREADS(mips_options.mips_cpu->cpu_cidflags);
	aprint_normal_dev(self, "%d %s on core\n", nthreads,
	    nthreads == 1 ? "thread" : "threads");

	/*
	 * Attach cpu (RMI thread context) devices
	 * according to the userapp_cpu_map bitmask.
	 */
	u_int thread_mask = (1 << nthreads) - 1;
	u_int core_shft = sc->sc_core * nthreads;
	u_int threads_enb =
	    (u_int)(rcp->rc_psb_info.userapp_cpu_map >> core_shft) & thread_mask;
	u_int threads_dis = (~threads_enb) & thread_mask;

	/*
	 * Illustrative example (hypothetical values): with nthreads = 4
	 * and userapp_cpu_map = 0x00fe, core 0 gets threads_enb = 0xe and
	 * threads_dis = 0x1 (thread 0 offline), while core 1 gets
	 * threads_enb = 0xf (all four threads enabled).
	 */
	sc->sc_threads_dis = threads_dis;
	if (threads_dis != 0) {
		aprint_normal_dev(self, "threads");
		u_int d = threads_dis;
		while (d != 0) {
			const u_int t = ffs(d) - 1;
			d ^= (1 << t);
			aprint_normal(" %d%s", t, (d==0) ? "" : ",");
		}
		aprint_normal(" offline (disabled by firmware)\n");
	}

	u_int threads_try_attach = threads_enb;
	while (threads_try_attach != 0) {
		const u_int t = ffs(threads_try_attach) - 1;
		const u_int bit = 1 << t;
		threads_try_attach ^= bit;
		ca.ca_name = "cpu";
		ca.ca_thread = t;
		ca.ca_core = sc->sc_core;
		if (config_found(self, &ca, cpucore_rmixl_print,
		    CFARGS_NONE) == NULL) {
			/*
			 * The thread did not attach (e.g. not configured);
			 * arrange to have it disabled in the THREADEN PCR.
			 */
			threads_enb ^= bit;
			threads_dis |= bit;
		}
	}

	sc->sc_threads_enb = threads_enb;
	sc->sc_threads_dis = threads_dis;

	/*
	 * When attaching the core of the primary cpu,
	 * do the run initialization (cpucore_rmixl_run) here.
	 */
	if (sc->sc_core == RMIXL_CPU_CORE((curcpu()->ci_cpuid)))
		cpucore_rmixl_run(self);
}

static int
cpucore_rmixl_print(void *aux, const char *pnp)
{
	struct cpucore_attach_args *ca = aux;

	if (pnp != NULL)
		aprint_normal("%s:", pnp);
	aprint_normal(" thread %d", ca->ca_thread);

	return (UNCONF);
}

/*
 * cpucore_rmixl_run
 *	Called from cpucore_rmixl_attach for the primary core,
 *	and from cpu_rmixl_run for each hatched cpu.
 *	The first call for each cpucore initializes per-core features:
 *	- disable unused threads
 *	- set fine-grained (round-robin) thread scheduling mode
 */
void
cpucore_rmixl_run(device_t self)
{
	struct cpucore_softc * const sc = device_private(self);

	if (sc->sc_running == false) {
		sc->sc_running = true;
		rmixl_mtcr(RMIXL_PCR_THREADEN, sc->sc_threads_enb);
		rmixl_mtcr(RMIXL_PCR_SCHEDULING, 0);
	}
}

#ifdef MULTIPROCESSOR
/*
 * cpucore_rmixl_hatch
 *	Called from cpu_rmixl_hatch for each cpu.
 *	The first call for each cpucore initializes per-core features.
 */
void
cpucore_rmixl_hatch(device_t self)
{
	struct cpucore_softc * const sc = device_private(self);

	if (sc->sc_hatched == false) {
		/* PCRs for core#0 are set up in mach_init() */
		if (sc->sc_core != 0)
			rmixl_pcr_init_core();
		rmixl_fmn_init_core();
		sc->sc_hatched = true;
	}
}
#endif /* MULTIPROCESSOR */
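
/*
 * Illustrative sketch (an assumption, not taken from any particular
 * kernel config file): based on the match/attach code above, cpucore
 * instances attach at cpunode via a "core" locator (CPUNODECF_CORE),
 * and the per-thread cpu devices attach at cpucore via a "thread"
 * locator, along the lines of:
 *
 *	cpucore* at cpunode? core ?
 *	cpu*     at cpucore? thread ?
 *
 * Consult the machine's config(5) file and files.rmixl for the
 * authoritative attachment lines.
 */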