/*	$NetBSD: rmixl_cpu.c,v 1.3 2011/04/14 05:12:58 cliff Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.3 2011/04/14 05:12:58 cliff Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lock.h>
#include <sys/lwp.h>
#include <sys/malloc.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_extern.h>
#include <mips/regnum.h>
#include <mips/asm.h>
#include <mips/pmap.h>
#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_cpucorevar.h>
#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>
#include <mips/rmi/rmixl_fmnvar.h>
#ifdef DDB
#include <mips/db_machdep.h>
#endif


static int	cpu_rmixl_match(device_t, cfdata_t, void *);
static void	cpu_rmixl_attach(device_t, device_t, void *);
static void	cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const);
#ifdef NOTYET
static int	cpu_fmn_intr(void *, rmixl_fmn_rxmsg_t *);
#endif

#ifdef MULTIPROCESSOR
void		cpu_rmixl_hatch(struct cpu_info *);
void		cpu_rmixl_run(struct cpu_info *);
#if 0
static void	cpu_setup_trampoline_ipi(struct device *, struct cpu_info *);
#endif
static int	cpu_setup_trampoline_common(struct cpu_info *, struct rmixl_cpu_trampoline_args *);
static void	cpu_setup_trampoline_callback(struct cpu_info *);
#endif	/* MULTIPROCESSOR */

#ifdef DEBUG
void		rmixl_cpu_data_print(struct cpu_data *);
struct cpu_info *
		rmixl_cpuinfo_print(u_int);
#endif	/* DEBUG */

CFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct rmixl_cpu_softc),
	cpu_rmixl_match, cpu_rmixl_attach, NULL, NULL);

#ifdef MULTIPROCESSOR
static struct rmixl_cpu_trampoline_args rmixl_cpu_trampoline_args;
#endif

/*
 * cpu_rmixl_db_watch_init - initialize COP0 watchpoint stuff
 *
 * clear IEU_DEFEATURE[DBE] to ensure T_WATCH on watchpoint exception
 * set COP0 watchhi and watchlo
 *
 * disable all watchpoints
 */
static void
cpu_rmixl_db_watch_init(void)
{
	uint32_t r;

	r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE);
	r &= ~__BIT(7);		/* DBE */
	rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r);

	cpuwatch_clr_all();
}

/*
 * cpu_xls616_erratum
 *
 * on the XLS616, COUNT/COMPARE clock regs seem to interact between
 * threads on a core
 *
 * the symptom of the error is delayed clock interrupts
 * and very slow apparent system performance
 *
 * other XLS chips may have the same problem.
 * we may need to add other PID checks.
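 *
 * (the check below is currently disabled under "#if 0"; when enabled it
 * refuses to attach any hardware thread other than thread 0 on an
 * affected core)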
 */
static inline bool
cpu_xls616_erratum(device_t parent, struct cpucore_attach_args *ca)
{
#if 0
	if (mips_options.mips_cpu->cpu_pid == MIPS_XLS616) {
		if (ca->ca_thread > 0) {
			aprint_error_dev(parent, "XLS616 CLOCK ERRATUM: "
				"deconfigure cpu%d\n", ca->ca_thread);
			return true;
		}
	}
#endif
	return false;
}

static bool
cpu_rmixl_erratum(device_t parent, struct cpucore_attach_args *ca)
{
	return cpu_xls616_erratum(parent, ca);
}

static int
cpu_rmixl_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpucore_attach_args *ca = aux;
	int thread = cf->cf_loc[CPUCORECF_THREAD];

	if (!cpu_rmixl(mips_options.mips_cpu))
		return 0;

	if (strncmp(ca->ca_name, cf->cf_name, strlen(cf->cf_name)) == 0
#ifndef MULTIPROCESSOR
	    && ca->ca_thread == 0
#endif
	    && (thread == CPUCORECF_THREAD_DEFAULT || thread == ca->ca_thread)
	    && (!cpu_rmixl_erratum(parent, ca)))
		return 1;

	return 0;
}

static void
cpu_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct rmixl_cpu_softc * const sc = device_private(self);
	struct cpu_info *ci = NULL;
	static bool once = false;
	extern void rmixl_spl_init_cpu(void);

	if (once == false) {
		/* first attach is the primary cpu */
		once = true;
		ci = curcpu();
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		rmixl_spl_init_cpu();	/* spl initialization for CPU#0 */
		cpu_rmixl_attach_primary(sc);

#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_cpu_init = cpu_rmixl_hatch;
		mips_locoresw.lsw_cpu_run = cpu_rmixl_run;
	} else {
		struct cpucore_attach_args *ca = aux;
		struct cpucore_softc * const ccsc = device_private(parent);
		rmixlfw_psb_type_t psb_type = rmixl_configuration.rc_psb_type;
		cpuid_t cpuid;

		KASSERT(ca->ca_core < 8);
		KASSERT(ca->ca_thread < 4);
		cpuid = (ca->ca_core << 2) | ca->ca_thread;
		ci = cpu_info_alloc(ccsc->sc_tlbinfo, cpuid,
			/* XXX */ 0, ca->ca_core, ca->ca_thread);
		KASSERT(ci != NULL);
		if (ccsc->sc_tlbinfo == NULL)
			ccsc->sc_tlbinfo = ci->ci_tlb_info;
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		switch (psb_type) {
		case PSB_TYPE_RMI:
		case PSB_TYPE_DELL:
			cpu_setup_trampoline_callback(ci);
			break;
		default:
			aprint_error(": psb type=%s cpu_wakeup unsupported\n",
				rmixlfw_psb_type_name(psb_type));
			return;
		}

		const u_long cpu_mask = 1L << cpu_index(ci);
		for (size_t i=0; i < 10000; i++) {
			if ((cpus_hatched & cpu_mask) != 0)
				break;
			DELAY(100);
		}
		if ((cpus_hatched & cpu_mask) == 0) {
			aprint_error(": failed to hatch\n");
			return;
		}
#endif	/* MULTIPROCESSOR */
	}

	/*
	 * do per-cpu interrupt initialization
	 */
	rmixl_intr_init_cpu(ci);

	aprint_normal("\n");

	cpu_attach_common(self, ci);
}

/*
 * attach the primary processor
 */
static void
cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const sc)
{
	struct cpu_info *ci = sc->sc_ci;
	uint32_t ebase;

	KASSERT(CPU_IS_PRIMARY(ci));

	/*
	 * obtain and set cpuid of the primary processor
	 */
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	ci->ci_cpuid = ebase & __BITS(9,0);

	cpu_rmixl_db_watch_init();

	rmixl_fmn_init();

	rmixl_intr_init_clk();
#ifdef MULTIPROCESSOR
	rmixl_intr_init_ipi();
#endif

#ifdef NOTYET
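	/*
	 * NOTYET: hook up a handler for fast messaging network (FMN)
	 * messages on the RMIXL_FMN_STID_CORE0 station; the cpu_fmn_intr()
	 * stub below only logs what it receives.
	 */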
	void *ih = rmixl_fmn_intr_establish(RMIXL_FMN_STID_CORE0,
		cpu_fmn_intr, ci);
	if (ih == NULL)
		panic("%s: rmixl_fmn_intr_establish failed",
			__func__);
	sc->sc_ih_fmn = ih;
#endif

}

#ifdef NOTYET
static int
cpu_fmn_intr(void *arg, rmixl_fmn_rxmsg_t *rxmsg)
{
	if (CPU_IS_PRIMARY(curcpu())) {
		printf("%s: cpu%ld: rxsid=%#x, code=%d, size=%d\n",
			__func__, cpu_number(),
			rxmsg->rxsid, rxmsg->code, rxmsg->size);
		for (int i=0; i < rxmsg->size; i++)
			printf("\t%#"PRIx64"\n", rxmsg->msg.data[i]);
	}

	return 1;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * cpu_rmixl_run
 *
 * - chip-specific post-running code called from cpu_hatch via lsw_cpu_run
 */
void
cpu_rmixl_run(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	cpucore_rmixl_run(device_parent(sc->sc_dev));
}

/*
 * cpu_rmixl_hatch
 *
 * - chip-specific hatch code called from cpu_hatch via lsw_cpu_init
 */
void
cpu_rmixl_hatch(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	extern void rmixl_spl_init_cpu(void);

	rmixl_spl_init_cpu();	/* spl initialization for this CPU */

	(void)splhigh();

#ifdef DEBUG
	uint32_t ebase;
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	KASSERT((ebase & __BITS(9,0)) == ci->ci_cpuid);
	KASSERT(curcpu() == ci);
#endif

	cpucore_rmixl_hatch(device_parent(sc->sc_dev));

	cpu_rmixl_db_watch_init();
}

static int
cpu_setup_trampoline_common(struct cpu_info *ci, struct rmixl_cpu_trampoline_args *ta)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	uintptr_t stacktop;

#ifdef DIAGNOSTIC
	/* Ensure our current stack can be used by the firmware */
	uint64_t sp;
	__asm__ volatile("move	%0, $sp\n" : "=r"(sp));
#ifdef _LP64
	/* can be made into a KSEG0 addr */
	KASSERT(MIPS_XKPHYS_P(sp));
	KASSERT((MIPS_XKPHYS_TO_PHYS(sp) >> 32) == 0);
#else
	/* is a KSEG0 addr */
	KASSERT(MIPS_KSEG0_P(sp));
#endif	/* _LP64 */
#endif	/* DIAGNOSTIC */

#ifndef _LP64
	/*
	 * Ensure 'ci' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in cpu_trampoline() when loading ci_idlelwp
	 */
	KASSERT(MIPS_KSEG0_P(ci));
#endif

	/*
	 * Ensure 'ta' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in trampoline when loading args.
	 *
	 * Note:
	 *  RMI firmware only passes the lower 32-bit half of 'ta'
	 *  to rmixl_cpu_trampoline (the upper half is clear)
	 *  so rmixl_cpu_trampoline must reconstruct the missing upper half
	 *  rmixl_cpu_trampoline "knows" 'ta' is a KSEG0 address
	 *  and sign-extends to make an LP64 KSEG0 address.
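	 *
	 *  illustrative example (address made up): if the firmware passes
	 *  the 32-bit value 0x805f0000, sign extension reconstructs the
	 *  LP64 KSEG0 address 0xffffffff805f0000.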
	 */
	KASSERT(MIPS_KSEG0_P(ta));

	/*
	 * marshal args for rmixl_cpu_trampoline;
	 * note for non-LP64 kernel, use of intptr_t
	 * forces sign extension of 32 bit pointers
	 */
	stacktop = (uintptr_t)l->l_md.md_utf - CALLFRAME_SIZ;
	ta->ta_sp = (uint64_t)(intptr_t)stacktop;
	ta->ta_lwp = (uint64_t)(intptr_t)l;
	ta->ta_cpuinfo = (uint64_t)(intptr_t)ci;

	return 0;
}

static void
cpu_setup_trampoline_callback(struct cpu_info *ci)
{
	void (*wakeup_cpu)(void *, void *, unsigned int);
	struct rmixl_cpu_trampoline_args *ta = &rmixl_cpu_trampoline_args;
	extern void rmixl_cpu_trampoline(void *);
	extern void rmixlfw_wakeup_cpu(void *, void *, u_int64_t, void *);

	cpu_setup_trampoline_common(ci, ta);

#if _LP64
	wakeup_cpu = (void *)rmixl_configuration.rc_psb_info.wakeup;
#else
	wakeup_cpu = (void *)(intptr_t)
		(rmixl_configuration.rc_psb_info.wakeup & 0xffffffff);
#endif

	rmixlfw_wakeup_cpu(rmixl_cpu_trampoline, (void *)ta,
		(uint64_t)1 << ci->ci_cpuid, wakeup_cpu);
}
#endif	/* MULTIPROCESSOR */


#ifdef DEBUG
void
rmixl_cpu_data_print(struct cpu_data *dp)
{
	printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
	printf("cpu_callout %p\n", dp->cpu_callout);
	printf("cpu_unused1 %p\n", dp->cpu_unused1);
	printf("cpu_unused2 %d\n", dp->cpu_unused2);
	printf("&cpu_schedstate %p\n", &dp->cpu_schedstate);	/* TBD */
	printf("&cpu_xcall %p\n", &dp->cpu_xcall);		/* TBD */
	printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
	printf("cpu_onproc %p\n", dp->cpu_onproc);
	printf("&cpu_qchain %p\n", &dp->cpu_qchain);		/* TBD */
	printf("cpu_idlelwp %p\n", dp->cpu_idlelwp);
	printf("cpu_lockstat %p\n", dp->cpu_lockstat);
	printf("cpu_index %d\n", dp->cpu_index);
	printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
	printf("cpu_spin_locks %d\n", dp->cpu_spin_locks);
	printf("cpu_simple_locks %d\n", dp->cpu_simple_locks);
	printf("cpu_spin_locks2 %d\n", dp->cpu_spin_locks2);
	printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
	printf("cpu_softints %d\n", dp->cpu_softints);
	printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);
	printf("cpu_ntrap %"PRIu64"\n", dp->cpu_ntrap);
	printf("cpu_nfault %"PRIu64"\n", dp->cpu_nfault);
	printf("cpu_nintr %"PRIu64"\n", dp->cpu_nintr);
	printf("cpu_nsoft %"PRIu64"\n", dp->cpu_nsoft);
	printf("cpu_nswtch %"PRIu64"\n", dp->cpu_nswtch);
	printf("cpu_uvm %p\n", dp->cpu_uvm);
	printf("cpu_softcpu %p\n", dp->cpu_softcpu);
	printf("&cpu_biodone %p\n", &dp->cpu_biodone);		/* TBD */
	printf("&cpu_percpu %p\n", &dp->cpu_percpu);		/* TBD */
	printf("cpu_selcluster %p\n", dp->cpu_selcluster);
	printf("cpu_nch %p\n", dp->cpu_nch);
	printf("&cpu_ld_locks %p\n", &dp->cpu_ld_locks);	/* TBD */
	printf("&cpu_ld_lock %p\n", &dp->cpu_ld_lock);		/* TBD */
	printf("cpu_cc_freq %#"PRIx64"\n", dp->cpu_cc_freq);
	printf("cpu_cc_skew %#"PRIx64"\n", dp->cpu_cc_skew);
}

struct cpu_info *
rmixl_cpuinfo_print(u_int cpuindex)
{
	struct cpu_info * const ci = cpu_lookup(cpuindex);

	if (ci != NULL) {
		rmixl_cpu_data_print(&ci->ci_data);
		printf("ci_dev %p\n", ci->ci_dev);
		printf("ci_cpuid %ld\n", ci->ci_cpuid);
		printf("ci_cctr_freq %ld\n", ci->ci_cctr_freq);
		printf("ci_cpu_freq %ld\n", ci->ci_cpu_freq);
		printf("ci_cycles_per_hz %ld\n", ci->ci_cycles_per_hz);
		printf("ci_divisor_delay %ld\n", ci->ci_divisor_delay);
		printf("ci_divisor_recip %ld\n", ci->ci_divisor_recip);
		printf("ci_curlwp %p\n", ci->ci_curlwp);
		printf("ci_want_resched %d\n", ci->ci_want_resched);
		printf("ci_mtx_count %d\n", ci->ci_mtx_count);
		printf("ci_mtx_oldspl %d\n", ci->ci_mtx_oldspl);
		printf("ci_idepth %d\n", ci->ci_idepth);
		printf("ci_cpl %d\n", ci->ci_cpl);
		printf("&ci_cpl %p\n", &ci->ci_cpl);	/* XXX */
		printf("ci_next_cp0_clk_intr %#x\n", ci->ci_next_cp0_clk_intr);
		for (int i=0; i < SOFTINT_COUNT; i++)
			printf("ci_softlwps[%d] %p\n", i, ci->ci_softlwps[i]);
		printf("ci_tlb_slot %d\n", ci->ci_tlb_slot);
		printf("ci_pmap_asid_cur %d\n", ci->ci_pmap_asid_cur);
		printf("ci_tlb_info %p\n", ci->ci_tlb_info);
		printf("ci_pmap_seg0tab %p\n", ci->ci_pmap_seg0tab);
#ifdef _LP64
		printf("ci_pmap_segtab %p\n", ci->ci_pmap_segtab);
#else
		printf("ci_pmap_srcbase %#"PRIxVADDR"\n", ci->ci_pmap_srcbase);
		printf("ci_pmap_dstbase %#"PRIxVADDR"\n", ci->ci_pmap_dstbase);
#endif
#ifdef MULTIPROCESSOR
		printf("ci_flags %#lx\n", ci->ci_flags);
		printf("ci_request_ipis %#"PRIx64"\n", ci->ci_request_ipis);
		printf("ci_active_ipis %#"PRIx64"\n", ci->ci_active_ipis);
		printf("ci_ksp_tlb_slot %d\n", ci->ci_ksp_tlb_slot);
#endif
	}

	return ci;
}
#endif	/* DEBUG */