/*	$NetBSD: machdep.c,v 1.22 2024/03/05 14:15:31 thorpej Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, The Mach Operating System project at
 * Carnegie-Mellon University and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.3 (Berkeley) 1/12/94
 *	from: Utah Hdr: machdep.c 1.63 91/04/24
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.22 2024/03/05 14:15:31 thorpej Exp $");

#define __INTR_PRIVATE

#include "opt_multiprocessor.h"
#include "opt_ddb.h"
#include "opt_com.h"
#include "opt_execfmt.h"
#include "opt_memsize.h"
#include "rmixl_pcix.h"
#include "rmixl_pcie.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/reboot.h>
#include <sys/mount.h>
#include <sys/kcore.h>
#include <sys/boot_flag.h>
#include <sys/termios.h>
#include <sys/ksyms.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/extent.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include "ksyms.h"

#if NKSYMS || defined(DDB) || defined(LKM)
#include <mips/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#include <mips/cpu.h>
#include <mips/psl.h>
#include <mips/cache.h>
#include <mips/mips_opcode.h>

#include "com.h"
#if NCOM == 0
#error no serial console
#endif

#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>

#include <mips/include/intr.h>

#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_intr.h>
#include <mips/rmi/rmixl_firmware.h>
#include <mips/rmi/rmixl_comvar.h>
#include <mips/rmi/rmixl_pcievar.h>
#include <mips/rmi/rmixl_pcixvar.h>

#ifdef MACHDEP_DEBUG
int machdep_debug = MACHDEP_DEBUG;
# define DPRINTF(x)	do { if (machdep_debug) printf x ; } while (0)
#else
# define DPRINTF(x)
#endif
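
/*
 * Usage note (editor's illustration): DPRINTF takes a doubly
 * parenthesized printf(9)-style argument list, as used later in
 * this file, e.g.
 *	DPRINTF(("mem_cluster_maxaddr %#"PRIx64"\n", mem_cluster_maxaddr));
 */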

#ifndef CONSFREQ
# define CONSFREQ	66000000
#endif
#ifndef CONSPEED
# define CONSPEED	38400
#endif
#ifndef CONMODE
# define CONMODE	((TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8)
#endif
#ifndef CONSADDR
# define CONSADDR	RMIXL_IO_DEV_UART_1
#endif

int		comcnfreq  = CONSFREQ;
int		comcnspeed = CONSPEED;
tcflag_t	comcnmode  = CONMODE;
bus_addr_t	comcnaddr  = (bus_addr_t)CONSADDR;

struct rmixl_config rmixl_configuration;


/*
 * array of tested firmware versions
 * if you find new ones and they work
 * please add them
 */
typedef struct rmiclfw_psb_id {
	uint64_t		psb_version;
	rmixlfw_psb_type_t	psb_type;
} rmiclfw_psb_id_t;
static rmiclfw_psb_id_t rmiclfw_psb_id[] = {
	{ 0x4958d4fb00000056ULL, PSB_TYPE_RMI  },
	{ 0x4aacdb6a00000056ULL, PSB_TYPE_RMI  },
	{ 0x4b67d03200000056ULL, PSB_TYPE_RMI  },
	{ 0x4c17058b00000056ULL, PSB_TYPE_RMI  },
	{ 0x49a5a8fa00000056ULL, PSB_TYPE_DELL },
	{ 0x4b8ead3100000056ULL, PSB_TYPE_DELL },
};
#define RMICLFW_PSB_VERSIONS_LEN \
	(sizeof(rmiclfw_psb_id)/sizeof(rmiclfw_psb_id[0]))

/*
 * storage for fixed extent used to allocate physical address regions
 * because extent(9) start and end values are u_long, they are only
 * 32 bits on a 32 bit kernel, which is insufficient since XLS physical
 * address is 40 bits wide.  So the "physaddr" map stores regions
 * in units of megabytes.
 */
static u_long rmixl_physaddr_storage[
	EXTENT_FIXED_STORAGE_SIZE(32)/sizeof(u_long)
];
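
/*
 * Worked example (editor's illustration): a 40-bit physical address
 * space is 2^40 bytes = 2^20 MB, so megabyte units fit comfortably in
 * a 32-bit u_long; e.g. a DRAM region at 0x40000000 (1GB) of size
 * 0x20000000 (512MB) is recorded in the extent as start 1024, size 512.
 */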

/* Maps for VM objects. */
struct vm_map *phys_map = NULL;

int	netboot;		/* Are we netbooting? */


phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
u_quad_t mem_cluster_maxaddr;
u_int mem_cluster_cnt;


void configure(void);
void mach_init(int, int32_t *, void *, int64_t);
static uint64_t rmixlfw_init(int64_t);
static uint64_t mem_clusters_init(rmixlfw_mmap_t *, rmixlfw_mmap_t *);
static void __attribute__((__noreturn__)) rmixl_reset(void);
static void rmixl_physaddr_init(void);
static u_int ram_seg_resv(phys_ram_seg_t *, u_int, u_quad_t, u_quad_t);
void rmixlfw_mmap_print(rmixlfw_mmap_t *);


#ifdef MULTIPROCESSOR
static bool rmixl_fixup_cop0_oscratch(int32_t, uint32_t [2], void *);
void rmixl_get_wakeup_info(struct rmixl_config *);
#ifdef MACHDEP_DEBUG
static void rmixl_wakeup_info_print(volatile rmixlfw_cpu_wakeup_info_t *);
#endif	/* MACHDEP_DEBUG */
#endif	/* MULTIPROCESSOR */
static void rmixl_fixup_curcpu(void);

/*
 * Do all the stuff that locore normally does before calling main().
 */
void
mach_init(int argc, int32_t *argv, void *envp, int64_t infop)
{
	struct rmixl_config *rcp = &rmixl_configuration;
	void *kernend;
	uint64_t memsize;
	extern char edata[], end[];

	rmixl_pcr_init_core();

	/*
	 * Clear the BSS segment.
	 */
	kernend = (void *)mips_round_page(end);
	memset(edata, 0, (char *)kernend - edata);

	/*
	 * Set up the exception vectors and CPU-specific function
	 * vectors early on.  We need the wbflush() vector set up
	 * before comcnattach() is called (or at least before the
	 * first printf() after that is called).
	 * Also clears the I+D caches.
	 *
	 * specify chip-specific EIRR/EIMR based spl functions
	 */
#ifdef MULTIPROCESSOR
	mips_vector_init(&rmixl_splsw, true);
#else
	mips_vector_init(&rmixl_splsw, false);
#endif

	/* mips_vector_init initialized mips_options */
	cpu_setmodel("%s", mips_options.mips_cpu->cpu_name);

	/* get system info from firmware */
	memsize = rmixlfw_init(infop);

	uvm_md_init();

	physmem = btoc(memsize);

	rmixl_obio_eb_bus_mem_init(&rcp->rc_obio_eb_memt, rcp);

#if NCOM > 0
	rmixl_com_cnattach(comcnaddr, comcnspeed, comcnfreq,
		COM_TYPE_NORMAL, comcnmode);
#endif

	printf("\nNetBSD/rmixl\n");
	printf("memsize = %#"PRIx64"\n", memsize);
#ifdef MEMLIMIT
	printf("memlimit = %#"PRIx64"\n", (uint64_t)MEMLIMIT);
#endif

#if defined(MULTIPROCESSOR) && defined(MACHDEP_DEBUG)
	rmixl_wakeup_info_print(rcp->rc_cpu_wakeup_info);
	rmixl_wakeup_info_print(rcp->rc_cpu_wakeup_info + 1);
	printf("cpu_wakeup_info %p, cpu_wakeup_end %p\n",
		rcp->rc_cpu_wakeup_info,
		rcp->rc_cpu_wakeup_end);
	printf("userapp_cpu_map: %#"PRIx64"\n",
		rcp->rc_psb_info.userapp_cpu_map);
	printf("wakeup: %#"PRIx64"\n", rcp->rc_psb_info.wakeup);
	{
		register_t sp;
		asm volatile ("move %0, $sp\n" : "=r"(sp));
		printf("sp: %#"PRIx64"\n", sp);
	}
#endif

	rmixl_physaddr_init();

	/*
	 * Obtain the cpu frequency
	 * Compute the number of ticks for hz.
	 * Compute the delay divisor.
	 * Double the Hz if this CPU runs at twice the
	 *  external/cp0-count frequency
	 */
	curcpu()->ci_cpu_freq = rcp->rc_psb_info.cpu_frequency;
	curcpu()->ci_cctr_freq = curcpu()->ci_cpu_freq;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
		((curcpu()->ci_cpu_freq + 500000) / 1000000);
	if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
		curcpu()->ci_cpu_freq *= 2;
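
	/*
	 * Worked example (editor's illustration): with a 66MHz
	 * cpu_frequency and hz = 100, ci_cycles_per_hz is
	 * (66000000 + 50) / 100 = 660000 ticks per clock interrupt,
	 * and ci_divisor_delay is (66000000 + 500000) / 1000000 = 66
	 * cycles per microsecond for delay().
	 */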

	/*
	 * Look at arguments passed to us and compute boothowto.
	 * - rmixl firmware gives us a 32 bit argv[i], so adapt
	 *   by forcing sign extension in cast to (char *)
	 */
	boothowto = RB_AUTOBOOT;
	for (int i = 1; i < argc; i++) {
		for (char *cp = (char *)(intptr_t)argv[i]; *cp; cp++) {
			int howto;
			/* Ignore superfluous '-', if there is one */
			if (*cp == '-')
				continue;

			howto = 0;
			BOOT_FLAG(*cp, howto);
			if (howto != 0)
				boothowto |= howto;
#ifdef DIAGNOSTIC
			else
				printf("bootflag '%c' not recognised\n", *cp);
#endif
		}
	}
#ifdef DIAGNOSTIC
	printf("boothowto %#x\n", boothowto);
#endif

	/*
	 * Reserve pages from the VM system.
	 */

	/* reserve 0..start..kernend pages */
	mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
		0, round_page(MIPS_KSEG0_TO_PHYS(kernend)));

	/* reserve reset exception vector page */
	/* should never be in our clusters anyway... */
	mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
		0x1FC00000, 0x1FC00000+NBPG);

#ifdef MULTIPROCESSOR
	/* reserve the cpu_wakeup_info area */
	mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
		(u_quad_t)trunc_page(rcp->rc_cpu_wakeup_info),
		(u_quad_t)round_page(rcp->rc_cpu_wakeup_end));
#endif

#ifdef MEMLIMIT
	/* reserve everything >= MEMLIMIT */
	mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
		(u_quad_t)MEMLIMIT, (u_quad_t)~0);
#endif

	/* get maximum RAM address from the VM clusters */
	mem_cluster_maxaddr = 0;
	for (u_int i = 0; i < mem_cluster_cnt; i++) {
		u_quad_t tmp = round_page(
			mem_clusters[i].start + mem_clusters[i].size);
		if (tmp > mem_cluster_maxaddr)
			mem_cluster_maxaddr = tmp;
	}
	DPRINTF(("mem_cluster_maxaddr %#"PRIx64"\n", mem_cluster_maxaddr));
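
	/*
	 * Illustration (editor's note): ram_seg_resv() may fragment a
	 * cluster.  E.g. with NBPG = 4096, reserving the reset vector
	 * page [0x1FC00000, 0x1FC01000) out of a cluster
	 * [0x1F000000, 0x20000000) would replace it with two clusters,
	 * [0x1F000000, 0x1FC00000) and [0x1FC01000, 0x20000000).
	 */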

	/*
	 * Load mem_clusters[] into the VM system.
	 */
	mips_page_physload(MIPS_KSEG0_START, (vaddr_t) kernend,
	    mem_clusters, mem_cluster_cnt, NULL, 0);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	pmap_bootstrap();

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	mips_init_lwp0_uarea();

#if defined(DDB)
	if (boothowto & RB_KDB)
		Debugger();
#endif
	/*
	 * store (cpu#0) curcpu in COP0 OSSCRATCH0
	 * used in exception vector
	 */
	__asm __volatile("dmtc0 %0,$%1"
		:: "r"(&cpu_info_store), "n"(MIPS_COP_0_OSSCRATCH));
#ifdef MULTIPROCESSOR
	mips_fixup_exceptions(rmixl_fixup_cop0_oscratch, NULL);
#endif
	rmixl_fixup_curcpu();
}

/*
 * set up Processor Control Regs for this core
 */
void
rmixl_pcr_init_core(void)
{
	uint32_t r;

#ifdef MULTIPROCESSOR
	rmixl_mtcr(RMIXL_PCR_MMU_SETUP, __BITS(2,0));
				/* enable MMU clock gating */
				/* 4 threads active -- why needed if Global? */
				/* enable global TLB mode */
#else
	rmixl_mtcr(RMIXL_PCR_THREADEN, 1);	/* disable all threads except #0 */
	rmixl_mtcr(RMIXL_PCR_MMU_SETUP, 0);	/* enable MMU clock gating */
						/* set single MMU Thread Mode */
						/* TLB is partitioned (1 partition) */
#endif

	r = rmixl_mfcr(RMIXL_PCR_L1D_CONFIG0);
	r &= ~__BIT(14);			/* disable Unaligned Access */
	rmixl_mtcr(RMIXL_PCR_L1D_CONFIG0, r);

#if defined(DDB) && defined(MIPS_DDB_WATCH)
	/*
	 * clear IEU_DEFEATURE[DBE]
	 * this enables COP0 watchpoint to trigger T_WATCH exception
	 * instead of signaling JTAG.
	 */
	r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE);
	r &= ~__BIT(7);
	rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r);
#endif
}

#ifdef MULTIPROCESSOR
static bool
rmixl_fixup_cop0_oscratch(int32_t load_addr, uint32_t new_insns[2], void *arg)
{
	size_t offset = load_addr - (intptr_t)&cpu_info_store;

	KASSERT(MIPS_KSEG0_P(load_addr));
	KASSERT(offset < sizeof(struct cpu_info));

	/*
	 * Fixup this direct load cpu_info_store to actually get the current
	 * CPU's cpu_info from COP0 OSSCRATCH0 and then fix the load to be
	 * relative from the start of struct cpu_info.
	 */

	/* [0] = [d]mfc0 rX, $22 (OSScratch) */
	new_insns[0] = (020 << 26)
#ifdef _LP64
	    | (1 << 21)		/* double move */
#endif
	    | (new_insns[0] & 0x001f0000)
	    | (MIPS_COP_0_OSSCRATCH << 11) | (0 << 0);

	/* [1] = [ls][dw] rX, offset(rX) */
	new_insns[1] = (new_insns[1] & 0xffff0000) | offset;

	return true;
}
#endif /* MULTIPROCESSOR */
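
/*
 * Illustration (editor's note, register choice hypothetical): a direct
 * load of cpu_info_store in an exception vector such as
 *	lui	v0, %hi(cpu_info_store)
 *	ld	v0, %lo(cpu_info_store)(v0)
 * is rewritten by the fixup above into
 *	dmfc0	v0, $22			# COP0 OSSCRATCH0
 *	ld	v0, offset(v0)
 * so each CPU fetches its own cpu_info rather than cpu#0's.
 */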

/*
 * The following changes all lX rN, L_CPU(MIPS_CURLWP) [curlwp->l_cpu]
 * to [d]mfc0 rN, $22 [MIPS_COP_0_OSSCRATCH]
 *
 * the mfc0 is 3 cycles shorter than the load.
 */
#define	LOAD_CURCPU_0	((MIPS_CURLWP_REG << 21) | offsetof(lwp_t, l_cpu))
#define	MFC0_CURCPU_0	((OP_COP0 << 26) | (MIPS_COP_0_OSSCRATCH << 11))
#ifdef _LP64
#define	LOAD_CURCPU	((uint32_t)(OP_LD << 26) | LOAD_CURCPU_0)
#define	MFC0_CURCPU	((uint32_t)(OP_DMF << 21) | MFC0_CURCPU_0)
#else
#define	LOAD_CURCPU	((uint32_t)(OP_LW << 26) | LOAD_CURCPU_0)
#define	MFC0_CURCPU	((uint32_t)(OP_MF << 21) | MFC0_CURCPU_0)
#endif
#define	LOAD_CURCPU_MASK	0xffe0ffff

static void
rmixl_fixup_curcpu(void)
{
	extern uint32_t _ftext[];
	extern uint32_t _etext[];

	for (uint32_t *insnp = _ftext; insnp < _etext; insnp++) {
		const uint32_t insn = *insnp;
		if (__predict_false((insn & LOAD_CURCPU_MASK) == LOAD_CURCPU)) {
			/*
			 * Since the register to be loaded is located in bits
			 * 16-20 for the mfc0 and the load instruction we can
			 * just change the instruction bits around it.
			 */
			*insnp = insn ^ LOAD_CURCPU ^ MFC0_CURCPU;
			mips_icache_sync_range((vaddr_t)insnp, 4);
		}
	}
}
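
/*
 * Illustration (editor's note): LOAD_CURCPU_MASK excludes the rt field
 * (bits 16-20), and LOAD_CURCPU and MFC0_CURCPU differ only outside it,
 * so the expression insn ^ LOAD_CURCPU ^ MFC0_CURCPU cancels the load
 * opcode and l_cpu offset and sets the mfc0 encoding while preserving
 * rN: a matching load with rt = 2 becomes the [d]mfc0 with rt = 2.
 */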

/*
 * ram_seg_resv - cut reserved regions out of segs, fragmenting as needed
 *
 * we simply build a new table of segs, then copy it back over the given one
 * this is inefficient but simple and called only a few times
 *
 * note: 'last' here means 1st addr past the end of the segment (start+size)
 */
static u_int
ram_seg_resv(phys_ram_seg_t *segs, u_int nsegs,
	u_quad_t resv_first, u_quad_t resv_last)
{
	u_quad_t first, last;
	int new_nsegs = 0;
	int resv_flag;
	phys_ram_seg_t new_segs[VM_PHYSSEG_MAX];

	for (u_int i = 0; i < nsegs; i++) {
		resv_flag = 0;
		first = trunc_page(segs[i].start);
		last = round_page(segs[i].start + segs[i].size);

		KASSERT(new_nsegs < VM_PHYSSEG_MAX);
		if ((resv_first <= first) && (resv_last >= last)) {
			/* whole segment is reserved */
			continue;
		}
		if ((resv_first > first) && (resv_first < last)) {
			u_quad_t new_last;

			/*
			 * reserved start in segment
			 * salvage the leading fragment
			 */
			resv_flag = 1;
			new_last = last - (last - resv_first);
			KASSERT (new_last > first);
			new_segs[new_nsegs].start = first;
			new_segs[new_nsegs].size = new_last - first;
			new_nsegs++;
		}
		if ((resv_last > first) && (resv_last < last)) {
			u_quad_t new_first;

			/*
			 * reserved end in segment
			 * salvage the trailing fragment
			 */
			resv_flag = 1;
			new_first = first + (resv_last - first);
			KASSERT (last > (new_first + NBPG));
			new_segs[new_nsegs].start = new_first;
			new_segs[new_nsegs].size = last - new_first;
			new_nsegs++;
		}
		if (resv_flag == 0) {
			/*
			 * nothing reserved here, take it all
			 */
			new_segs[new_nsegs].start = first;
			new_segs[new_nsegs].size = last - first;
			new_nsegs++;
		}

	}

	memcpy(segs, new_segs, sizeof(new_segs));

	return new_nsegs;
}

/*
 * create an extent for physical address space
 * these are in units of MB, for sake of compression on 32 bit kernels
 * allocate the regions where we have known functions (DRAM, IO, etc)
 * what remains can be allocated as needed for other stuff
 * e.g. to configure BARs that are not already initialized and enabled.
 */
static void
rmixl_physaddr_init(void)
{
	struct extent *ext;
	unsigned long start = 0UL;
	unsigned long end = (__BIT(40) / (1024 * 1024)) - 1;
	u_long base;
	u_long size;
	uint32_t r;

	ext = extent_create("physaddr", start, end,
	    (void *)rmixl_physaddr_storage, sizeof(rmixl_physaddr_storage),
	    EX_NOWAIT | EX_NOCOALESCE);

	if (ext == NULL)
		panic("%s: extent_create failed", __func__);

	/*
	 * grab regions per DRAM BARs
	 */
	for (u_int i = 0; i < RMIXL_SBC_DRAM_NBARS; i++) {
		r = RMIXL_IOREG_READ(RMIXL_SBC_DRAM_BAR(i));
		if ((r & RMIXL_DRAM_BAR_STATUS) == 0)
			continue;	/* not enabled */
		base = (u_long)(DRAM_BAR_TO_BASE((uint64_t)r) / (1024 * 1024));
		size = (u_long)(DRAM_BAR_TO_SIZE((uint64_t)r) / (1024 * 1024));

		DPRINTF(("%s: %d: %d: 0x%08x -- 0x%010lx:%lu MB\n",
			__func__, __LINE__, i, r, base * (1024 * 1024), size));
		if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0)
			panic("%s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
				"failed", __func__, ext, base, size, EX_NOWAIT);
	}

	/*
	 * get chip-dependent physaddr regions
	 */
	switch (cpu_rmixl_chip_type(mips_options.mips_cpu)) {
	case CIDFL_RMI_TYPE_XLR:
#if NRMIXL_PCIX
		rmixl_physaddr_init_pcix(ext);
#endif
		break;
	case CIDFL_RMI_TYPE_XLS:
#if NRMIXL_PCIE
		rmixl_physaddr_init_pcie(ext);
#endif
		break;
	case CIDFL_RMI_TYPE_XLP:
		/* XXX TBD */
		panic("%s: RMI XLP not yet supported", __func__);
	}

	/*
	 * at this point all regions left in "physaddr" extent
	 * are unused holes in the physical address space
	 * available for use as needed.
	 */
	rmixl_configuration.rc_phys_ex = ext;
#ifdef MACHDEP_DEBUG
	extent_print(ext);
#endif
}
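
/*
 * Illustration (editor's note, hypothetical caller): chip code can
 * later carve an unused range out of rc_phys_ex with extent(9),
 * remembering the MB units, e.g.
 *	u_long mb;
 *	if (extent_alloc(rmixl_configuration.rc_phys_ex, size >> 20,
 *	    EX_NOALIGN, EX_NOBOUNDARY, EX_NOWAIT, &mb) == 0)
 *		pbase = (bus_addr_t)mb << 20;	(back to bytes)
 */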

static uint64_t
rmixlfw_init(int64_t infop)
{
	struct rmixl_config *rcp = &rmixl_configuration;

#ifdef MULTIPROCESSOR
	rmixl_get_wakeup_info(rcp);
#endif

	infop |= MIPS_KSEG0_START;
	rcp->rc_psb_info = *(rmixlfw_info_t *)(intptr_t)infop;

	rcp->rc_psb_type = PSB_TYPE_UNKNOWN;
	for (int i = 0; i < RMICLFW_PSB_VERSIONS_LEN; i++) {
		if (rmiclfw_psb_id[i].psb_version ==
		    rcp->rc_psb_info.psb_version) {
			rcp->rc_psb_type = rmiclfw_psb_id[i].psb_type;
			goto found;
		}
	}

	rcp->rc_io_pbase = RMIXL_IO_DEV_PBASE;
	rmixl_putchar_init(rcp->rc_io_pbase);

#ifdef DIAGNOSTIC
	rmixl_puts("\r\nWARNING: untested psb_version: ");
	rmixl_puthex64(rcp->rc_psb_info.psb_version);
	rmixl_puts("\r\n");
#endif

#ifdef MEMSIZE
	/* XXX trust and use MEMSIZE */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = MEMSIZE;
	mem_cluster_cnt = 1;
	return MEMSIZE;
#else
	rmixl_puts("\r\nERROR: configure MEMSIZE\r\n");
	cpu_reboot(RB_HALT, NULL);
	/* NOTREACHED */
#endif

 found:
	rcp->rc_io_pbase = MIPS_KSEG1_TO_PHYS(rcp->rc_psb_info.io_base);
	rmixl_putchar_init(rcp->rc_io_pbase);
#ifdef MACHDEP_DEBUG
	rmixl_puts("\r\ninfop: ");
	rmixl_puthex64((uint64_t)(intptr_t)infop);
#endif
#ifdef DIAGNOSTIC
	rmixl_puts("\r\nrecognized psb_version=");
	rmixl_puthex64(rcp->rc_psb_info.psb_version);
	rmixl_puts(", psb_type=");
	rmixl_puts(rmixlfw_psb_type_name(rcp->rc_psb_type));
	rmixl_puts("\r\n");
#endif

	return mem_clusters_init(
		(rmixlfw_mmap_t *)(intptr_t)rcp->rc_psb_info.psb_physaddr_map,
		(rmixlfw_mmap_t *)(intptr_t)rcp->rc_psb_info.avail_mem_map);
}

void
rmixlfw_mmap_print(rmixlfw_mmap_t *map)
{
#ifdef MACHDEP_DEBUG
	for (uint32_t i = 0; i < map->nmmaps; i++) {
		rmixl_puthex32(i);
		rmixl_puts(", ");
		rmixl_puthex64(map->entry[i].start);
		rmixl_puts(", ");
		rmixl_puthex64(map->entry[i].size);
		rmixl_puts(", ");
		rmixl_puthex32(map->entry[i].type);
		rmixl_puts("\r\n");
	}
#endif
}

/*
 * mem_clusters_init
 *
 * initialize mem_clusters[] table based on memory address mapping
 * provided by boot firmware.
 *
 * prefer avail_mem_map if we can, otherwise use psb_physaddr_map.
 * these will be limited by MEMSIZE if it is configured.
 * if neither are available, just use MEMSIZE.
 */
static uint64_t
mem_clusters_init(
	rmixlfw_mmap_t *psb_physaddr_map,
	rmixlfw_mmap_t *avail_mem_map)
{
	rmixlfw_mmap_t *map = NULL;
	const char *mapname;
	uint64_t sz;
	uint64_t sum;
	u_int cnt;
#ifdef MEMSIZE
	uint64_t memsize = MEMSIZE;
#endif

#ifdef MACHDEP_DEBUG
	rmixl_puts("psb_physaddr_map: ");
	rmixl_puthex64((uint64_t)(intptr_t)psb_physaddr_map);
	rmixl_puts("\r\n");
#endif
	if (psb_physaddr_map != NULL) {
		map = psb_physaddr_map;
		mapname = "psb_physaddr_map";
		rmixlfw_mmap_print(map);
	}
#ifdef DIAGNOSTIC
	else {
		rmixl_puts("WARNING: no psb_physaddr_map\r\n");
	}
#endif

#ifdef MACHDEP_DEBUG
	rmixl_puts("avail_mem_map: ");
	rmixl_puthex64((uint64_t)(intptr_t)avail_mem_map);
	rmixl_puts("\r\n");
#endif
	if (avail_mem_map != NULL) {
		map = avail_mem_map;
		mapname = "avail_mem_map";
		rmixlfw_mmap_print(map);
	}
#ifdef DIAGNOSTIC
	else {
		rmixl_puts("WARNING: no avail_mem_map\r\n");
	}
#endif

	if (map == NULL) {
#ifndef MEMSIZE
		rmixl_puts("panic: no firmware memory map, "
			"must configure MEMSIZE\r\n");
		for (;;);	/* XXX */
#else
#ifdef DIAGNOSTIC
		rmixl_puts("WARNING: no avail_mem_map, "
			"using MEMSIZE\r\n");
#endif

		mem_clusters[0].start = 0;
		mem_clusters[0].size = MEMSIZE;
		mem_cluster_cnt = 1;
		return MEMSIZE;
#endif	/* MEMSIZE */
	}

#ifdef DIAGNOSTIC
	rmixl_puts("using ");
	rmixl_puts(mapname);
	rmixl_puts("\r\n");
#endif
#ifdef MACHDEP_DEBUG
	rmixl_puts("memory clusters:\r\n");
#endif
	sum = 0;
	cnt = 0;
	for (uint32_t i = 0; i < map->nmmaps; i++) {
		if (map->entry[i].type != RMIXLFW_MMAP_TYPE_RAM)
			continue;
		mem_clusters[cnt].start = map->entry[i].start;
		sz = map->entry[i].size;
		sum += sz;
		mem_clusters[cnt].size = sz;
#ifdef MACHDEP_DEBUG
		rmixl_puthex32(i);
		rmixl_puts(": ");
		rmixl_puthex64(mem_clusters[cnt].start);
		rmixl_puts(", ");
		rmixl_puthex64(sz);
		rmixl_puts(": ");
		rmixl_puthex64(sum);
		rmixl_puts("\r\n");
#endif
#ifdef MEMSIZE
		/*
		 * configurably limit memsize
		 */
		if (sum == memsize)
			break;
		if (sum > memsize) {
			uint64_t tmp;

			tmp = sum - memsize;
			sz -= tmp;
			sum -= tmp;
			mem_clusters[cnt].size = sz;
			cnt++;
			break;
		}
#endif
		cnt++;
	}
	mem_cluster_cnt = cnt;
	return sum;
}
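
/*
 * Worked example (editor's illustration): with MEMSIZE = 0x10000000
 * (256MB) and firmware RAM entries of 128MB and 256MB, the first entry
 * is taken whole (sum = 128MB); the second pushes sum to 384MB, so it
 * is clipped by tmp = 384MB - 256MB = 128MB down to 128MB, and
 * mem_clusters_init() returns sum = 256MB with mem_cluster_cnt = 2.
 */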

#ifdef MULTIPROCESSOR
/*
 * RMI firmware passes wakeup info structure in CP0 OS Scratch reg #7
 * they do not explicitly give us the size of the wakeup area.
 * we "know" that firmware loader sets wip->gp thusly:
 *	gp = stack_start[vcpu] = round_page(wakeup_end) + (vcpu * (PAGE_SIZE * 2))
 * so
 *	round_page(wakeup_end) == gp - (vcpu * (PAGE_SIZE * 2))
 * Only the "master" cpu runs this function, so
 *	vcpu = wip->master_cpu
 */
void
rmixl_get_wakeup_info(struct rmixl_config *rcp)
{
	volatile rmixlfw_cpu_wakeup_info_t *wip;
	int32_t scratch_7;
	intptr_t end;

	__asm__ volatile(
		".set push"		"\n"
		".set noreorder"	"\n"
		".set mips64"		"\n"
		"dmfc0	%0, $22, 7"	"\n"
		".set pop"		"\n"
			: "=r"(scratch_7));

	wip = (volatile rmixlfw_cpu_wakeup_info_t *)
			(intptr_t)scratch_7;
	end = wip->entry.gp - (wip->master_cpu * (PAGE_SIZE * 2));

	if (wip->valid == 1) {
		rcp->rc_cpu_wakeup_end = (const void *)end;
		rcp->rc_cpu_wakeup_info = wip;
	}
}
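
/*
 * Worked example (editor's illustration): with PAGE_SIZE = 4096 and
 * master_cpu = 4, end = gp - 4 * 8192 = gp - 0x8000, which per the
 * loader's stack layout above is the rounded end of the wakeup area.
 */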
printf("app_mode %d\n", wip->app_mode); 949 1.7 matt printf("printk_lock %#x\n", wip->printk_lock); 950 1.7 matt printf("kseg_master %d\n", wip->kseg_master); 951 1.7 matt printf("kuseg_reentry_function %#x\n", wip->kuseg_reentry_function); 952 1.7 matt printf("kuseg_reentry_args %#x\n", wip->kuseg_reentry_args); 953 1.7 matt printf("app_shared_mem_addr %#"PRIx64"\n", wip->app_shared_mem_addr); 954 1.7 matt printf("app_shared_mem_size %#"PRIx64"\n", wip->app_shared_mem_size); 955 1.7 matt printf("app_shared_mem_orig %#"PRIx64"\n", wip->app_shared_mem_orig); 956 1.7 matt printf("loader_lock %#x\n", wip->loader_lock); 957 1.7 matt printf("global_wakeup_mask %#x\n", wip->global_wakeup_mask); 958 1.7 matt printf("unused_0 %#x\n", wip->unused_0); 959 1.7 matt } 960 1.7 matt #endif /* MACHDEP_DEBUG */ 961 1.7 matt #endif /* MULTIPROCESSOR */ 962 1.7 matt 963 1.2 matt void 964 1.2 matt consinit(void) 965 1.2 matt { 966 1.2 matt 967 1.2 matt /* 968 1.2 matt * Everything related to console initialization is done 969 1.2 matt * in mach_init(). 970 1.2 matt */ 971 1.2 matt } 972 1.2 matt 973 1.2 matt /* 974 1.2 matt * Allocate memory for variable-sized tables, 975 1.2 matt */ 976 1.2 matt void 977 1.11 matt cpu_startup(void) 978 1.2 matt { 979 1.2 matt /* 980 1.2 matt * Virtual memory is bootstrapped -- notify the bus spaces 981 1.2 matt * that memory allocation is now safe. 982 1.2 matt */ 983 1.2 matt rmixl_configuration.rc_mallocsafe = 1; 984 1.2 matt 985 1.17 matt /* Do the usual stuff */ 986 1.17 matt cpu_startup_common(); 987 1.2 matt } 988 1.2 matt 989 1.2 matt int waittime = -1; 990 1.2 matt 991 1.2 matt void 992 1.3 rmind cpu_reboot(int howto, char *bootstr) 993 1.2 matt { 994 1.2 matt 995 1.2 matt /* Take a snapshot before clobbering any registers. */ 996 1.16 matt savectx(lwp_getpcb(curlwp)); 997 1.2 matt 998 1.2 matt if (cold) { 999 1.2 matt howto |= RB_HALT; 1000 1.2 matt goto haltsys; 1001 1.2 matt } 1002 1.2 matt 1003 1.2 matt /* If "always halt" was specified as a boot flag, obey. */ 1004 1.2 matt if (boothowto & RB_HALT) 1005 1.2 matt howto |= RB_HALT; 1006 1.2 matt 1007 1.2 matt boothowto = howto; 1008 1.2 matt if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) { 1009 1.2 matt waittime = 0; 1010 1.2 matt vfs_shutdown(); 1011 1.2 matt } 1012 1.2 matt 1013 1.2 matt splhigh(); 1014 1.2 matt 1015 1.2 matt if (howto & RB_DUMP) 1016 1.2 matt dumpsys(); 1017 1.2 matt 1018 1.2 matt haltsys: 1019 1.2 matt doshutdownhooks(); 1020 1.2 matt 1021 1.2 matt if (howto & RB_HALT) { 1022 1.2 matt printf("\n"); 1023 1.2 matt printf("The operating system has halted.\n"); 1024 1.2 matt printf("Please press any key to reboot.\n\n"); 1025 1.2 matt cnpollc(1); /* For proper keyboard command handling */ 1026 1.2 matt cngetc(); 1027 1.2 matt cnpollc(0); 1028 1.2 matt } 1029 1.2 matt 1030 1.2 matt printf("rebooting...\n\n"); 1031 1.2 matt 1032 1.7 matt rmixl_reset(); 1033 1.2 matt } 1034 1.2 matt 1035 1.2 matt /* 1036 1.2 matt * goodbye world 1037 1.2 matt */ 1038 1.2 matt void __attribute__((__noreturn__)) 1039 1.7 matt rmixl_reset(void) 1040 1.2 matt { 1041 1.7 matt uint32_t r; 1042 1.7 matt 1043 1.7 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_GPIO + RMIXL_GPIO_RESET); 1044 1.7 matt r |= RMIXL_GPIO_RESET_RESET; 1045 1.7 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_GPIO + RMIXL_GPIO_RESET, r); 1046 1.7 matt 1047 1.7 matt printf("soft reset failed, spinning...\n"); 1048 1.2 matt for (;;); 1049 1.2 matt } 1050