/*	$NetBSD: arm32_machdep.c,v 1.148 2025/09/06 21:02:40 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated	: 18/04/01 updated for new wscons
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.148 2025/09/06 21:02:40 thorpej Exp $");

#include "opt_arm_debug.h"
#include "opt_arm_start.h"
#include "opt_fdt.h"
#include "opt_modular.h"
#include "opt_md.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>
#include <dev/mm.h>

#include <arm/locore.h>

#include <arm/cpu_topology.h>
#include <arm/arm32/machdep.h>

#include <machine/bootconfig.h>
#include <machine/pcb.h>

#if defined(FDT)
#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_platform.h>

#include <arm/fdt/arm_fdtvar.h>
#include <arch/evbarm/fdt/platform.h>
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#ifdef __HAVE_GENERIC_START
void generic_prints(const char *);
void generic_printx(int);
#define VPRINTS(s)	generic_prints(s)
#define VPRINTX(x)	generic_printx(x)
#else
#define VPRINTS(s)	__nothing
#define VPRINTX(x)	__nothing
#endif
#else
#define VPRINTF(...)	__nothing
#define VPRINTS(s)	__nothing
#define VPRINTX(x)	__nothing
#endif

void (*cpu_reset_address)(void);	/* Used by locore */
paddr_t cpu_reset_address_paddr;	/* Used by locore */

struct vm_map *phys_map = NULL;

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

pv_addr_t kernelstack;
pv_addr_t abtstack;
pv_addr_t fiqstack;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t idlestack;

void *	msgbufaddr;
extern paddr_t msgbufphys;

int kernel_debug = 0;
int cpu_printfataltraps = 0;
int cpu_fpu_present;
int cpu_hwdiv_present;
int cpu_neon_present;
int cpu_simd_present;
int cpu_simdex_present;
int cpu_umull_present;
int cpu_synchprim_present;
int cpu_unaligned_sigbus;
const char *cpu_arch = "";

int cpu_instruction_set_attributes[6];
int cpu_memory_model_features[4];
int cpu_processor_features[2];
int cpu_media_and_vfp_features[2];

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);

/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
		VPRINTF(" vbar=%p", page0rel);
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}

/*
 * Debug function just to park the CPU
 */

void
halt(void)
{
	while (1)
		cpu_sleep(0);
}


/* Sync the discs, unmount the filesystems, and adjust the todr */

void
bootsync(void)
{
	static bool bootsyncdone = false;

	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled. This means the boot() call
		 * did not come from a user process e.g. shutdown, but must
		 * have come from somewhere in the kernel.
		 */
		IRQenable;
		printf("Warning IRQ's disabled during boot()\n");
	}

	vfs_shutdown();
}

/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;

#ifndef __HAVE_GENERIC_START
	/* Set the CPU control register */
	cpu_setup(boot_args);
#endif

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

#ifdef FDT
	const struct fdt_platform * const plat = fdt_platform_find();
	if (plat->fp_startup != NULL)
		plat->fp_startup();
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)msgbufaddr, NULL)) {
		for (u_int loop = 0; loop < btoc(MSGBUFSIZE); ++loop) {
			pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
			    msgbufphys + loop * PAGE_SIZE,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Allocate a submap for physio
	 */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	banner();

	/*
	 * This is actually done by initarm_common, but not all ports use it
	 * yet so do it here to catch them as well
	 */
	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);

	/* Zero out the PCB. */
	memset(pcb, 0, sizeof(*pcb));

	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	pcb->pcb_ksp -= sizeof(struct trapframe);

	struct trapframe * tf = (struct trapframe *)pcb->pcb_ksp;

	/* Zero out the trapframe. */
	memset(tf, 0, sizeof(*tf));
	lwp_settrapframe(l, tf);

	tf->tf_spsr = PSR_USR32_MODE;
#ifdef _ARM_ARCH_BE8
	tf->tf_spsr |= PSR_E_BIT;
#endif

	cpu_startup_hook();
}

__weak_alias(cpu_startup_hook,cpu_startup_default)
void
cpu_startup_default(void)
{
}

/*
 * machine dependent system variables.
 */
static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_device == NULL)
		return EOPNOTSUPP;

	node = *rnode;
	node.sysctl_data = __UNCONST(device_xname(booted_device));
	node.sysctl_size = strlen(device_xname(booted_device)) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_kernel == NULL || booted_kernel[0] == '\0')
		return EOPNOTSUPP;

	node = *rnode;
	node.sysctl_data = booted_kernel;
	node.sysctl_size = strlen(booted_kernel) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_machdep_cpu_arch(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	node.sysctl_data = __UNCONST(cpu_arch);
	node.sysctl_size = strlen(cpu_arch) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_machdep_powersave(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int error, newval;

	newval = cpu_do_powersave;
	node.sysctl_data = &newval;
	if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
		node.sysctl_flags &= ~CTLFLAG_READWRITE;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL || newval == cpu_do_powersave)
		return error;

	if (newval < 0 || newval > 1)
		return EINVAL;
	cpu_do_powersave = newval;

	return 0;
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "debug", NULL,
		       NULL, 0, &kernel_debug, 0,
		       CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_booted_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "cpu_arch", NULL,
		       sysctl_machdep_cpu_arch, 0, NULL, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "powersave", NULL,
		       sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		       CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cpu_id", NULL,
		       NULL, curcpu()->ci_arm_cpuid, NULL, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#ifdef FPU_VFP
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "fpu_id", NULL,
		       NULL, 0, &cpu_info_store[0].ci_vfp_id, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "fpu_present", NULL,
		       NULL, 0, &cpu_fpu_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "hwdiv_present", NULL,
		       NULL, 0, &cpu_hwdiv_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "neon_present", NULL,
		       NULL, 0, &cpu_neon_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_isar", NULL,
		       NULL, 0,
		       cpu_instruction_set_attributes,
		       sizeof(cpu_instruction_set_attributes),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_mmfr", NULL,
		       NULL, 0,
		       cpu_memory_model_features,
		       sizeof(cpu_memory_model_features),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_pfr", NULL,
		       NULL, 0,
		       cpu_processor_features,
		       sizeof(cpu_processor_features),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_mvfr", NULL,
		       NULL, 0,
		       cpu_media_and_vfp_features,
		       sizeof(cpu_media_and_vfp_features),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "simd_present", NULL,
		       NULL, 0, &cpu_simd_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "simdex_present", NULL,
		       NULL, 0, &cpu_simdex_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "synchprim_present", NULL,
		       NULL, 0, &cpu_synchprim_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "printfataltraps", NULL,
		       NULL, 0, &cpu_printfataltraps, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	cpu_unaligned_sigbus =
#if defined(__ARMEL__)
	    !CPU_IS_ARMV6_P() && !CPU_IS_ARMV7_P();
#elif defined(_ARM_ARCH_BE8)
	    0;
#else
	    1;
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "unaligned_sigbus",
		       SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"),
		       NULL, 0, &cpu_unaligned_sigbus, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}

void
parse_mi_bootargs(char *args)
{
	int integer;

	if (get_bootconf_option(args, "-1", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_MD1;
	if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_SINGLE;
	if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_KDB;
	if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_ASKNAME;
	if (get_bootconf_option(args, "userconf", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-c", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_USERCONF;
	if (get_bootconf_option(args, "halt", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-b", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_HALT;
	if (get_bootconf_option(args, "-1", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_MD1;
	if (get_bootconf_option(args, "-2", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_MD2;
	if (get_bootconf_option(args, "-3", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_MD3;
	if (get_bootconf_option(args, "-4", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_MD4;

/*	if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
		bufpages = integer;*/

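	/*
	 * The memorydisc/memorydisk value is given in KB and is clamped
	 * below to the range 32KB .. 2048KB.
	 */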
#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
	if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
	    || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
		md_root_size = integer;
		md_root_size *= 1024;
		if (md_root_size < 32*1024)
			md_root_size = 32*1024;
		if (md_root_size > 2048*1024)
			md_root_size = 2048*1024;
	}
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

	if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_QUIET;
	if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_VERBOSE;
	if (get_bootconf_option(args, "debug", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-x", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_DEBUG;
	if (get_bootconf_option(args, "silent", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-z", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_SILENT;
}

#ifdef __HAVE_FAST_SOFTINTS
#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#ifndef __HAVE_PIC_FAST_SOFTINTS
#define SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
	 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
	 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
#define SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)

/*
 * This returns a mask of softint IPLs that may be dispatched at <ipl>
 * SOFTIPLMASK(IPL_NONE)	= 0x0000000f
 * SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 * SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 * SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 * SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define	SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)

void softint_switch(lwp_t *, int);

void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

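/*
 * Dispatch any pending soft interrupts that are unmasked at the current
 * IPL, highest priority (SERIAL) first, switching to the matching softint
 * lwp for each one.
 */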
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);
	int s;

	s = splhigh();
	KASSERT(s == opl);
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			break;
		}
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
	splx(s);
}
#endif /* !__HAVE_PIC_FAST_SOFTINTS */
#endif /* __HAVE_FAST_SOFTINTS */

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
#ifdef FDT
	arm_fdt_module_init();
#endif
}
#endif /* MODULAR */

/*
 * mm(4) hook: RAM is always accessible; access to any other physical
 * address requires the KAUTH_MACHDEP_UNMANAGEDMEM privilege.
 */
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	if (pa >= physical_start && pa < physical_end)
		return 0;

	return kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL);
}

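/*
 * Hand out the uarea for a CPU's idle lwp from the bootstrap idlestack
 * allocation, indexed by cpu_index(ci).
 */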
#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
vaddr_t
cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
{
	const vaddr_t va = idlestack.pv_va + cpu_index(ci) * USPACE;
	// printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
	return va;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * Initialise a secondary processor.
 *
 * printf isn't available to us for a number of reasons.
 *
 * - kprint_init has been called and printf will try to take locks, which we
 *   can't do just yet because the bootstrap translation tables do not allow
 *   caching.
 *
 * - kmutex(9) relies on curcpu which isn't setup yet.
 *
 */
void __noasan
cpu_init_secondary_processor(int cpuindex)
{
	// pmap_kernel has been successfully built and we can switch to it
	cpu_domains(DOMAIN_DEFAULT);
	cpu_idcache_wbinv_all();

	VPRINTS("index: ");
	VPRINTX(cpuindex);
	VPRINTS(" ttb");

	cpu_setup(boot_args);

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */

	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID);
	isb();
#else
	cpu_setttb(pmap_kernel()->pm_l1->l1_physaddr, true);
#endif

	cpu_tlb_flushID();

	VPRINTS(" (TTBR0=");
	VPRINTX(armreg_ttbr_read());
	VPRINTS(")");

#ifdef ARM_MMU_EXTENDED
	VPRINTS(" (TTBR1=");
	VPRINTX(armreg_ttbr1_read());
	VPRINTS(")");
	VPRINTS(" (TTBCR=");
	VPRINTX(armreg_ttbcr_read());
	VPRINTS(")");
#endif

	struct cpu_info * ci = &cpu_info_store[cpuindex];

	VPRINTS(" ci = ");
	VPRINTX((int)ci);

	ci->ci_ctrl = armreg_sctlr_read();
	ci->ci_arm_cpuid = cpu_idnum();
	ci->ci_arm_cputype = ci->ci_arm_cpuid & CPU_ID_CPU_MASK;
	ci->ci_arm_cpurev = ci->ci_arm_cpuid & CPU_ID_REVISION_MASK;

	ci->ci_midr = armreg_midr_read();
	ci->ci_actlr = armreg_auxctl_read();
	ci->ci_revidr = armreg_revidr_read();
	ci->ci_mpidr = armreg_mpidr_read();

	arm_cpu_topology_set(ci, ci->ci_mpidr);

	VPRINTS(" vfp");
	vfp_detect(ci);

	VPRINTS(" hatched |=");
	VPRINTX(__BIT(cpuindex));
	VPRINTS("\n\r");

	cpu_set_hatched(cpuindex);

	/*
	 * return to assembly to wait for cpu_boot_secondary_processors
	 */
}

void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_XCALL);
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_GENERIC);
}

#endif /* MULTIPROCESSOR */

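/*
 * mm(4) hook: translate a physical address through the pmap direct map,
 * when this kernel provides one.
 */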
#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	bool rv;
	vaddr_t va = pmap_direct_mapped_phys(pa, &rv, 0);
	if (rv) {
		*vap = va;
	}
	return rv;
}
#endif

bool
mm_md_page_color(paddr_t pa, int *colorp)
{
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
	*colorp = atop(pa & arm_cache_prefer_mask);

	return arm_cache_prefer_mask ? false : true;
#else
	*colorp = 0;

	return true;
#endif
}

#if defined(FDT)
extern char KERNEL_BASE_phys[];
#define KERNEL_BASE_PHYS ((paddr_t)KERNEL_BASE_phys)

void
cpu_kernel_vm_init(paddr_t memory_start, psize_t memory_size)
{
	const struct fdt_platform *plat = fdt_platform_find();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	const bool mapallmem_p = true;
#ifndef PMAP_NEED_ALLOC_POOLPAGE
	if (memory_size > KERNEL_VM_BASE - KERNEL_BASE) {
		VPRINTF("%s: dropping RAM size from %luMB to %uMB\n",
		    __func__, (unsigned long) (memory_size >> 20),
		    (KERNEL_VM_BASE - KERNEL_BASE) >> 20);
		memory_size = KERNEL_VM_BASE - KERNEL_BASE;
	}
#endif
#else
	const bool mapallmem_p = false;
#endif

	VPRINTF("%s: kernel phys start %" PRIxPADDR " end %" PRIxPADDR "\n",
	    __func__, memory_start, memory_start + memory_size);

	arm32_bootmem_init(memory_start, memory_size, KERNEL_BASE_PHYS);
	arm32_kernel_vm_init(KERNEL_VM_BASE, ARM_VECTORS_HIGH, 0,
	    plat->fp_devmap(), mapallmem_p);
}
#endif