/* $NetBSD: aarch64_machdep.c,v 1.71 2025/09/06 21:02:39 thorpej Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aarch64_machdep.c,v 1.71 2025/09/06 21:02:39 thorpej Exp $");

#include "opt_arm_debug.h"
#include "opt_cpuoptions.h"
#include "opt_ddb.h"
#include "opt_fdt.h"
#include "opt_kernhist.h"
#include "opt_modular.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/asan.h>
#include <sys/boot_flag.h>
#include <sys/bus.h>
#include <sys/core.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kcore.h>
#include <sys/module.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <sys/xcall.h>

#include <dev/mm.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>

#include <arm/cpufunc.h>

#include <aarch64/armreg.h>
#ifdef DDB
#include <aarch64/db_machdep.h>
#endif
#include <aarch64/frame.h>
#include <aarch64/machdep.h>
#include <aarch64/pmap.h>
#include <aarch64/pte.h>
#include <aarch64/vmparam.h>
#include <aarch64/kcore.h>

#include <arm/fdt/arm_fdtvar.h>
#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_memory.h>
#include <dev/fdt/fdt_platform.h>

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

char cpu_model[32];
char machine[] = MACHINE;
char machine_arch[] = MACHINE_ARCH;

const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &pcu_fpu_ops
};

struct vm_map *phys_map;

#ifdef MODULAR
vaddr_t module_start, module_end;
static struct vm_map module_map_store;
#endif

#ifdef KASAN
vaddr_t kasan_kernelstart;
vaddr_t kasan_kernelsize;
#endif

/* XXX */
vaddr_t physical_start;
vaddr_t physical_end;
/* filled in before cleaning bss. keep in .data */
u_long kern_vtopdiff __attribute__((__section__(".data")));

/* extra physical memory allocated from round_page(_end[]) */
long kernend_extra;

/* dump configuration */
int	cpu_dump(void);
int	cpu_dumpsize(void);
u_long	cpu_dump_mempagecnt(void);

uint32_t dumpmag = 0x8fca0101;  /* magic number for savecore */
int     dumpsize = 0;           /* also for savecore */
long    dumplo = 0;

int aarch64_bti_enabled __read_mostly;

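/*
 * bti_init:
 *
 * Probe ID_AA64PFR1_EL1 for Branch Target Identification (ARMv8.5-BTI).
 * If the CPU supports it, publish the GP (guarded page) PTE bit through
 * pmap_attr_gp so executable mappings can be marked BTI-guarded, and
 * record the fact for the machdep.bti sysctl below.
 */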
static void
bti_init(void)
{
#ifdef ARMV85_BTI
	extern uint64_t pmap_attr_gp;
	uint64_t reg;

	reg = reg_id_aa64pfr1_el1_read();

	if (reg >= ID_AA64PFR1_EL1_BT_SUPPORTED) {
		pmap_attr_gp = LX_BLKPAG_GP;
		aarch64_bti_enabled = 1;
	}
#endif
}

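/*
 * cpu_kernel_vm_init:
 *
 * Early VM setup: enter write-back cacheable direct mappings for all of
 * RAM, disable TTBR0 (user) translation table walks until a user pmap
 * is activated, tighten the kernel image's initial rwx mappings, and
 * reserve the kernel image's physical range from the FDT memory map.
 */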
void
cpu_kernel_vm_init(uint64_t memory_start __unused, uint64_t memory_size __unused)
{
	extern char __kernel_text[];
	extern char _end[];
	extern char __data_start[];
	extern char __rodata_start[];
	u_int blk;

	bti_init();

	vaddr_t kernstart = trunc_page((vaddr_t)__kernel_text);
	vaddr_t kernend = round_page((vaddr_t)_end);
	paddr_t kernstart_phys = KERN_VTOPHYS(kernstart);
	paddr_t kernend_phys = KERN_VTOPHYS(kernend);
	vaddr_t data_start = (vaddr_t)__data_start;
	vaddr_t rodata_start = (vaddr_t)__rodata_start;

	/* add direct mappings of whole memory */
	const pt_entry_t dmattr =
	    LX_BLKPAG_ATTR_NORMAL_WB |
	    LX_BLKPAG_AP_RW |
	    LX_BLKPAG_PXN |
	    LX_BLKPAG_UXN;
	for (blk = 0; blk < bootconfig.dramblocks; blk++) {
		uint64_t start, end;

		start = trunc_page(bootconfig.dram[blk].address);
		end = round_page(bootconfig.dram[blk].address +
		    (uint64_t)bootconfig.dram[blk].pages * PAGE_SIZE);

		pmapboot_enter_range(AARCH64_PA_TO_KVA(start), start,
		    end - start, dmattr, printf);
	}

	/* Disable translation table walks using TTBR0 */
	uint64_t tcr = reg_tcr_el1_read();
	reg_tcr_el1_write(tcr | TCR_EPD0);
	isb();

	aarch64_tlbi_all();

	/*
	 * At this point the whole kernel image is mapped "rwx".
	 * Tighten the permissions to:
	 *
	 *    text     rwx => r-x
	 *    rodata   rwx => r--
	 *    data     rwx => rw-  (.bss included)
	 *
	 * The kernel image is mapped with 2MB L2 blocks.
	 */
	pmapboot_protect(L2_TRUNC_BLOCK(kernstart),
	    L2_TRUNC_BLOCK(data_start), VM_PROT_WRITE);
	pmapboot_protect(L2_ROUND_BLOCK(rodata_start),
	    L2_ROUND_BLOCK(kernend), VM_PROT_EXECUTE);

	aarch64_tlbi_all();

	VPRINTF("%s: kernel phys start %lx end %lx+%lx\n", __func__,
	    kernstart_phys, kernend_phys, kernend_extra);
	fdt_memory_remove_range(kernstart_phys,
	     kernend_phys - kernstart_phys + kernend_extra);

#ifdef KASAN
	kasan_kernelstart = kernstart;
	kasan_kernelsize = L2_ROUND_BLOCK(kernend) - kernstart;
#endif
}


/*
 * Upper region: 0xffff_ffff_ffff_ffff  Top of virtual memory
 *
 *               0xffff_ffff_ffe0_0000  End of KVA
 *                                      = VM_MAX_KERNEL_ADDRESS
 *
 *               0xffff_c000_4000_0000  Start of KVA
 *
 *               0xffff_c000_0???_????  End of kernel
 *                                      = _end[]
 *               0xffff_c000_00??_????  Start of kernel
 *                                      = __kernel_text[]
 *
 *               0xffff_c000_0000_0000  Kernel base address
 *                                      = VM_MIN_KERNEL_ADDRESS
 *
 *               0xffff_bfff_ffff_ffff  End of direct mapped
 *               0xffff_0000_0000_0000  Start of direct mapped
 *                                      = AARCH64_DIRECTMAP_START
 *
 * Hole:         0xfffe_ffff_ffff_ffff
 *               0x0001_0000_0000_0000
 *
 * Lower region: 0x0000_ffff_ffff_ffff  End of user address space
 *                                      = VM_MAXUSER_ADDRESS
 *
 *               0x0000_0000_0000_0000  Start of user address space
 */
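/*
 * An illustrative sketch, not part of the code: kern_vtopdiff is the
 * constant (VA - PA) offset of the kernel image, so KERN_VTOPHYS(va)
 * is simply va - kern_vtopdiff, while the direct map places physical
 * address pa at AARCH64_DIRECTMAP_START + pa. Assuming (hypothetically)
 * a kernel loaded at PA 0x4000_0000:
 *
 *	KERN_VTOPHYS(0xffff_c000_0000_0000)  == 0x4000_0000
 *	AARCH64_PA_TO_KVA(0x4000_0000)       == 0xffff_0000_4000_0000
 */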
vaddr_t
initarm_common(vaddr_t kvm_base, vsize_t kvm_size,
    const struct boot_physmem *bp, size_t nbp)
{
	extern char __kernel_text[];
	extern char _end[];
	extern char lwp0uspace[];

	struct pcb *pcb;
	struct trapframe *tf;
	psize_t memsize_total;
	vaddr_t kernstart, kernend;
	vaddr_t kernstart_l2 __unused, kernend_l2;	/* L2 table 2MB aligned */
	vaddr_t kernelvmstart;
	size_t i;

	cputype = cpu_idnum();	/* for arm compatibility */

	kernstart = trunc_page((vaddr_t)__kernel_text);
	kernend = round_page((vaddr_t)_end);

	kernstart_l2 = L2_TRUNC_BLOCK(kernstart);
	kernend_l2 = L2_ROUND_BLOCK(kernend);

	kernelvmstart = kernend_l2;

#ifdef MODULAR
	/*
	 * The aarch64 compilers (gcc & llvm) use R_AARCH64_CALL26/
	 * R_AARCH64_JUMP26 for function calls (bl) and jumps (b). At this
	 * time, neither compiler supports -mlong-calls, so kernel modules
	 * must be loaded within +/-128MB of the kernel text.
	 */
#define MODULE_RESERVED_MAX	(1024 * 1024 * 128)
#define MODULE_RESERVED_SIZE	(1024 * 1024 * 32)	/* good enough? */
	module_start = kernelvmstart;
	module_end = kernend_l2 + MODULE_RESERVED_SIZE;
	if (module_end >= kernstart_l2 + MODULE_RESERVED_MAX)
		module_end = kernstart_l2 + MODULE_RESERVED_MAX;
	KASSERT(module_end > kernend_l2);
	kernelvmstart = module_end;
#endif /* MODULAR */
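	/*
	 * Example with hypothetical numbers: for a kernel occupying a
	 * single 2MB L2 block, module_start == kernend_l2 and module_end
	 * lies 32MB beyond it; the clamp above guarantees no module ends
	 * more than 128MB past kernstart_l2, keeping every module within
	 * the CALL26/JUMP26 branch range.
	 */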

	KASSERT(kernelvmstart < VM_KERNEL_VM_BASE);

	kernelvmstart = VM_KERNEL_VM_BASE;

	paddr_t kernstart_phys __unused = KERN_VTOPHYS(kernstart);
	paddr_t kernend_phys __unused = KERN_VTOPHYS(kernend);

	physical_start = bootconfig.dram[0].address;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address +
		       ptoa(bootconfig.dram[bootconfig.dramblocks - 1].pages);

	/*
	 * msgbuf is carved from the top of one of the memory blocks, to
	 * avoid corruption by the bootloader or by changes in kernel layout.
	 */
	paddr_t msgbufaddr = 0;
	for (i = 0; i < bootconfig.dramblocks; i++) {
		/* does this block have enough space for msgbuf? */
		if (bootconfig.dram[i].pages < atop(round_page(MSGBUFSIZE)))
			continue;

		/* allocate msgbuf from the top of this block */
		bootconfig.dram[i].pages -= atop(round_page(MSGBUFSIZE));
		msgbufaddr = bootconfig.dram[i].address +
		    ptoa(bootconfig.dram[i].pages);
		break;
	}
	KASSERT(msgbufaddr != 0);	/* no space for msgbuf */
	initmsgbuf((void *)AARCH64_PA_TO_KVA(msgbufaddr), MSGBUFSIZE);

	VPRINTF(
	    "------------------------------------------\n"
	    "kern_vtopdiff         = 0x%016lx\n"
	    "physical_start        = 0x%016lx\n"
	    "kernel_start_phys     = 0x%016lx\n"
	    "kernel_end_phys       = 0x%016lx\n"
	    "pagetables_start_phys = 0x%016lx\n"
	    "pagetables_end_phys   = 0x%016lx\n"
	    "msgbuf                = 0x%016lx\n"
	    "physical_end          = 0x%016lx\n"
	    "VM_MIN_KERNEL_ADDRESS = 0x%016lx\n"
	    "kernel_start_l2       = 0x%016lx\n"
	    "kernel_start          = 0x%016lx\n"
	    "kernel_end            = 0x%016lx\n"
	    "(extra)               = 0x%016lx\n"
	    "kernel_end_l2         = 0x%016lx\n"
#ifdef MODULAR
	    "module_start          = 0x%016lx\n"
	    "module_end            = 0x%016lx\n"
#endif
	    "(kernel va area)      = 0x%016lx\n"
	    "(devmap va area)      = 0x%016lx\n"
	    "VM_MAX_KERNEL_ADDRESS = 0x%016lx\n"
	    "------------------------------------------\n",
	    kern_vtopdiff,
	    physical_start,
	    kernstart_phys,
	    kernend_phys,
	    round_page(kernend_phys),
	    round_page(kernend_phys) + kernend_extra,
	    msgbufaddr,
	    physical_end,
	    VM_MIN_KERNEL_ADDRESS,
	    kernstart_l2,
	    kernstart,
	    kernend,
	    kernend_extra,
	    kernend_l2,
#ifdef MODULAR
	    module_start,
	    module_end,
#endif
	    VM_KERNEL_VM_BASE,
	    VM_KERNEL_IO_BASE,
	    VM_MAX_KERNEL_ADDRESS);

#ifdef DDB
	db_machdep_cpu_init();
#endif

	uvm_md_init();

	/* register free physical memory blocks */
	memsize_total = 0;

	KASSERT(bp != NULL || nbp == 0);
	KASSERT(bp == NULL || nbp != 0);

	KDASSERT(bootconfig.dramblocks <= DRAM_BLOCKS);
	for (i = 0; i < bootconfig.dramblocks; i++) {
		paddr_t start, end;

		/* an all-zero entry marks the end of the list */
		if (bootconfig.dram[i].address == 0 &&
		    bootconfig.dram[i].pages == 0)
			break;

		start = atop(bootconfig.dram[i].address);
		end = start + bootconfig.dram[i].pages;

		int vm_freelist = VM_FREELIST_DEFAULT;

		VPRINTF("block %2zu start %08lx  end %08lx\n", i, ptoa(start),
		    ptoa(end));

		/*
		 * This assumes the bp list is sorted in ascending
		 * order.
		 */
		paddr_t segend = end;
		for (size_t j = 0; j < nbp && start < end; j++) {
			paddr_t bp_start = bp[j].bp_start;
			paddr_t bp_end = bp_start + bp[j].bp_pages;

			VPRINTF("   bp %2zu start %08lx  end %08lx\n",
			    j, ptoa(bp_start), ptoa(bp_end));

			KASSERT(bp_start < bp_end);
			if (start >= bp_end || segend < bp_start)
				continue;

			if (start < bp_start)
				start = bp_start;

			if (start < bp_end) {
				if (segend > bp_end) {
					segend = bp_end;
				}
				vm_freelist = bp[j].bp_freelist;

				VPRINTF("         start %08lx  end %08lx"
				    "... loading in freelist %d\n", ptoa(start),
				    ptoa(segend), vm_freelist);

				uvm_page_physload(start, segend, start, segend,
				    vm_freelist);

				memsize_total += ptoa(segend - start);
				start = segend;
				segend = end;
			}
		}
	}
	physmem = atop(memsize_total);

	/*
	 * The kernel image was mapped by locore.S using L2 blocks (n * 2MB);
	 * kernel virtual space starts at the 2MB-aligned end of the kernel.
	 */
	pmap_bootstrap(kernelvmstart, VM_MAX_KERNEL_ADDRESS);

	kasan_init();

	/*
	 * setup lwp0
	 */
	uvm_lwp_setuarea(&lwp0, (vaddr_t)lwp0uspace);
	memset(&lwp0.l_md, 0, sizeof(lwp0.l_md));
	pcb = lwp_getpcb(&lwp0);
	memset(pcb, 0, sizeof(struct pcb));

	tf = (struct trapframe *)(lwp0uspace + USPACE) - 1;
	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_spsr = SPSR_M_EL0T;
	lwp0.l_md.md_utf = pcb->pcb_tf = tf;

	return (vaddr_t)tf;
}

/*
 * machine dependent system variables.
 */
static void
set_user_tagged_address(void *arg1, void *arg2)
{
	uint64_t enable = PTRTOUINT64(arg1);
	uint64_t tcr = reg_tcr_el1_read();

	if (enable)
		tcr |= TCR_TBI0;
	else
		tcr &= ~TCR_TBI0;
	reg_tcr_el1_write(tcr);
}

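/*
 * sysctl handler for machdep.tagged_address: toggles TCR_EL1.TBI0 on
 * every CPU via a broadcast cross-call (set_user_tagged_address above),
 * so that the top byte of user virtual addresses is ignored by address
 * translation. Usage sketch from userland:
 *
 *	sysctl -w machdep.tagged_address=1
 */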
static int
sysctl_machdep_tagged_address(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, cur, val;
	uint64_t tcr;

	tcr = reg_tcr_el1_read();
	cur = val = (tcr & TCR_TBI0) ? 1 : 0;

	node = *rnode;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (val < 0 || val > 1)
		return EINVAL;

	if (cur != val) {
		uint64_t where = xc_broadcast(0, set_user_tagged_address,
		    UINT64TOPTR(val), NULL);
		xc_wait(where);
	}

	return 0;
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "tagged_address",
	    SYSCTL_DESCR("top byte ignored in the address calculation"),
	    sysctl_machdep_tagged_address, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "pan",
	    SYSCTL_DESCR("Whether Privileged Access Never is enabled"),
	    NULL, 0,
	    &aarch64_pan_enabled, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "pac",
	    SYSCTL_DESCR("Whether Pointer Authentication is enabled"),
	    NULL, 0,
	    &aarch64_pac_enabled, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "bti",
	    SYSCTL_DESCR("Whether Branch Target Identification is enabled"),
	    NULL, 0,
	    &aarch64_bti_enabled, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "hafdbs",
	    SYSCTL_DESCR("Whether hardware updates to the Access flag and Dirty state are enabled"),
	    NULL, 0,
	    &aarch64_hafdbs_enabled, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}

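/*
 * parse_mi_bootargs:
 *
 * Scan the boot argument string for single-dash option groups and fold
 * each option letter into boothowto with BOOT_FLAG(); non-dash arguments
 * are skipped. For example, per sys/boot_flag.h, "-s" requests a
 * single-user boot (RB_SINGLE) and "-v" a verbose one (AB_VERBOSE).
 */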
void
parse_mi_bootargs(char *args)
{
	const char *p = args;

	while (*p != '\0') {
		while (isspace(*p))
			p++;

		/* parse single dash (`-') options */
		if (*p == '-') {
			p++;
			while (!isspace(*p) && *p != '\0') {
				BOOT_FLAG(*p, boothowto);
				p++;
			}
			continue;
		}

		/* skip normal argument */
		while (!isspace(*p) && *p != '\0')
			p++;
	}
}

void
machdep_init(void)
{
	/* clear cpu reset hook for early boot */
	cpu_reset_address0 = NULL;

	configure_cpu_traps();
}

#ifdef MODULAR
/* Push any modules loaded by the boot loader */
void
module_init_md(void)
{
#ifdef FDT
	arm_fdt_module_init();
#endif
}
#endif /* MODULAR */

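/*
 * in_dram_p:
 *
 * Return true if the physical range [pa, pa + size) lies entirely
 * within one of the DRAM blocks recorded in bootconfig.
 */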
static bool
in_dram_p(paddr_t pa, psize_t size)
{
	int i;

	for (i = 0; i < bootconfig.dramblocks; i++) {
		paddr_t s, e;
		s = bootconfig.dram[i].address;
		e = bootconfig.dram[i].address + ptoa(bootconfig.dram[i].pages);
		if ((s <= pa) && ((pa + size) <= e))
			return true;
	}
	return false;
}

bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	if (in_dram_p(pa, 0)) {
		*vap = AARCH64_PA_TO_KVA(pa);
		return true;
	}
	return false;
}

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	if (in_dram_p(pa, 0))
		return 0;

	if (pa >= AARCH64_MAX_PA)
		return EFAULT;

	return kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL);
}

#ifdef __HAVE_MM_MD_KERNACC
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	extern char __kernel_text[];
	extern char _end[];
	extern char __data_start[];
	extern char __rodata_start[];

	vaddr_t kernstart = trunc_page((vaddr_t)__kernel_text);
	vaddr_t kernend = round_page((vaddr_t)_end);
	paddr_t kernstart_phys = KERN_VTOPHYS(kernstart);
	vaddr_t data_start = (vaddr_t)__data_start;
	vaddr_t rodata_start = (vaddr_t)__rodata_start;
	vsize_t rosize = kernend - rodata_start;

	const vaddr_t v = (vaddr_t)ptr;

#define IN_RANGE(addr,sta,end)	(((sta) <= (addr)) && ((addr) < (end)))

	*handled = false;
	if (IN_RANGE(v, kernstart, kernend)) {
		*handled = true;
		if ((v < data_start) && (prot & VM_PROT_WRITE))
			return EFAULT;
	} else if (IN_RANGE(v, AARCH64_DIRECTMAP_START, AARCH64_DIRECTMAP_END)) {
		/*
		 * If PMAP_MAP_POOLPAGE is defined, direct-mapped addresses
		 * can show up as kvm(3) addresses.
		 */
		paddr_t pa = AARCH64_KVA_TO_PA(v);
		if (in_dram_p(pa, 0)) {
			*handled = true;
			if (IN_RANGE(pa, kernstart_phys,
			    kernstart_phys + rosize) &&
			    (prot & VM_PROT_WRITE))
				return EFAULT;
		}
	}
	return 0;
}
#endif

void
cpu_startup(void)
{
	vaddr_t maxaddr, minaddr;

	consinit();

#ifdef FDT
	const struct fdt_platform * const plat = fdt_platform_find();
	if (plat->fp_startup != NULL)
		plat->fp_startup();
#endif

	/*
	 * Allocate a submap for physio.
	 */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	   VM_PHYS_SIZE, 0, FALSE, NULL);

#ifdef MODULAR
	uvm_map_setup(&module_map_store, module_start, module_end, 0);
	module_map_store.pmap = pmap_kernel();
	module_map = &module_map_store;
#endif

	/* Hello! */
	banner();

	cpu_startup_hook();
}

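/*
 * cpu_startup_hook is a weak alias: platform code may provide its own
 * cpu_startup_hook() to run board-specific startup; the default below
 * is a no-op.
 */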
__weak_alias(cpu_startup_hook,cpu_startup_default)
void
cpu_startup_default(void)
{
}

/*
 * cpu_dump: dump the machine-dependent kernel core dump headers.
 */
int
cpu_dump(void)
{
	int (*dump)(dev_t, daddr_t, void *, size_t);
	char bf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	const struct bdevsw *bdev;
	int i;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return (ENXIO);
	dump = bdev->d_dump;

	memset(bf, 0, sizeof bf);
	segp = (kcore_seg_t *)bf;
	cpuhdrp = (cpu_kcore_hdr_t *)&bf[ALIGN(sizeof(*segp))];
	memsegp = &cpuhdrp->kh_ramsegs[0];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->kh_tcr1 = reg_tcr_el1_read();
	cpuhdrp->kh_ttbr1 = reg_ttbr1_el1_read();
	cpuhdrp->kh_nramsegs = bootconfig.dramblocks;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < bootconfig.dramblocks; i++) {
		memsegp[i].start = bootconfig.dram[i].address;
		memsegp[i].size = ptoa(bootconfig.dram[i].pages);
	}

	return (dump(dumpdev, dumplo, bf, dbtob(1)));
}

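/*
 * dumpsys:
 *
 * Write physical memory to the configured dump device, one page at a
 * time through the direct map, after writing the kcore headers via
 * cpu_dump(). Prints progress and a diagnostic for any failure.
 */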
void
dumpsys(void)
{
	const struct bdevsw *bdev;
	daddr_t blkno;
	int psize;
	int error;
	paddr_t addr = 0, end;
	int block;
	psize_t len;
	vaddr_t dumpspace;

	/* flush everything out of caches */
	cpu_dcache_wbinv_all();

	if (dumpdev == NODEV)
		return;
	if (dumpsize == 0) {
		cpu_dumpconf();
	}
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		delay(5000000);
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;
	psize = bdev_size(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	if ((error = cpu_dump()) != 0)
		goto err;

	blkno = dumplo + cpu_dumpsize();
	error = 0;
	len = dumpsize;

	for (block = 0; block < bootconfig.dramblocks && error == 0; ++block) {
		addr = bootconfig.dram[block].address;
		end = bootconfig.dram[block].address +
		      ptoa(bootconfig.dram[block].pages);
		for (; addr < end; addr += PAGE_SIZE) {
			if (((len * PAGE_SIZE) % (1024 * 1024)) == 0)
				printf("%lu ", (len * PAGE_SIZE) / (1024 * 1024));

			if (!mm_md_direct_mapped_phys(addr, &dumpspace)) {
				error = ENOMEM;
				goto err;
			}
			error = (*bdev->d_dump)(dumpdev,
			    blkno, (void *) dumpspace, PAGE_SIZE);

			if (error)
				goto err;
			blkno += btodb(PAGE_SIZE);
			len--;
		}
	}
err:
	switch (error) {
	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case ENOMEM:
		printf("no direct map for %lx\n", addr);
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);
}

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
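/*
 * Returns the header size in disk blocks: 1 if the headers fit within a
 * single disk block, -1 otherwise.
 */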
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
	    ALIGN(bootconfig.dramblocks * sizeof(phys_ram_seg_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return -1;

	return (1);
}

/*
 * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped.
 */
u_long
cpu_dump_mempagecnt(void)
{
	u_long i, n;

	n = 0;
	for (i = 0; i < bootconfig.dramblocks; i++) {
		n += bootconfig.dram[i].pages;
	}

	return (n);
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */

void
cpu_dumpconf(void)
{
	u_long nblks, dumpblks;	/* size of dump area */

	if (dumpdev == NODEV)
		return;
	nblks = bdev_size(dumpdev);
	if (nblks <= ctod(1))
		return;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		goto bad;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
	return;

 bad:
	dumpsize = 0;
}
    934