/*	$NetBSD: pxa2x0_hpc_machdep.c,v 1.33 2023/10/12 11:33:39 skrll Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine dependent functions for kernel setup.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pxa2x0_hpc_machdep.c,v 1.33 2023/10/12 11:33:39 skrll Exp $");

#include "opt_ddb.h"
#include "opt_dram_pages.h"
#include "opt_modular.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>
#include <sys/ksyms.h>
#include <sys/conf.h>	/* XXX for consinit related hacks */
#include <sys/device.h>
#include <sys/bus.h>

#if NKSYMS || defined(DDB) || defined(MODULAR)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <sys/exec_elf.h>
#endif

#include <uvm/uvm.h>

#include <arm/xscale/pxa2x0cpu.h>
#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0_gpio.h>
#include <arm/locore.h>
#include <arm/undefined.h>
#include <arm/arm32/machdep.h>

#include <machine/bootconfig.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/io.h>
#include <machine/platid.h>
#include <machine/rtc.h>
#include <machine/signal.h>

#include <dev/hpc/apm/apmvar.h>
#include <dev/ic/comreg.h>

/* Kernel text starts 2MB in from the bottom of the kernel address space. */
#define	KERNEL_TEXT_BASE	(KERNEL_BASE + 0x00200000)
#ifndef	KERNEL_VM_BASE
#define	KERNEL_VM_BASE		(KERNEL_BASE + 0x01000000)
#endif
/*
 * The range 0xc1000000 - 0xccffffff is available for kernel VM space.
 * Core-logic registers and I/O mappings occupy 0xfd000000 - 0xffffffff.
 */
#define	KERNEL_VM_SIZE		0x0c000000

extern BootConfig bootconfig;		/* Boot config storage */

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;
extern paddr_t physical_end;

extern paddr_t msgbufphys;

extern int end;

#define	KERNEL_PT_SYS		0	/* Page table for mapping proc0 zero page */
#define	KERNEL_PT_KERNEL	1	/* Page table for mapping kernel */
#define	KERNEL_PT_KERNEL_NUM	4
#define	KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
					/* Page tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

pv_addr_t minidataclean;

/* Prototypes */
void data_abort_handler(trapframe_t *);
void prefetch_abort_handler(trapframe_t *);
void undefinedinstruction_bounce(trapframe_t *);
u_int cpu_get_control(void);

vaddr_t init_pxa2x0(int, char **, struct bootinfo *);

/* Machine dependent initialize function */
extern void pxa2x0_machdep_init(void);

/* Mode dependent sleep function holder */
extern void (*__sleep_func)(void *);
extern void *__sleep_ctx;

#ifdef DEBUG_BEFOREMMU
static void	fakecninit(void);
#endif

/* Number of DRAM pages which are installed */
/* Units are 4K pages, so 8192 is 32 MB of memory */
#ifndef DRAM_PAGES
#define DRAM_PAGES	8192
#endif
/*
 * Static device mappings. These peripheral registers are mapped at
 * fixed virtual addresses very early in initarm() so that we can use
 * them while booting the kernel and they remain at the same addresses
 * throughout the kernel's lifetime.
 */
#define	PXA2X0_GPIO_VBASE	0xfd000000
#define	PXA2X0_CLKMAN_VBASE	0xfd100000
#define	PXA2X0_INTCTL_VBASE	0xfd200000
#define	PXA2X0_MEMCTL_VBASE	0xfd300000
#define	PXA2X0_FFUART_VBASE	0xfd400000
#define	PXA2X0_BTUART_VBASE	0xfd500000
#define	PXA2X0_STUART_VBASE	0xfd600000

const struct pmap_devmap pxa2x0_devmap[] = {
    DEVMAP_ENTRY(
	    PXA2X0_GPIO_VBASE,
	    PXA2X0_GPIO_BASE,
	    PXA2X0_GPIO_SIZE
    ),
    DEVMAP_ENTRY(
	    PXA2X0_CLKMAN_VBASE,
	    PXA2X0_CLKMAN_BASE,
	    PXA2X0_CLKMAN_SIZE
    ),
    DEVMAP_ENTRY(
	    PXA2X0_INTCTL_VBASE,
	    PXA2X0_INTCTL_BASE,
	    PXA2X0_INTCTL_SIZE
    ),
    DEVMAP_ENTRY(
	    PXA2X0_MEMCTL_VBASE,
	    PXA2X0_MEMCTL_BASE,
	    PXA2X0_MEMCTL_SIZE
    ),
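    /*
     * The PXA2x0 UARTs are 16550 compatible, but their registers are
     * laid out at 4-byte spacing, so each mapping below covers
     * 4 * COM_NPORTS bytes.
     */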
    DEVMAP_ENTRY(
	    PXA2X0_FFUART_VBASE,
	    PXA2X0_FFUART_BASE,
	    4 * COM_NPORTS
    ),
    DEVMAP_ENTRY(
	    PXA2X0_BTUART_VBASE,
	    PXA2X0_BTUART_BASE,
	    4 * COM_NPORTS
    ),
    DEVMAP_ENTRY(
	    PXA2X0_STUART_VBASE,
	    PXA2X0_STUART_BASE,
	    4 * COM_NPORTS
    ),

    DEVMAP_ENTRY_END
};
extern const struct pmap_devmap machdep_devmap[];

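/*
 * Fetch the physical address of the currently installed L1 translation
 * table from CP15 register 2 (the TTB).  The low 14 bits are masked off
 * because the L1 table is always 16KB aligned.
 */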
static inline pd_entry_t *
read_ttb(void)
{
	u_long ttb;

	__asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb));

	return (pd_entry_t *)(ttb & ~((1 << 14) - 1));
}

/*
 * This function is responsible for setting up everything that must be
 * in place by the time main() is called.
 * This includes:
 *   Initializing the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 */
vaddr_t
init_pxa2x0(int argc, char **argv, struct bootinfo *bi)
{
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	int loop;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

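	/*
	 * Register the static device mappings and enter them into the L1
	 * table currently installed in the TTB, so that the console and
	 * the other core peripherals are usable right away.
	 */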
	pmap_devmap_bootstrap((vaddr_t)read_ttb(), pxa2x0_devmap);
	pxa2x0_memctl_bootstrap(PXA2X0_MEMCTL_VBASE);
	pxa2x0_intr_bootstrap(PXA2X0_INTCTL_VBASE);
	pxa2x0_clkman_bootstrap(PXA2X0_CLKMAN_VBASE);
	pxa2x0_gpio_bootstrap(PXA2X0_GPIO_VBASE);

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values in
	 * XXX pxa2x0_machdep_init().
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xa0000000;
	bootconfig.dram[0].pages = DRAM_PAGES;
	bootconfig.dramblocks = 1;

	pxa2x0_machdep_init();

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point we cannot call the real consinit() yet.
	 * Instead, call a fake version that works with the MMU disabled.
	 */
	fakecninit();
#endif

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (!memcmp(&end, "\177ELF", 4)) {
/*
 * XXXGCC12.
 * This accesses beyond what "int end" technically supplies.
 */
#pragma GCC push_options
#pragma GCC diagnostic ignored "-Warray-bounds"
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
#pragma GCC pop_options
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
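	/*
	 * Round the image size up to a 16KB boundary plus one extra 16KB
	 * of slack: the expression below computes exactly
	 * roundup(kerneldatasize, 16KB) + 16KB.
	 */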
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;

	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU.
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
	physical_freeend = physical_end;

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

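	/*
	 * Zero everything between the bootstrap allocation point and the
	 * kernel text, presumably so that the page tables and stacks
	 * carved out of this region below start out zeroed.
	 */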
	freemempos = 0xa0009000UL;
	memset((void *)freemempos, 0, KERNEL_TEXT_BASE - KERNEL_BASE - 0x9000);

	/*
	 * Right. We have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and NUM_KERNEL_PTS page tables
	 * and store the physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 page directory nor the L2 page
	 * tables are the same size as a page!
	 *
	 * OK, the next bit of physical allocation may look complex but it
	 * is simple really. I have done it like this so that no memory
	 * gets wasted during the allocation of the various pages and
	 * tables that are all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find.  We allocate at least 9 PTs (12 currently).  This means
	 * that in the process we KNOW that we will encounter at least one
	 * 16KB boundary.
	 *
	 * Eventually, if the top end of the memory gets used for process
	 * L1 page tables, the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define macros to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np) * PAGE_SIZE;

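	/*
	 * alloc_pages() carves physical pages off freemempos;
	 * valloc_pages() additionally derives the virtual address from the
	 * fixed KERNEL_BASE - physical_start offset the kernel is mapped at.
	 */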
	{
		int loop1 = 0;
		kernel_l1pt.pv_pa = 0;
		kernel_l1pt.pv_va = 0;
		for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
			/* Are we 16KB aligned for an L1 ? */
			if (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
			    && kernel_l1pt.pv_pa == 0) {
				valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
			} else {
				valloc_pages(kernel_pt_table[loop1],
				    L2_TABLE_SIZE / PAGE_SIZE);
				++loop1;
			}
		}
	}

	/* This should never happen, but better to confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);
#ifdef VERBOSE_INIT_ARM
	printf("minidataclean: p0x%08lx v0x%08lx, size = %ld\n",
	    minidataclean.pv_pa, minidataclean.pv_va,
	    xscale_minidata_clean_size);
#endif

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
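	/*
	 * Each kernel_pt_table[] entry is a single L2_TABLE_SIZE page
	 * mapping 4MB of virtual address space, hence the 0x00400000
	 * stride in the loops below.
	 */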
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map page tables */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/*
	 * Map the integrated peripherals at the same addresses in
	 * l1pagetable so that we can continue to use the console.
	 */
	pmap_devmap_bootstrap(l1pagetable, pxa2x0_devmap);
	pmap_devmap_bootstrap(l1pagetable, machdep_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef VERBOSE_INIT_ARM
	printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
	    kernelstack.pv_pa);
#endif /* VERBOSE_INIT_ARM */

	/*
	 * We should set a data abort handler.
	 * Once things get going this will change, as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialization of the vectors will just panic on a data abort;
	 * this fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef DEBUG
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);
#endif

	/* Initialize the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined\n");
#endif
	undefined_init();

	/* Set the page table address. */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table  @%#lx...\n", kernel_l1pt.pv_pa);
#endif
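	/*
	 * Switch the MMU over to the new L1 table: grant client access to
	 * the kernel's domain for the switch, install the new TTB, and
	 * flush the now-stale TLB entries.
	 */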
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Load memory into UVM. */
	uvm_md_init();
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t dblk_end = dblk_start
			+ (bootconfig.dram[loop].pages * PAGE_SIZE);

		if (dblk_start < physical_freestart)
			dblk_start = physical_freestart;
		if (dblk_end > physical_freeend)
			dblk_end = physical_freeend;

		uvm_page_physload(atop(dblk_start), atop(dblk_end),
		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
	}

	/* Bootstrap the pmap, telling it where managed kernel virtual memory is */
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef DDB
	db_machine_init();
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif /* DDB */

	/* We return the new stack pointer address */
	return kernelstack.pv_va + USPACE_SVC_STACK_TOP;
}

#ifdef DEBUG_BEFOREMMU
static void
fakecninit(void)
{
#if (NCOM > 0) && defined(COM_PXA2X0)
	comcnattach(&pxa2x0_a4x_bs_tag, comcnaddr, comcnspeed,
	    PXA2X0_COM_FREQ, COM_TYPE_PXA2x0, comcnmode);
#endif
}
#endif