/*	$NetBSD: arm32_kvminit.c,v 1.18 2013/02/27 22:15:46 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.18 2013/02/27 22:15:46 matt Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/bus.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/db_machdep.h>
#include <arm/undefined.h>
#include <arm/bootconfig.h>
#include <arm/arm32/machdep.h>

#include "ksyms.h"

struct bootmem_info bootmem_info;

paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */
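/*
 * Each 4KB L2 page allocated below holds four 1KB coarse page tables,
 * so one page maps L2_S_SEGSIZE (4MB) of virtual space; the eight VM
 * data pages above thus provide the initial 32MB of kernel VM.
 */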

/*
 * Macros to translate between physical and virtual for a subset of the
 * kernel address space.  *Not* for general use.
 */
#define KERN_VTOPHYS(bmi, va) \
	((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
#define KERN_PHYSTOV(bmi, pa) \
	((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))

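/*
 * Illustrative example (hypothetical addresses): with RAM starting at
 * PA 0x80000000 (bmi_start) and KERNEL_BASE at VA 0xc0000000,
 * KERN_VTOPHYS(bmi, 0xc0100000) yields PA 0x80100000, and
 * KERN_PHYSTOV() inverts it.  Both assume the kernel is mapped
 * linearly at KERNEL_BASE from the start of RAM.
 */

/*
 * Record the physical memory layout: where RAM begins and ends, where
 * the kernel image sits within it, and which blocks remain free.
 */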
void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

#ifdef VERBOSE_INIT_ARM
	printf("%s: memstart=%#lx, memsize=%#lx, kernelstart=%#lx\n",
	    __func__, memstart, memsize, kernelstart);
#endif

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */
	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = KERN_VTOPHYS(bmi, round_page((vaddr_t)_end));

#ifdef VERBOSE_INIT_ARM
	printf("%s: kernelend=%#lx\n", __func__, bmi->bmi_kernelend);
#endif

	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERNEL_BASE;
		pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}

static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#ifdef VERBOSE_INIT_ARMX
		printf("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size - 1);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

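/*
 * Insert a chunk into bmi_chunks, which is kept sorted by VA.  Adjacent,
 * compatible chunks are coalesced via concat_pvaddr(); otherwise a node
 * is taken from the bmi_freechunks pool to hold the new entry.
 */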
static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL
		    || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
			printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
				printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list))
		printf("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	else
		printf("at tail\n");
#endif
}

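/*
 * Bump-allocate npages of physical memory from the first free block that
 * can satisfy the request.  As a side effect, the first time the cursor
 * happens to sit on a 16KB boundary with room for it, the kernel L1 page
 * table is carved out here too (see the recursion below).
 */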
static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
	int prot, int cache, bool zero_p)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at an L1 table boundary, allocate the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l1pt);
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE,
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	if (zero_p)
		memset((void *)pv->pv_va, 0, nbytes);
}

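/*
 * Build the kernel's bootstrap page tables: allocate the L1 table and the
 * fixed L2 tables, map the kernel text/data, the vectors, the per-mode
 * stacks and the message buffer, then switch the MMU over to the new L1.
 */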
void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
	const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max + 1;
#else
	const size_t cpu_num = 1;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	KASSERT(mapallmem_p);
#endif

	/*
	 * Calculate the number of L2 pages needed to map the kernel text,
	 * data, and bootstrap structures.  Assume 2 L2 pages for the kernel,
	 * 1 for the vectors, and 1 for I/O.
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += L2_TABLE_SIZE * (2 + 1 + KERNEL_L2PT_VMDATA_NUM + 1);
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	kernel_size += PAGE_SIZE * (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
	kernel_size = round_page(kernel_size);

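	/*
	 * A worked example with purely illustrative numbers: a kernel image
	 * ending 7MB past its L2 segment base needs ~7MB for text/data,
	 * plus 16KB of L1 table, 48KB for the 12 L2 pages counted above,
	 * per-CPU stacks, MSGBUFSIZE, and 64KB of slop -- still under 8MB,
	 * so two 4MB L2 segments suffice for KERNEL_L2PT_KERNEL_NUM.
	 */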
	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
#endif

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM
	    < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating page tables for", __func__);
#endif
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	/*
	 * First allocate the L2 page for the vectors.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" vector");
#endif
	valloc_pages(bmi, &bmi->bmi_vector_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
	add_pages(bmi, &bmi->bmi_vector_l2pt);

	/*
	 * Now allocate L2 pages for the kernel.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" kernel");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" vm");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

	/*
	 * If someone wanted an L2 page for I/O, allocate it now.
	 */
	if (iovbase != 0) {
#ifdef VERBOSE_INIT_ARM
		printf(" io");
#endif
		valloc_pages(bmi, &bmi->bmi_io_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

#ifdef VERBOSE_INIT_ARM
	printf("\n%s: allocating stacks\n", __func__);
#endif

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/*
	 * Allocate the message buffer.  It is not zeroed (zero_p is false)
	 * so that any earlier contents may be recovered later.
	 */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
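	/*
	 * msgbufphys is consumed later in MD startup (typically from
	 * cpu_startup()) when the message buffer is mapped and handed
	 * to initmsgbuf().
	 */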

	/*
	 * Allocate a page for the system vector page.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE, true);
	systempage.pv_va = vectors;
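	/*
	 * Note that the vector page keeps the PA just allocated, but its
	 * VA is forced to the architectural vector address passed in by
	 * the caller (typically ARM_VECTORS_HIGH at 0xffff0000, or
	 * ARM_VECTORS_LOW at 0).
	 */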

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now: on XScale this is one uncached page for the mini-data
	 * cache clean area.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ|VM_PROT_WRITE, 0, true);
#endif

	/*
	 * OK, we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table.  We start by
	 * mapping the L2 page tables into the L1, which means that we
	 * can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	/* Map the L2 page tables in the L1 page table */
	pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
	    &bmi->bmi_vector_l2pt);
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vectors)\n",
	    __func__, bmi->bmi_vector_l2pt.pv_va, bmi->bmi_vector_l2pt.pv_pa,
	    systempage.pv_va);
#endif

	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
		    &kernel_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    kernel_base + idx * L2_S_SEGSIZE);
#endif
	}

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
		    &vmdata_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    kernel_vm_base + idx * L2_S_SEGSIZE);
#endif
	}
	if (iovbase) {
		pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    iovbase & -L2_S_SEGSIZE);
#endif
	}

	/*
	 * Update the top of the kernel VM; pmap_growkernel() will extend
	 * the mapped space beyond this point later on.
	 */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	extern char etext[], _end[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS(bmi, (uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;	/* round up to a page */

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi, bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ|VM_PROT_WRITE; /* XXX VM_PROT_EXECUTE */
	text.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
#endif

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ|VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
#endif

	add_pages(bmi, &data);

#ifdef VERBOSE_INIT_ARM
	printf("Listing Chunks\n");
	{
		pv_addr_t *pv;
		SLIST_FOREACH(pv, &bmi->bmi_chunks, pv_list) {
			printf("%s: pv %p: chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, pv, pv->pv_va, pv->pv_va + pv->pv_size - 1,
			    pv->pv_pa, pv->pv_prot, pv->pv_cache);
		}
	}
	printf("\nMapping Chunks\n");
#endif

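	/*
	 * Walk the sorted chunk list, coalescing physically contiguous,
	 * compatible chunks and (when mapping all of RAM) plugging the
	 * gaps between them with normal cacheable RW mappings, emitting
	 * a pmap_map_chunk() call for each maximal run.
	 */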
	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = bmi->bmi_start;
		cur_pv.pv_size = pv->pv_pa - bmi->bmi_start;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to encompass the
				 * hole, and if so do it and retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
					continue;
				}

				/*
				 * We couldn't extend it, so emit the current
				 * chunk ...
				 */
#ifdef VERBOSE_INIT_ARM
				printf("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * ... then set the current chunk to the hole
				 * and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate so emit the current one
		 * and use the new pv as the current pv.
		 */
#ifdef VERBOSE_INIT_ARM
		printf("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we are mapping all of memory, map whatever remains up to the
	 * end of RAM as well.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
#ifdef VERBOSE_INIT_ARM
			printf("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * Now we map the final chunk.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel.
	 */

	/* Map the vector page. */
	pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at the same address in the first level
	 * page table so that we can continue to use the console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about where all the bits and pieces live. */
	printf("%22s       Physical              Virtual        Num\n", " ");
	printf("%22s Starting    Ending    Starting    Ending   Pages\n", " ");

	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx                       %zu\n";

	printf(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi, bmi->bmi_start), KERN_PHYSTOV(bmi, bmi->bmi_end - 1),
	    physmem);
	printf(mem_fmt, "text section",
	       text.pv_pa, text.pv_pa + text.pv_size - 1,
	       text.pv_va, text.pv_va + text.pv_size - 1,
	       (int)(text.pv_size / PAGE_SIZE));
	printf(mem_fmt, "data section",
	       KERN_VTOPHYS(bmi, __data_start), KERN_VTOPHYS(bmi, _edata),
	       (vaddr_t)__data_start, (vaddr_t)_edata,
	       (int)((round_page((vaddr_t)_edata)
		      - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	printf(mem_fmt, "bss section",
	       KERN_VTOPHYS(bmi, __bss_start), KERN_VTOPHYS(bmi, __bss_end__),
	       (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	       (int)((round_page((vaddr_t)__bss_end__)
		      - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	printf(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	printf(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	printf(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	printf(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	printf(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	printf(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	printf(mem_fmt, "Exception Vectors",
	    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
	    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
	    1);
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		printf(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
#endif
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#if defined(VERBOSE_INIT_ARM) && 0
	printf("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	printf(" TTBR1=%#x TTBCR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read());
#endif
	printf("\n");
#endif

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...", l1pt_pa);
#endif

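	/*
	 * Order matters here: grant domain access, write back and invalidate
	 * the caches while the old tables are still live, install the new
	 * translation table base, then flush the now-stale TLB entries
	 * before tightening the domain register to the kernel domain only.
	 */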
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_idcache_wbinv_all();
#ifdef ARM_MMU_EXTENDED
	cpu_setttb(l1pt_pa, KERNEL_PID);
#else
	cpu_setttb(l1pt_pa, true);
#endif
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

#ifdef VERBOSE_INIT_ARM
	printf("TTBR0=%#x OK\n", armreg_ttbr_read());
#endif
}