/*	$NetBSD: arm32_kvminit.c,v 1.13 2012/10/21 22:04:05 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.13 2012/10/21 22:04:05 matt Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/bus.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/db_machdep.h>
#include <arm/undefined.h>
#include <arm/bootconfig.h>
#include <arm/arm32/machdep.h>

#include "ksyms.h"

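/*
 * Boot-time memory accounting, filled in by arm32_bootmem_init() and
 * arm32_kernel_vm_init() below and shared with the platform's initarm()
 * glue code.
 */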
struct bootmem_info bootmem_info;

paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */
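/*
 * Each L2 page table page covers L2_S_SEGSIZE (4MB) of kernel VA, so
 * eight of them provide the initial 32MB of KVM noted above.
 */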

/*
 * Macros to translate between physical and virtual for a subset of the
 * kernel address space.  *Not* for general use.
 */
#define KERN_VTOPHYS(bmi, va) \
	((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
#define KERN_PHYSTOV(bmi, pa) \
	((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))

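/*
 * Record the extent of physical memory and the kernel's place within it,
 * then build the initial list of free physical blocks: the memory after
 * the kernel, plus any memory in front of it.
 */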
void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

#ifdef VERBOSE_INIT_ARM
	printf("%s: memstart=%#lx, memsize=%#lx, kernelstart=%#lx\n",
	    __func__, memstart, memsize, kernelstart);
#endif

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */
	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = KERN_VTOPHYS(bmi, round_page((vaddr_t)_end));

#ifdef VERBOSE_INIT_ARM
	printf("%s: kernelend=%#lx\n", __func__, bmi->bmi_kernelend);
#endif

	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERNEL_BASE;
		pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}

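/*
 * Try to append pv to acc_pv.  This succeeds only if the two regions are
 * adjacent in both PA and VA and share protection and cache attributes,
 * in which case acc_pv grows to cover both.
 */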
static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#ifdef VERBOSE_INIT_ARMX
		printf("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size - 1);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

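/*
 * Insert pv into the bmi_chunks list, which is kept sorted by VA.  Where
 * possible the region is merged into an existing entry; a merge may also
 * fuse that entry with its successor, whose pv_addr_t is then returned
 * to the bmi_freechunks list.
 */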
static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL
		    || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
			printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
				printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list))
		printf("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	else
		printf("at tail\n");
#endif
}

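/*
 * Bootstrap allocator: carve npages of physically contiguous memory from
 * the first free block large enough, recording the result in pv.  As a
 * side effect, the first time the allocation cursor lands on an
 * L1_TABLE_SIZE boundary with enough room, the kernel L1 page table is
 * allocated there, since it is the only allocation with an alignment
 * constraint stricter than a page.
 */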
static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
	int prot, int cache)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at an L1 table boundary, allocate the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &kernel_l1pt);
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE,
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	memset((void *)pv->pv_va, 0, nbytes);
}

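/*
 * Build the kernel's real L1/L2 page tables out of the free memory
 * recorded by arm32_bootmem_init(), allocate the per-mode (and, with
 * MULTIPROCESSOR, per-CPU) exception stacks, the message buffer and the
 * vector page, map everything, and finally switch TTBR to the new
 * tables.  If mapallmem_p is set, all of RAM is mapped, not just the
 * chunks that were explicitly allocated.
 */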
void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
	const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max + 1;
#else
	const size_t cpu_num = 1;
#endif

	/*
	 * Calculate the amount of memory needed for mapping the kernel,
	 * its data, and the bootstrap structures.  Assume 2 L2 page table
	 * pages for the kernel, 1 for the vectors, and 1 for I/O.
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += L2_TABLE_SIZE * (2 + 1 + KERNEL_L2PT_VMDATA_NUM + 1);
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	kernel_size += (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
#endif

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM
	    < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating page tables for", __func__);
#endif
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	/*
	 * First allocate L2 page for the vectors.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" vector");
#endif
	valloc_pages(bmi, &bmi->bmi_vector_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	add_pages(bmi, &bmi->bmi_vector_l2pt);

	/*
	 * Now allocate L2 pages for the kernel
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" kernel");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" vm");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

	/*
	 * If someone wanted an L2 page for I/O, allocate it now.
	 */
	if (iovbase != 0) {
#ifdef VERBOSE_INIT_ARM
		printf(" io");
#endif
		valloc_pages(bmi, &bmi->bmi_io_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating stacks\n", __func__);
#endif

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
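	/*
	 * msgbufphys is a global; the machine-dependent startup code
	 * (cpu_startup()) maps it later and hands the mapping to
	 * initmsgbuf().
	 */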

	/*
	 * Allocate a page for the system vector page.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
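	/*
	 * The page itself lives wherever valloc_pages() put it; only its
	 * VA is overridden here so that it gets mapped at the vector
	 * address handed to us by the caller.
	 */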
	systempage.pv_va = vectors;

	/*
	 * On XScale, allocate a page for the mini-data cache clean area;
	 * it is set up and mapped later via xscale_setup_minidata().
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
#endif

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa
		|| (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	/* Map the L2 page tables in the L1 page table */
	pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
	    &bmi->bmi_vector_l2pt);
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vectors)\n",
	    __func__, bmi->bmi_vector_l2pt.pv_va, bmi->bmi_vector_l2pt.pv_pa,
	    systempage.pv_va);
#endif

	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
		    &kernel_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    kernel_base + idx * L2_S_SEGSIZE);
#endif
	}

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
		    &vmdata_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    kernel_vm_base + idx * L2_S_SEGSIZE);
#endif
	}
	if (iovbase) {
		pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    iovbase & -L2_S_SEGSIZE);
#endif
	}

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);
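	/*
	 * KVA beyond this point is grown on demand by pmap_growkernel().
	 */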

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	extern char etext[], _end[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS(bmi, (uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi, bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ|VM_PROT_WRITE; /* XXX VM_PROT_EXECUTE */
	text.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
#endif

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ|VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
#endif

	add_pages(bmi, &data);

#ifdef VERBOSE_INIT_ARM
	printf("Listing Chunks\n");
	{
		pv_addr_t *pv;
		SLIST_FOREACH(pv, &bmi->bmi_chunks, pv_list) {
			printf("%s: pv %p: chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, pv, pv->pv_va, pv->pv_va + pv->pv_size - 1,
			    pv->pv_pa, pv->pv_prot, pv->pv_cache);
		}
	}
	printf("\nMapping Chunks\n");
#endif

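	/*
	 * Walk the sorted chunk list, coalescing adjacent compatible
	 * chunks into cur_pv and emitting a pmap_map_chunk() call each
	 * time a chunk with different attributes forces a break.  When
	 * mapallmem_p is set, holes between chunks are absorbed as
	 * cacheable read/write memory so that all of RAM ends up mapped.
	 */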
	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = bmi->bmi_start;
		cur_pv.pv_size = pv->pv_pa - bmi->bmi_start;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to encompass the
				 * hole, and if so do it and retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk and
				 * treat the hole as the new current chunk.
				 */
#ifdef VERBOSE_INIT_ARM
				printf("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * Set the current chunk to the hole and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate so emit the current one
		 * and use the new pv as the current pv.
		 */
#ifdef VERBOSE_INIT_ARM
		printf("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we're mapping all of memory, map whatever memory remains
	 * past the last chunk.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
#ifdef VERBOSE_INIT_ARM
			printf("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * Now we map the final chunk.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel
	 */

	/* Map the vector page. */
	pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at the same address in the first level
	 * page table so that we can continue to use the console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about where all the bits and pieces live. */
	printf("%22s       Physical              Virtual        Num\n", " ");
	printf("%22s Starting    Ending    Starting    Ending   Pages\n", " ");

	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx                       %zu\n";

	printf(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi, bmi->bmi_start), KERN_PHYSTOV(bmi, bmi->bmi_end - 1),
	    physmem);
	printf(mem_fmt, "text section",
	       text.pv_pa, text.pv_pa + text.pv_size - 1,
	       text.pv_va, text.pv_va + text.pv_size - 1,
	       (int)(text.pv_size / PAGE_SIZE));
	printf(mem_fmt, "data section",
	       KERN_VTOPHYS(bmi, __data_start), KERN_VTOPHYS(bmi, _edata),
	       (vaddr_t)__data_start, (vaddr_t)_edata,
	       (int)((round_page((vaddr_t)_edata)
		      - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	printf(mem_fmt, "bss section",
	       KERN_VTOPHYS(bmi, __bss_start), KERN_VTOPHYS(bmi, __bss_end__),
	       (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	       (int)((round_page((vaddr_t)__bss_end__)
		      - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	printf(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	printf(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	printf(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	printf(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	printf(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	printf(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	printf(mem_fmt, "Exception Vectors",
	    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
	    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
	    1);
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		printf(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
#endif
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#if defined(VERBOSE_INIT_ARM) && 0
	printf("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	printf(" TTBR1=%#x TTBCR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read());
#endif
	printf("\n");
#endif

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...", l1pt_pa);
#endif

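	/*
	 * Order matters here: keep both the kernel's domain and domain 0
	 * (which the bootstrap tables use) in client mode during the
	 * switch, write back and invalidate the caches so no dirty line
	 * refers to the old tables, install the new L1 table, flush the
	 * now-stale TLB entries, and only then cut access back to the
	 * kernel's domain alone.
	 */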
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_idcache_wbinv_all();
	cpu_setttb(l1pt_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

#ifdef VERBOSE_INIT_ARM
	printf("TTBR0=%#x OK\n", armreg_ttbr_read());
#endif
}