Home | History | Annotate | Line # | Download | only in ixm1200
ixm1200_machdep.c revision 1.14
      1 /*	$NetBSD: ixm1200_machdep.c,v 1.14 2003/04/02 03:49:26 thorpej Exp $ */
      2 #undef DEBUG_BEFOREMMU
      3 /*
      4  * Copyright (c) 2002, 2003
      5  *	Ichiro FUKUHARA <ichiro (at) ichiro.org>.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by Ichiro FUKUHARA.
     19  * 4. The name of the company nor the name of the author may be used to
     20  *    endorse or promote products derived from this software without specific
     21  *    prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR
     24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     26  * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR
     27  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  */
     35 /*
     36  * Copyright (c) 1997,1998 Mark Brinicombe.
     37  * Copyright (c) 1997,1998 Causality Limited.
     38  * All rights reserved.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  * 3. All advertising materials mentioning features or use of this software
     49  *    must display the following acknowledgement:
     50  *      This product includes software developed by Mark Brinicombe
     51  *      for the NetBSD Project.
     52  * 4. The name of the company nor the name of the author may be used to
     53  *    endorse or promote products derived from this software without specific
     54  *    prior written permission.
     55  *
     56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
     57  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     58  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     59  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     64  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     65  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     66  * SUCH DAMAGE.
     67  */
     68 
     69 #include <sys/cdefs.h>
     70 __KERNEL_RCSID(0, "$NetBSD: ixm1200_machdep.c,v 1.14 2003/04/02 03:49:26 thorpej Exp $");
     71 
     72 #include "opt_ddb.h"
     73 #include "opt_pmap_debug.h"
     74 
     75 #include <sys/param.h>
     76 #include <sys/device.h>
     77 #include <sys/systm.h>
     78 #include <sys/kernel.h>
     79 #include <sys/exec.h>
     80 #include <sys/proc.h>
     81 #include <sys/msgbuf.h>
     82 #include <sys/reboot.h>
     83 #include <sys/termios.h>
     84 
     85 #include <uvm/uvm_extern.h>
     86 
     87 #include <dev/cons.h>
     88 
     89 #ifdef DDB
     90 #include <machine/db_machdep.h>
     91 #include <ddb/db_sym.h>
     92 #include <ddb/db_extern.h>
     93 #ifndef DB_ELFSIZE
     94 #error Must define DB_ELFSIZE!
     95 #endif
     96 #define ELFSIZE	DB_ELFSIZE
     97 #include <sys/exec_elf.h>
     98 #endif
     99 
    100 #include <machine/bootconfig.h>
    101 #include <machine/bus.h>
    102 #include <machine/cpu.h>
    103 #include <machine/frame.h>
    104 #include <arm/undefined.h>
    105 
    106 #include <arm/arm32/machdep.h>
    107 
    108 #include <arm/ixp12x0/ixp12x0reg.h>
    109 #include <arm/ixp12x0/ixp12x0var.h>
    110 #include <arm/ixp12x0/ixp12x0_comreg.h>
    111 #include <arm/ixp12x0/ixp12x0_comvar.h>
    112 #include <arm/ixp12x0/ixp12x0_pcireg.h>
    113 
    114 #include <evbarm/ixm1200/ixm1200reg.h>
    115 #include <evbarm/ixm1200/ixm1200var.h>
    116 
    117 #include "opt_ipkdb.h"
    118 
    119 /* XXX for consinit related hacks */
    120 #include <sys/conf.h>
    121 
    122 void ixp12x0_reset(void) __attribute__((noreturn));
    123 
    124 /*
    125  * Address to call from cpu_reset() to reset the machine.
    126  * This is machine architecture dependant as it varies depending
    127  * on where the ROM appears when you turn the MMU off.
    128  */
    129 
    130 u_int cpu_reset_address = (u_int) ixp12x0_reset;
    131 
    132 /*
    133  * Define the default console speed for the board.
    134  */
    135 #ifndef CONMODE
    136 #define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB)) | CS8) /* 8N1 */
    137 #endif
    138 #ifndef CONSPEED
    139 #define CONSPEED B38400
    140 #endif
    141 #ifndef CONADDR
    142 #define CONADDR IXPCOM_UART_BASE
    143 #endif
    144 
    145 cons_decl(com);
    146 cons_decl(ixpcom);
    147 
    148 struct consdev constab[] = {
    149 #if (NIXPCOM > 0)
    150 	cons_init(ixpcom),
    151 #endif
    152 	{ 0 },
    153 };
    154 
    155 /* Define various stack sizes in pages */
    156 #define IRQ_STACK_SIZE  1
    157 #define ABT_STACK_SIZE  1
    158 #ifdef IPKDB
    159 #define UND_STACK_SIZE  2
    160 #else
    161 #define UND_STACK_SIZE  1
    162 #endif
    163 
    164 BootConfig bootconfig;          /* Boot config storage */
    165 char *boot_args = NULL;
    166 char *boot_file = NULL;
    167 
    168 vm_offset_t physical_start;
    169 vm_offset_t physical_freestart;
    170 vm_offset_t physical_freeend;
    171 vm_offset_t physical_end;
    172 u_int free_pages;
    173 vm_offset_t pagetables_start;
    174 int physmem = 0;
    175 
    176 /*int debug_flags;*/
    177 #ifndef PMAP_STATIC_L1S
    178 int max_processes = 64;                 /* Default number */
    179 #endif  /* !PMAP_STATIC_L1S */
    180 
    181 /* Physical and virtual addresses for some global pages */
    182 pv_addr_t systempage;
    183 pv_addr_t irqstack;
    184 pv_addr_t undstack;
    185 pv_addr_t abtstack;
    186 pv_addr_t kernelstack;
    187 
    188 vm_offset_t msgbufphys;
    189 
    190 extern u_int data_abort_handler_address;
    191 extern u_int prefetch_abort_handler_address;
    192 extern u_int undefined_handler_address;
    193 extern int end;
    194 
    195 #ifdef PMAP_DEBUG
    196 extern int pmap_debug_level;
    197 #endif  /* PMAP_DEBUG */
    198 
    199 #define KERNEL_PT_SYS		0	/* Page table for mapping proc0 zero page */
    200 #define KERNEL_PT_KERNEL	1	/* Page table for mapping kernel */
    201 #define KERNEL_PT_KERNEL_NUM	2
    202 #define KERNEL_PT_IO		(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
    203 					/* Page table for mapping IO */
    204 #define KERNEL_PT_VMDATA	(KERNEL_PT_IO + 1)
    205 					/* Page tables for mapping kernel VM */
    206 #define KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
    207 #define NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
    208 
    209 pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
    210 
    211 struct user *proc0paddr;
    212 
    213 #ifdef CPU_IXP12X0
    214 #define CPU_IXP12X0_CACHE_CLEAN_SIZE (0x4000 * 2)
    215 extern unsigned int ixp12x0_cache_clean_addr;
    216 extern unsigned int ixp12x0_cache_clean_size;
    217 static vaddr_t ixp12x0_cc_base;
    218 #endif  /* CPU_IXP12X0 */
    219 
    220 /* Prototypes */
    221 
    222 void consinit		__P((void));
    223 u_int cpu_get_control	__P((void));
    224 
    225 void ixdp_ixp12x0_cc_setup(void);
    226 
    227 #ifdef DEBUG_BEFOREMMU
    228 static void fakecninit();
    229 #endif
    230 
    231 extern int db_trapper(u_int, u_int, trapframe_t *, int);
    232 
    233 /*
    234  * void cpu_reboot(int howto, char *bootstr)
    235  *
    236  * Reboots the system
    237  *
    238  * Deal with any syncing, unmounting, dumping and shutdown hooks,
    239  * then reset the CPU.
    240  */
    241 
    242 void
    243 cpu_reboot(howto, bootstr)
    244 	int howto;
    245 	char *bootstr;
    246 {
    247 	/*
    248 	 * If we are still cold then hit the air brakes
    249 	 * and crash to earth fast
    250 	 */
    251 	if (cold) {
    252 		doshutdownhooks();
    253 		printf("Halted while still in the ICE age.\n");
    254 		printf("The operating system has halted.\n");
    255 		printf("Please press any key to reboot.\n\n");
    256 		cngetc();
    257 		printf("rebooting...\n");
    258 		ixp12x0_reset();
    259 	}
    260 
    261 	/* Disable console buffering */
    262 	cnpollc(1);
    263 
    264 	/*
    265 	 * If RB_NOSYNC was not specified sync the discs.
    266 	 * Note: Unless cold is set to 1 here, syslogd will die during the unmount.
    267 	 * It looks like syslogd is getting woken up only to find that it cannot
    268 	 * page part of the binary in as the filesystem has been unmounted.
    269 	 */
    270 	if (!(howto & RB_NOSYNC))
    271 		bootsync();
    272 
    273 	/* Say NO to interrupts */
    274 	splhigh();
    275 
    276 	/* Do a dump if requested. */
    277 	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
    278 		dumpsys();
    279 
    280 	/* Run any shutdown hooks */
    281 	doshutdownhooks();
    282 
    283 	/* Make sure IRQ's are disabled */
    284 	IRQdisable;
    285 
    286 	if (howto & RB_HALT) {
    287 		printf("The operating system has halted.\n");
    288 		printf("Please press any key to reboot.\n\n");
    289 		cngetc();
    290 	}
    291 
    292 	printf("rebooting...\n");
    293 
    294 	/* all interrupts are disabled */
    295 	disable_interrupts(I32_bit);
    296 
    297 	ixp12x0_reset();
    298 
    299 	/* ...and if that didn't work, just croak. */
    300 	printf("RESET FAILED!\n");
    301 	for (;;);
    302 }
    303 
    304 /*
    305  * Initial entry point on startup. This gets called before main() is
    306  * entered.
    307  * It should be responsible for setting up everything that must be
    308  * in place when main is called.
    309  * This includes
    310  *   Taking a copy of the boot configuration structure.
    311  *   Initialising the physical console so characters can be printed.
    312  *   Setting up page tables for the kernel
    313  *   Relocating the kernel to the bottom of physical memory
    314  */
    315 u_int
    316 initarm(void *arg)
    317 {
    318         int loop;
    319 	int loop1;
    320 	u_int kerneldatasize, symbolsize;
    321 	vaddr_t l1pagetable;
    322 	vaddr_t freemempos;
    323 	pv_addr_t kernel_l1pt;
    324 	pv_addr_t kernel_ptpt;
    325 #ifdef DDB
    326         Elf_Shdr *sh;
    327 #endif
    328 
    329 #ifdef DEBUG_BEFOREMMU
    330 	/*
    331 	 * At this point, we cannot call real consinit().
    332 	 * Just call a faked up version of consinit(), which does the thing
    333 	 * with MMU disabled.
    334 	 */
    335 	fakecninit();
    336 #endif
    337         /*
    338          * Since we map v0xf0000000 == p0x90000000, it's possible for
    339          * us to initialize the console now.
    340          */
    341 	consinit();
    342 
    343 	/* Talk to the user */
    344 	printf("\nNetBSD/evbarm (IXM1200) booting ...\n");
    345 
    346 	/*
    347 	 * Heads up ... Setup the CPU / MMU / TLB functions
    348 	 */
    349 	if (set_cpufuncs())
    350 		panic("cpu not recognized!");
    351 
    352 	/* XXX overwrite bootconfig to hardcoded values */
    353 	bootconfig.dram[0].address = 0xc0000000;
    354 	bootconfig.dram[0].pages   = 0x10000000 / PAGE_SIZE; /* SDRAM 256MB */
    355 	bootconfig.dramblocks = 1;
    356 
    357 	kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;
    358 
    359 	symbolsize = 0;
    360 
    361 #ifdef PMAP_DEBUG
    362 	pmap_debug(-1);
    363 #endif
    364 
    365 #ifdef DDB
    366         if (! memcmp(&end, "\177ELF", 4)) {
    367                 sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
    368                 loop = ((Elf_Ehdr *)&end)->e_shnum;
    369                 for(; loop; loop--, sh++)
    370                         if (sh->sh_offset > 0 &&
    371                             (sh->sh_offset + sh->sh_size) > symbolsize)
    372                                 symbolsize = sh->sh_offset + sh->sh_size;
    373         }
    374 #endif
    375 	printf("kernsize=0x%x\n", kerneldatasize);
    376 	kerneldatasize += symbolsize;
    377 	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) + PAGE_SIZE * 8;
    378 
    379 	/*
    380 	 * Set up the variables that define the availablilty of physcial
    381 	 * memory
    382 	 */
    383 	physical_start = bootconfig.dram[0].address;
    384 	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);
    385 
    386 	physical_freestart = physical_start
    387 		+ (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
    388 	physical_freeend = physical_end;
    389 
    390 	physmem = (physical_end - physical_start) / PAGE_SIZE;
    391 
    392 	freemempos = 0xc0000000;
    393 
    394 #ifdef VERBOSE_INIT_ARM
    395 	printf("Allocating page tables\n");
    396 #endif
    397 	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;
    398 
    399 #ifdef VERBOSE_INIT_ARM
    400 	printf("CP15 Register1 = 0x%08x\n", cpu_get_control());
    401 	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
    402 		physical_freestart, free_pages, free_pages);
    403 	printf("physical_start = 0x%08lx, physical_end = 0x%08lx\n",
    404 		physical_start, physical_end);
    405 #endif
    406 
    407 	/* Define a macro to simplify memory allocation */
    408 #define valloc_pages(var, np)			\
    409 	alloc_pages((var).pv_pa, (np));		\
    410 	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
    411 #define alloc_pages(var, np)				\
    412 	(var) = freemempos;				\
    413 	memset((char *)(var), 0, ((np) * PAGE_SIZE));	\
    414 	freemempos += (np) * PAGE_SIZE;
    415 
    416 	loop1 = 0;
    417 	kernel_l1pt.pv_pa = 0;
    418 	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
    419 		/* Are we 16KB aligned for an L1 ? */
    420 		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
    421 		    && kernel_l1pt.pv_pa == 0) {
    422 			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
    423 		} else {
    424 			alloc_pages(kernel_pt_table[loop1].pv_pa,
    425 			    L2_TABLE_SIZE / PAGE_SIZE);
    426 			kernel_pt_table[loop1].pv_va =
    427 			    kernel_pt_table[loop1].pv_pa;
    428 			++loop1;
    429 		}
    430 	}
    431 
    432 #ifdef DIAGNOSTIC
    433 	/* This should never be able to happen but better confirm that. */
    434 	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
    435 		panic("initarm: Failed to align the kernel page directory");
    436 #endif
    437 
    438 	/*
    439 	 * Allocate a page for the system page mapped to V0x00000000
    440 	 * This page will just contain the system vectors and can be
    441 	 * shared by all processes.
    442 	 */
    443 	alloc_pages(systempage.pv_pa, 1);
    444 
    445 	/* Allocate a page for the page table to map kernel page tables. */
    446 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
    447 
    448 	/* Allocate stacks for all modes */
    449 	valloc_pages(irqstack, IRQ_STACK_SIZE);
    450 	valloc_pages(abtstack, ABT_STACK_SIZE);
    451 	valloc_pages(undstack, UND_STACK_SIZE);
    452 	valloc_pages(kernelstack, UPAGES);
    453 
    454 #ifdef VERBOSE_INIT_ARM
    455 	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, irqstack.pv_va);
    456 	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, abtstack.pv_va);
    457 	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, undstack.pv_va);
    458 	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va);
    459 #endif
    460 
    461 	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
    462 
    463 #ifdef CPU_IXP12X0
    464         /*
    465          * XXX totally stuffed hack to work round problems introduced
    466          * in recent versions of the pmap code. Due to the calls used there
    467          * we cannot allocate virtual memory during bootstrap.
    468          */
    469 	for(;;) {
    470 		alloc_pages(ixp12x0_cc_base, 1);
    471 		if (! (ixp12x0_cc_base & (CPU_IXP12X0_CACHE_CLEAN_SIZE - 1)))
    472 			break;
    473 	}
    474 	{
    475 		vaddr_t dummy;
    476 		alloc_pages(dummy, CPU_IXP12X0_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
    477 	}
    478 	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
    479 	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
    480 #endif /* CPU_IXP12X0 */
    481 
    482 #ifdef VERBOSE_INIT_ARM
    483 	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
    484 #endif
    485 
    486 	/*
    487 	 * Now we start construction of the L1 page table
    488 	 * We start by mapping the L2 page tables into the L1.
    489 	 * This means that we can replace L1 mappings later on if necessary
    490 	 */
    491 	l1pagetable = kernel_l1pt.pv_pa;
    492 
    493 	/* Map the L2 pages tables in the L1 page table */
    494 	pmap_link_l2pt(l1pagetable, 0x00000000,
    495 	    &kernel_pt_table[KERNEL_PT_SYS]);
    496 
    497 	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
    498 		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
    499 		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
    500 
    501 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
    502 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
    503 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
    504 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
    505 
    506 	/* update the top of the kernel VM */
    507 	pmap_curmaxkvaddr =
    508 	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
    509 
    510 	pmap_link_l2pt(l1pagetable, IXP12X0_IO_VBASE,
    511 	    &kernel_pt_table[KERNEL_PT_IO]);
    512 
    513 #ifdef VERBOSE_INIT_ARM
    514 	printf("Mapping kernel\n");
    515 #endif
    516 
    517 #if XXX
    518 	/* Now we fill in the L2 pagetable for the kernel code/data */
    519 	{
    520 		extern char etext[], _end[];
    521 		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
    522 		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
    523 		u_int logical;
    524 
    525 		textsize = (textsize + PGOFSET) & ~PGOFSET;
    526 		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
    527 
    528 		logical = 0x00200000;   /* offset of kernel in RAM */
    529 
    530 		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
    531 		    physical_start + logical, textsize,
    532 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    533 		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
    534 		    physical_start + logical, totalsize - textsize,
    535 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    536 	}
    537 #else
    538 	{
    539 		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
    540                     KERNEL_TEXT_BASE, kerneldatasize,
    541                     VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    542 	}
    543 #endif
    544 
    545 #ifdef VERBOSE_INIT_ARM
    546         printf("Constructing L2 page tables\n");
    547 #endif
    548 
    549 	/* Map the stack pages */
    550 	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
    551 	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    552 	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
    553 	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    554 	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
    555 	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    556 	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
    557 	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    558 
    559 	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
    560 	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    561 
    562 	/* Map the page table that maps the kernel pages */
    563 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
    564 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
    565 
    566 	/*
    567 	 * Map entries in the page table used to map PTE's
    568 	 * Basically every kernel page table gets mapped here
    569 	 */
    570 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
    571 	pmap_map_entry(l1pagetable,
    572 	    PTE_BASE + (0x00000000 >> (PGSHIFT-2)),
    573 	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
    574 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    575 
    576 	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
    577 		pmap_map_entry(l1pagetable,
    578 		    PTE_BASE + ((KERNEL_BASE +
    579 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
    580 		    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
    581 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    582 
    583 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
    584 		pmap_map_entry(l1pagetable,
    585 		    PTE_BASE + ((KERNEL_VM_BASE +
    586 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
    587 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
    588 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    589 
    590 	pmap_map_entry(l1pagetable,
    591 	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
    592 	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
    593 
    594 	/* Map the vector page. */
    595 	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
    596 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    597 
    598 #ifdef VERBOSE_INIT_ARM
    599 	printf("systempage (vector page): p0x%08lx v0x%08lx\n",
    600 	       systempage.pv_pa, vector_page);
    601 #endif
    602 
    603 	/*
    604 	 * Map the PCI I/O spaces and IXP12x0 registers
    605 	 */
    606 
    607 	ixp12x0_pmap_io_reg(l1pagetable);
    608 
    609 	printf("done.\n");
    610 
    611 	/*
    612 	 * Map the Dcache Flush page.
    613 	 * Hw Ref Manual 3.2.4.5 Software Dcache Flush
    614 	 */
    615 	pmap_map_chunk(l1pagetable, ixp12x0_cache_clean_addr, 0xe0000000,
    616 	    CPU_IXP12X0_CACHE_CLEAN_SIZE, VM_PROT_READ, PTE_CACHE);
    617 
    618 	/*
    619 	 * Now we have the real page tables in place so we can switch to them.
    620 	 * Once this is done we will be running with the REAL kernel page
    621 	 * tables.
    622 	 */
    623 
    624 	/* Switch tables */
    625 	setttb(kernel_l1pt.pv_pa);
    626 	cpu_tlb_flushID();
    627 
    628 	/*
    629 	 * We must now clean the cache again....
    630 	 * Cleaning may be done by reading new data to displace any
    631 	 * dirty data in the cache. This will have happened in setttb()
    632 	 * but since we are boot strapping the addresses used for the read
    633 	 * may have just been remapped and thus the cache could be out
    634 	 * of sync. A re-clean after the switch will cure this.
    635 	 * After booting there are no gross reloations of the kernel thus
    636 	 * this problem will not occur after initarm().
    637 	 */
    638 	cpu_idcache_wbinv_all();
    639 
    640 	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
    641 
    642 	/*
    643 	 * Pages were allocated during the secondary bootstrap for the
    644 	 * stacks for different CPU modes.
    645 	 * We must now set the r13 registers in the different CPU modes to
    646 	 * point to these stacks.
    647 	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
    648 	 * of the stack memory.
    649 	 */
    650 	printf("init subsystems: stacks ");
    651 
    652 	set_stackptr(PSR_IRQ32_MODE,
    653 	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
    654 	set_stackptr(PSR_ABT32_MODE,
    655 	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
    656 	set_stackptr(PSR_UND32_MODE,
    657 	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
    658 #ifdef PMAP_DEBUG
    659 	if (pmap_debug_level >= 0)
    660 		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
    661 		    kernelstack.pv_pa);
    662 #endif  /* PMAP_DEBUG */
    663 
    664 	/*
    665 	 * Well we should set a data abort handler.
    666 	 * Once things get going this will change as we will need a proper
    667 	 * handler. Until then we will use a handler that just panics but
    668 	 * tells us why.
    669 	 * Initialisation of the vetcors will just panic on a data abort.
    670 	 * This just fills in a slighly better one.
    671 	 */
    672 	printf("vectors ");
    673 	data_abort_handler_address = (u_int)data_abort_handler;
    674 	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
    675 	undefined_handler_address = (u_int)undefinedinstruction_bounce;
    676 	printf("\ndata_abort_handler_address = %08x\n", data_abort_handler_address);
    677 	printf("prefetch_abort_handler_address = %08x\n", prefetch_abort_handler_address);
    678 	printf("undefined_handler_address = %08x\n", undefined_handler_address);
    679 
    680 	/* Initialise the undefined instruction handlers */
    681 	printf("undefined ");
    682 	undefined_init();
    683 
    684 	/* Load memory into UVM. */
    685 	printf("page ");
    686 	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
    687 	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
    688 	    atop(physical_freestart), atop(physical_freeend),
    689 	    VM_FREELIST_DEFAULT);
    690 
    691 	/* Boot strap pmap telling it where the kernel page table is */
    692 	printf("pmap ");
    693 	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);
    694 
    695 	/* Setup the IRQ system */
    696 	printf("irq ");
    697 	ixp12x0_intr_init();
    698 	printf("done.\n");
    699 
    700 #ifdef VERBOSE_INIT_ARM
    701 	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
    702 		physical_freestart, free_pages, free_pages);
    703 	printf("freemempos=%08lx\n", freemempos);
    704 	printf("switching to new L1 page table  @%#lx... \n", kernel_l1pt.pv_pa);
    705 #endif
    706 
    707 	consinit();
    708 	printf("consinit \n");
    709 
    710 	ixdp_ixp12x0_cc_setup();
    711 
    712 	printf("bootstrap done.\n");
    713 
    714 #ifdef IPKDB
    715 	/* Initialise ipkdb */
    716 	ipkdb_init();
    717 	if (boothowto & RB_KDB)
    718 		ipkdb_connect(0);
    719 #endif  /* NIPKDB */
    720 
    721 #ifdef DDB
    722 	{
    723 		static struct undefined_handler uh;
    724 
    725 		uh.uh_handler = db_trapper;
    726 		install_coproc_handler_static(0, &uh);
    727 	}
    728 	ddb_init(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
    729 
    730 	if (boothowto & RB_KDB)
    731 		Debugger();
    732 #endif
    733 
    734 	/* We return the new stack pointer address */
    735 	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
    736 }
    737 
    738 void
    739 consinit(void)
    740 {
    741 	extern struct bus_space ixpsip_bs_tag;
    742 	static int consinit_called = 0;
    743 
    744 	if (consinit_called != 0)
    745 		return;
    746 
    747 	consinit_called = 1;
    748 
    749 	if (ixpcomcnattach(&ixpsip_bs_tag,
    750 			   IXPCOM_UART_HWBASE, IXPCOM_UART_VBASE,
    751 			   CONSPEED, CONMODE))
    752 		panic("can't init serial console @%lx", IXPCOM_UART_HWBASE);
    753 }
    754 
    755 #ifdef DEBUG_BEFOREMMU
    756 cons_decl(ixpcom);
    757 void
    758 fakecninit()
    759 {
    760 	static struct consdev fakecntab = cons_init(ixpcom);
    761 	cn_tab = &fakecntab;
    762 
    763 	(*cn_tab->cn_init)(0);
    764 	cn_tab->cn_pri = CN_REMOTE;
    765 }
    766 #endif
    767 
    768 /*
    769  * For optimal cache cleaning we need two 16K banks of
    770  * virtual address space that NOTHING else will access
    771  * and then we alternate the cache cleaning between the
    772  * two banks.
    773  * The cache cleaning code requires requires 2 banks aligned
    774  * on total size boundry so the banks can be alternated by
    775  * eorring the size bit (assumes the bank size is a power of 2)
    776  */
    777 void
    778 ixdp_ixp12x0_cc_setup(void)
    779 {
    780 	int loop;
    781 	paddr_t kaddr;
    782 	pt_entry_t *pte;
    783 
    784 	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
    785 	for (loop = 0; loop < CPU_IXP12X0_CACHE_CLEAN_SIZE; loop += PAGE_SIZE) {
    786                 pte = vtopte(ixp12x0_cc_base + loop);
    787                 *pte = L2_S_PROTO | kaddr |
    788                     L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
    789 		PTE_SYNC(pte);
    790         }
    791 	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
    792 	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
    793 }
    794