/* $NetBSD: brh_machdep.c,v 1.6 2003/04/26 11:05:09 ragge Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe
 *      for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup for the ADI Engineering
 * BRH i80200 evaluation platform.
 */

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/exec.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
#include <sys/termios.h>
#include <sys/ksyms.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <arm/undefined.h>

#include <arm/arm32/machdep.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

#include <dev/pci/ppbreg.h>

#include <arm/xscale/beccreg.h>
#include <arm/xscale/beccvar.h>

#include <evbarm/adi_brh/brhreg.h>
#include <evbarm/adi_brh/brhvar.h>
#include <evbarm/adi_brh/obiovar.h>

#include "opt_ipkdb.h"
#include "ksyms.h"

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

u_int cpu_reset_address = 0x00000000;

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE 1
#define ABT_STACK_SIZE 1
#ifdef IPKDB
#define UND_STACK_SIZE 2
#else
#define UND_STACK_SIZE 1
#endif

BootConfig bootconfig;          /* Boot config storage */
char *boot_args = NULL;
char *boot_file = NULL;

vm_offset_t physical_start;
vm_offset_t physical_freestart;
vm_offset_t physical_freeend;
vm_offset_t physical_end;
u_int free_pages;
vm_offset_t pagetables_start;
int physmem = 0;

/*int debug_flags;*/
#ifndef PMAP_STATIC_L1S
int max_processes = 64;         /* Default number */
#endif  /* !PMAP_STATIC_L1S */

/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t abtstack;
pv_addr_t kernelstack;
pv_addr_t minidataclean;

vm_offset_t msgbufphys;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif

#define KERNEL_PT_SYS           0       /* L2 table for mapping zero page */

#define KERNEL_PT_KERNEL        1       /* L2 table for mapping kernel */
#define KERNEL_PT_KERNEL_NUM    2

/* L2 tables for mapping kernel VM */
#define KERNEL_PT_VMDATA        (KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
#define KERNEL_PT_VMDATA_NUM    4       /* start with 16MB of KVM */
#define NUM_KERNEL_PTS          (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

struct user *proc0paddr;

/* Prototypes */

void consinit(void);

#include "com.h"
#if NCOM > 0
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
#endif

/*
 * Define the default console speed for the board.  This is generally
 * what the firmware provided with the board defaults to.
 */
#ifndef CONSPEED
#define CONSPEED B57600
#endif /* ! CONSPEED */

#ifndef CONUNIT
#define CONUNIT 0
#endif

#ifndef CONMODE
#define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
#endif

int comcnspeed = CONSPEED;
int comcnmode = CONMODE;
int comcnunit = CONUNIT;

/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */
void
cpu_reboot(int howto, char *bootstr)
{
#ifdef DIAGNOSTIC
        /* info */
        printf("boot: howto=%08x curproc=%p\n", howto, curproc);
#endif

        /*
         * If we are still cold then hit the air brakes
         * and crash to earth fast
         */
        if (cold) {
                doshutdownhooks();
                printf("The operating system has halted.\n");
                printf("Please press any key to reboot.\n\n");
                cngetc();
                printf("rebooting...\n");
                goto reset;
        }

        /* Disable console buffering */

        /*
         * If RB_NOSYNC was not specified sync the discs.
         * Note: Unless cold is set to 1 here, syslogd will die during the
         * unmount.  It looks like syslogd is getting woken up only to find
         * that it cannot page part of the binary in as the filesystem has
         * been unmounted.
         */
        if (!(howto & RB_NOSYNC))
                bootsync();

        /* Say NO to interrupts */
        splhigh();

        /* Do a dump if requested. */
        if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
                dumpsys();

        /* Run any shutdown hooks */
        doshutdownhooks();

        /* Make sure IRQ's are disabled */
        IRQdisable;

        if (howto & RB_HALT) {
                brh_7seg('8');
                printf("The operating system has halted.\n");
                printf("Please press any key to reboot.\n\n");
                cngetc();
        }

        printf("rebooting...\n\r");
 reset:
        cpu_reset();
}

/*
 * Mapping table for core kernel memory.  This memory is mapped at init
 * time with section mappings.
 */
struct l1_sec_map {
        vaddr_t va;
        vaddr_t pa;
        vsize_t size;
        vm_prot_t prot;
        int cache;
} l1_sec_table[] = {
        {
                BRH_PCI_CONF_VBASE,
                BECC_PCI_CONF_BASE,
                BRH_PCI_CONF_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_PCI_MEM1_VBASE,
                BECC_PCI_MEM1_BASE,
                BRH_PCI_MEM1_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_PCI_MEM2_VBASE,
                BECC_PCI_MEM2_BASE,
                BRH_PCI_MEM2_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_UART1_VBASE,
                BRH_UART1_BASE,
                BRH_UART1_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_UART2_VBASE,
                BRH_UART2_BASE,
                BRH_UART2_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_LED_VBASE,
                BRH_LED_BASE,
                BRH_LED_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_PCI_IO_VBASE,
                BECC_PCI_IO_BASE,
                BRH_PCI_IO_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                BRH_BECC_VBASE,
                BECC_REG_BASE,
                BRH_BECC_VSIZE,
                VM_PROT_READ|VM_PROT_WRITE,
                PTE_NOCACHE,
        },
        {
                0,
                0,
                0,
                0,
                0,
        }
};

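/*
 * Hardclock hook: every 16 clock ticks, advance the "snake" pattern on
 * the board's 7-segment display as a simple visual sign of life.
 */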
static void
brh_hardclock_hook(void)
{
        static int snakefreq;

        if ((snakefreq++ & 15) == 0)
                brh_7seg_snake();
}

/*
 * u_int initarm(...)
 *
 * Initial entry point on startup.  This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
        extern vaddr_t xscale_cache_clean_addr;
#ifdef DIAGNOSTIC
        extern vsize_t xscale_minidata_clean_size;
#endif
        int loop;
        int loop1;
        u_int l1pagetable;
        pv_addr_t kernel_l1pt;
        pv_addr_t kernel_ptpt;
        paddr_t memstart;
        psize_t memsize;

        /*
         * Clear out the 7-segment display.  Whee, the first visual
         * indication that we're running kernel code.
         */
        brh_7seg(' ');

        /*
         * Since we have mapped the on-board devices at their permanent
         * locations already, it is possible for us to initialize
         * the console now.
         */
        consinit();

        /* Talk to the user */
        printf("\nNetBSD/evbarm (ADI BRH) booting ...\n");

        /* Calibrate the delay loop. */
        becc_calibrate_delay();
        becc_hardclock_hook = brh_hardclock_hook;

        /*
         * Heads up ... Setup the CPU / MMU / TLB functions
         */
        if (set_cpufuncs())
                panic("cpu not recognized!");

        /*
         * We are currently running with the MMU enabled and the
         * entire address space mapped VA==PA.  Memory conveniently
         * starts at 0xc0000000, which is where we want it.  Certain
         * on-board devices have already been mapped where we want
         * them to be.  There is an L1 page table at 0xc0004000.
         */

        becc_icu_init();

        /*
         * Memory always starts at 0xc0000000 on a BRH, and the
         * memory size is always 128M.
         */
        memstart = 0xc0000000UL;
        memsize = (128UL * 1024 * 1024);

        printf("initarm: Configuring system ...\n");

        /* Fake bootconfig structure for the benefit of pmap.c */
        /* XXX must make the memory description h/w independent */
        bootconfig.dramblocks = 1;
        bootconfig.dram[0].address = memstart;
        bootconfig.dram[0].pages = memsize / PAGE_SIZE;

        /*
         * Set up the variables that define the availability of
         * physical memory.  For now, we're going to set
         * physical_freeend to 0xc0200000 (where the kernel was
         * loaded), and allocate the memory we need downwards from
         * there.  If we get too close to the L1 table that we set
         * up, we will panic.  We will update physical_freestart
         * and physical_freeend later to reflect what pmap_bootstrap()
         * wants to see.
         *
         * XXX pmap_bootstrap() needs an enema.
         */
        physical_start = bootconfig.dram[0].address;
        physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

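        /*
         * Note: 0xc0009000 lies just above the existing L1 table at
         * 0xc0004000 (see above), leaving it intact; 0xc0200000 is
         * where the kernel image itself was loaded.
         */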
        physical_freestart = 0xc0009000UL;
        physical_freeend = 0xc0200000UL;

        /* Tell the user about the memory */
        printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
            physical_start, physical_end - 1);

        /*
         * Okay, the kernel starts 2MB in from the bottom of physical
         * memory.  We are going to allocate our bootstrap pages downwards
         * from there.
         *
         * We need to allocate some fixed page tables to get the kernel
         * going.  We allocate one page directory and a number of page
         * tables and store the physical addresses in the kernel_pt_table
         * array.
         *
         * The kernel page directory must be on a 16K boundary.  The page
         * tables must be on 4K boundaries.  What we do is allocate the
         * page directory on the first 16K boundary that we encounter, and
         * the page tables on 4K boundaries otherwise.  Since we allocate
         * at least 3 L2 page tables, we are guaranteed to encounter at
         * least one 16K aligned region.
         */

#ifdef VERBOSE_INIT_ARM
        printf("Allocating page tables\n");
#endif

        free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
        printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
            physical_freestart, free_pages, free_pages);
#endif

        /* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)                           \
        alloc_pages((var).pv_pa, (np));                 \
        (var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)                            \
        physical_freeend -= ((np) * PAGE_SIZE);         \
        if (physical_freeend < physical_freestart)      \
                panic("initarm: out of memory");        \
        (var) = physical_freeend;                       \
        free_pages -= (np);                             \
        memset((char *)(var), 0, ((np) * PAGE_SIZE));
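        /*
         * Note: alloc_pages() carves pages from the top of the free region
         * (physical_freeend moves downward) and zeroes them through the
         * current VA==PA mapping; valloc_pages() additionally records the
         * kernel virtual address the pages will have once the real page
         * tables are in use.
         */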

        loop1 = 0;
        kernel_l1pt.pv_pa = 0;
        for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
                /* Are we 16KB aligned for an L1 ? */
                if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
                    && kernel_l1pt.pv_pa == 0) {
                        valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
                } else {
#ifdef ARM32_PMAP_NEW
                        valloc_pages(kernel_pt_table[loop1],
                            L2_TABLE_SIZE / PAGE_SIZE);
#else
                        alloc_pages(kernel_pt_table[loop1].pv_pa,
                            L2_TABLE_SIZE / PAGE_SIZE);
                        kernel_pt_table[loop1].pv_va =
                            kernel_pt_table[loop1].pv_pa;
#endif
                        ++loop1;
                }
        }

        /* This should never happen, but check it just in case. */
        if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
                panic("initarm: Failed to align the kernel page directory\n");

        /*
         * Allocate a page for the system page.  This page will be mapped
         * at the vector page address (ARM_VECTORS_HIGH), will contain the
         * system vectors, and can be shared by all processes.
         */
        alloc_pages(systempage.pv_pa, 1);

        /* Allocate a page for the page table to map kernel page tables. */
        valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);

        /* Allocate stacks for all modes */
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
        valloc_pages(kernelstack, UPAGES);

        /* Allocate enough pages for cleaning the Mini-Data cache. */
        KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
        valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
        printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
            irqstack.pv_va);
        printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
            abtstack.pv_va);
        printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
            undstack.pv_va);
        printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
            kernelstack.pv_va);
#endif

        /*
         * XXX Defer this to later so that we can reclaim the memory
         * XXX used by the RedBoot page tables.
         */
        alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

        /*
         * Ok we have allocated physical pages for the primary kernel
         * page tables
         */

#ifdef VERBOSE_INIT_ARM
        printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

        /*
         * Now we start construction of the L1 page table
         * We start by mapping the L2 page tables into the L1.
         * This means that we can replace L1 mappings later on if necessary
         */
        l1pagetable = kernel_l1pt.pv_pa;

        /* Map the L2 page tables in the L1 page table */
        pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
            &kernel_pt_table[KERNEL_PT_SYS]);
        for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
                pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
                    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
        for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
                pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
                    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
        pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);

        /* update the top of the kernel VM */
        pmap_curmaxkvaddr =
            KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
        printf("Mapping kernel\n");
#endif

        /* Now we fill in the L2 pagetable for the kernel static code/data */
        {
                extern char etext[], _end[];
                size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
                size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
                u_int logical;

                textsize = (textsize + PGOFSET) & ~PGOFSET;
                totalsize = (totalsize + PGOFSET) & ~PGOFSET;

                logical = 0x00200000;   /* offset of kernel in RAM */

                logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
                    physical_start + logical, textsize,
                    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
                logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
                    physical_start + logical, totalsize - textsize,
                    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        }

#ifdef VERBOSE_INIT_ARM
        printf("Constructing L2 page tables\n");
#endif

        /* Map the stack pages */
        pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
            IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
            ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
            UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
            UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifndef ARM32_PMAP_NEW
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

        for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
                pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
                    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
                    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
        }
#endif

        /* Map the Mini-Data cache clean area. */
        xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
            minidataclean.pv_pa);

        /* Map the page table that maps the kernel pages */
        pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
#ifndef ARM32_PMAP_NEW
            VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
            VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
#endif

        /*
         * Map entries in the page table used to map PTE's
         * Basically every kernel page table gets mapped here
         */
        /* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
        for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++) {
                pmap_map_entry(l1pagetable,
                    PTE_BASE + ((KERNEL_BASE +
                    (loop * 0x00400000)) >> (PGSHIFT-2)),
                    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
#ifndef ARM32_PMAP_NEW
                    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else
                    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
#endif
        }
        pmap_map_entry(l1pagetable,
            PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
#ifndef ARM32_PMAP_NEW
            kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
            kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
#endif
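        /* Map the L2 table that covers the (high) vector page into the
         * PTE space. */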
        pmap_map_entry(l1pagetable,
            trunc_page(PTE_BASE + (ARM_VECTORS_HIGH >> (PGSHIFT-2))),
            kernel_pt_table[KERNEL_PT_SYS].pv_pa,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
                pmap_map_entry(l1pagetable,
                    PTE_BASE + ((KERNEL_VM_BASE +
                    (loop * 0x00400000)) >> (PGSHIFT-2)),
                    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
                    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /* Map the vector page. */
        pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /*
         * Map devices we can map w/ section mappings.
         */
        loop = 0;
        while (l1_sec_table[loop].size) {
                vm_size_t sz;

#ifdef VERBOSE_INIT_ARM
                printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
                    l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
                    l1_sec_table[loop].va);
#endif
                for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
                        pmap_map_section(l1pagetable,
                            l1_sec_table[loop].va + sz,
                            l1_sec_table[loop].pa + sz,
                            l1_sec_table[loop].prot,
                            l1_sec_table[loop].cache);
                ++loop;
        }

        /*
         * Give the XScale global cache clean code an appropriately
         * sized chunk of unmapped VA space starting at 0xff500000
         * (our device mappings end before this address).
         */
        xscale_cache_clean_addr = 0xff500000U;

        /*
         * Now we have the real page tables in place so we can switch to them.
         * Once this is done we will be running with the REAL kernel page
         * tables.
         */

        /* Switch tables */
#ifdef VERBOSE_INIT_ARM
        printf("switching to new L1 page table @%#lx...", kernel_l1pt.pv_pa);
#endif
#ifdef ARM32_PMAP_NEW
        cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
#endif
        setttb(kernel_l1pt.pv_pa);
        cpu_tlb_flushID();
#ifdef ARM32_PMAP_NEW
        cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

        /*
         * Move from cpu_startup() as data_abort_handler() references
         * this during uvm init
         */
        proc0paddr = (struct user *)kernelstack.pv_va;
        lwp0.l_addr = proc0paddr;
#endif

#ifdef VERBOSE_INIT_ARM
        printf("done!\n");
#endif

#ifdef VERBOSE_INIT_ARM
        printf("bootstrap done.\n");
#endif

        /*
         * Inform the BECC code where the BECC is mapped.
         */
        becc_vaddr = BRH_BECC_VBASE;

        /*
         * BECC <= Rev7 can only address 64M through the inbound
         * PCI windows.  Limit memory to 64M on those revs.  (This
         * problem was fixed in Rev8 of the BECC; get an FPGA upgrade.)
         */
        {
                vaddr_t va = BRH_PCI_CONF_VBASE | (1U << BECC_IDSEL_BIT) |
                    PCI_CLASS_REG;
                uint32_t reg;

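                /*
                 * Read the class/revision register from the BECC's own
                 * PCI configuration space to determine its revision.
                 */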
                reg = *(__volatile uint32_t *) va;
                becc_rev = PCI_REVISION(reg);
                if (becc_rev <= BECC_REV_V7 &&
                    memsize > (64UL * 1024 * 1024)) {
                        memsize = (64UL * 1024 * 1024);
                        bootconfig.dram[0].pages = memsize / PAGE_SIZE;
                        physical_end = physical_start +
                            (bootconfig.dram[0].pages * PAGE_SIZE);
                        printf("BECC <= Rev7: memory truncated to 64M\n");
                }
        }

        /*
         * Update the physical_freestart/physical_freeend/free_pages
         * variables.
         */
        {
                extern char _end[];

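                /*
                 * Round the kernel image's end address up to a page
                 * boundary and convert it from a virtual to a physical
                 * address.
                 */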
                physical_freestart = physical_start +
                    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
                     KERNEL_BASE);
                physical_freeend = physical_end;
                free_pages =
                    (physical_freeend - physical_freestart) / PAGE_SIZE;
        }
#ifdef VERBOSE_INIT_ARM
        printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
            physical_freestart, free_pages, free_pages);
#endif

        physmem = (physical_end - physical_start) / PAGE_SIZE;

        arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

        /*
         * Pages were allocated during the secondary bootstrap for the
         * stacks for different CPU modes.
         * We must now set the r13 registers in the different CPU modes to
         * point to these stacks.
         * Since the ARM stacks use STMFD etc. we must set r13 to the top end
         * of the stack memory.
         */
        printf("init subsystems: stacks ");

        set_stackptr(PSR_IRQ32_MODE,
            irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
        set_stackptr(PSR_ABT32_MODE,
            abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
        set_stackptr(PSR_UND32_MODE,
            undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

        /*
         * Well we should set a data abort handler.
         * Once things get going this will change as we will need a proper
         * handler.
         * Until then we will use a handler that just panics but tells us
         * why.
         * Initialisation of the vectors will just panic on a data abort.
         * This just fills in a slightly better one.
         */
        printf("vectors ");
        data_abort_handler_address = (u_int)data_abort_handler;
        prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
        undefined_handler_address = (u_int)undefinedinstruction_bounce;

        /* Initialise the undefined instruction handlers */
        printf("undefined ");
        undefined_init();

        /* Load memory into UVM. */
        printf("page ");
        uvm_setpagesize();      /* initialize PAGE_SIZE-dependent variables */
        uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
            atop(physical_freestart), atop(physical_freeend),
            VM_FREELIST_DEFAULT);

        /* Boot strap pmap telling it where the kernel page table is */
        printf("pmap ");
#ifdef ARM32_PMAP_NEW
        pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va);
#else
        pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);
#endif

        /* Setup the IRQ system */
        printf("irq ");
        becc_intr_init();
        printf("done.\n");

#ifdef IPKDB
        /* Initialise ipkdb */
        ipkdb_init();
        if (boothowto & RB_KDB)
                ipkdb_connect(0);
#endif

#if NKSYMS || defined(DDB) || defined(LKM)
        /* Firmware doesn't load symbols. */
        ksyms_init(0, NULL, NULL);
#endif

#ifdef DDB
        db_machine_init();
        if (boothowto & RB_KDB)
                Debugger();
#endif

        /* We return the new stack pointer address */
        return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

void
consinit(void)
{
        static const bus_addr_t comcnaddrs[] = {
                BRH_UART1_BASE,         /* com0 */
                BRH_UART2_BASE,         /* com1 */
        };
        static int consinit_called;

        if (consinit_called != 0)
                return;

        consinit_called = 1;

#if NCOM > 0
        if (comcnattach(&obio_bs_tag, comcnaddrs[comcnunit], comcnspeed,
            BECC_PERIPH_CLOCK, comcnmode))
                panic("can't init serial console @%lx", comcnaddrs[comcnunit]);
#else
        panic("serial console @%lx not configured", comcnaddrs[comcnunit]);
#endif
}