/*	$NetBSD: brh_machdep.c,v 1.5 2003/04/22 13:51:11 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup for the ADI Engineering
 * BRH i80200 evaluation platform.
 */

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/exec.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <arm/undefined.h>

#include <arm/arm32/machdep.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

#include <dev/pci/ppbreg.h>

#include <arm/xscale/beccreg.h>
#include <arm/xscale/beccvar.h>

#include <evbarm/adi_brh/brhreg.h>
#include <evbarm/adi_brh/brhvar.h>
#include <evbarm/adi_brh/obiovar.h>

#include "opt_ipkdb.h"

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

u_int cpu_reset_address = 0x00000000;

/* Define various stack sizes in pages */
#define	IRQ_STACK_SIZE	1
#define	ABT_STACK_SIZE	1
#ifdef IPKDB
#define	UND_STACK_SIZE	2
#else
#define	UND_STACK_SIZE	1
#endif

BootConfig bootconfig;		/* Boot config storage */
char *boot_args = NULL;
char *boot_file = NULL;

vm_offset_t physical_start;
vm_offset_t physical_freestart;
vm_offset_t physical_freeend;
vm_offset_t physical_end;
u_int free_pages;
vm_offset_t pagetables_start;
int physmem = 0;

/*int debug_flags;*/
#ifndef PMAP_STATIC_L1S
int max_processes = 64;		/* Default number */
#endif	/* !PMAP_STATIC_L1S */

/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t abtstack;
pv_addr_t kernelstack;
pv_addr_t minidataclean;

vm_offset_t msgbufphys;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif

#define	KERNEL_PT_SYS		0	/* L2 table for mapping zero page */

#define	KERNEL_PT_KERNEL	1	/* L2 table for mapping kernel */
#define	KERNEL_PT_KERNEL_NUM	2

/* L2 tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
#define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

struct user *proc0paddr;

/* Prototypes */

void	consinit(void);

#include "com.h"
#if NCOM > 0
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
#endif

/*
 * Define the default console speed for the board.  This is generally
 * what the firmware provided with the board defaults to.
 */
#ifndef CONSPEED
#define	CONSPEED B57600
#endif /* ! CONSPEED */

#ifndef CONUNIT
#define	CONUNIT 0
#endif

#ifndef CONMODE
#define	CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
#endif

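/*
 * Console parameters used by consinit().  The CON* defaults above are
 * intended to be overridable from the kernel configuration.
 */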
int comcnspeed = CONSPEED;
int comcnmode = CONMODE;
int comcnunit = CONUNIT;

/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */
void
cpu_reboot(int howto, char *bootstr)
{
#ifdef DIAGNOSTIC
	/* info */
	printf("boot: howto=%08x curproc=%p\n", howto, curproc);
#endif

	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast
	 */
	if (cold) {
		doshutdownhooks();
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		goto reset;
	}

	/* Disable console buffering */

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the
	 * unmount.  It looks like syslogd is getting woken up only to find
	 * that it cannot page part of the binary in as the filesystem has
	 * been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQ's are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
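		/* Put '8' on the 7-segment display to indicate the halted state. */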
		brh_7seg('8');
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n\r");
 reset:
	cpu_reset();
}

/*
 * Mapping table for the board's fixed device and PCI window space.
 * This memory is mapped at init time with section mappings.
 */
struct l1_sec_map {
	vaddr_t	va;
	vaddr_t	pa;
	vsize_t	size;
	vm_prot_t prot;
	int	cache;
} l1_sec_table[] = {
	{
		BRH_PCI_CONF_VBASE,
		BECC_PCI_CONF_BASE,
		BRH_PCI_CONF_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_PCI_MEM1_VBASE,
		BECC_PCI_MEM1_BASE,
		BRH_PCI_MEM1_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_PCI_MEM2_VBASE,
		BECC_PCI_MEM2_BASE,
		BRH_PCI_MEM2_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_UART1_VBASE,
		BRH_UART1_BASE,
		BRH_UART1_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_UART2_VBASE,
		BRH_UART2_BASE,
		BRH_UART2_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_LED_VBASE,
		BRH_LED_BASE,
		BRH_LED_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_PCI_IO_VBASE,
		BECC_PCI_IO_BASE,
		BRH_PCI_IO_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		BRH_BECC_VBASE,
		BECC_REG_BASE,
		BRH_BECC_VSIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		0,
		0,
		0,
		0,
		0,
	}
};

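/*
 * brh_hardclock_hook:
 *
 *	Hook called from the BECC hardclock handler (registered in
 *	initarm() below); advances the on-board 7-segment display
 *	"snake" pattern every 16 ticks as a simple visual heartbeat.
 */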
static void
brh_hardclock_hook(void)
{
	static int snakefreq;

	if ((snakefreq++ & 15) == 0)
		brh_7seg_snake();
}

/*
 * u_int initarm(...)
 *
 * Initial entry point on startup.  This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *	Taking a copy of the boot configuration structure.
 *	Initialising the physical console so characters can be printed.
 *	Setting up page tables for the kernel
 *	Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	extern vaddr_t xscale_cache_clean_addr;
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size;
#endif
	int loop;
	int loop1;
	u_int l1pagetable;
	pv_addr_t kernel_l1pt;
	pv_addr_t kernel_ptpt;
	paddr_t memstart;
	psize_t memsize;

	/*
	 * Clear out the 7-segment display.  Whee, the first visual
	 * indication that we're running kernel code.
	 */
	brh_7seg(' ');

	/*
	 * Since we have mapped the on-board devices at their permanent
	 * locations already, it is possible for us to initialize
	 * the console now.
	 */
	consinit();

	/* Talk to the user */
	printf("\nNetBSD/evbarm (ADI BRH) booting ...\n");

	/* Calibrate the delay loop. */
	becc_calibrate_delay();
	becc_hardclock_hook = brh_hardclock_hook;

	/*
	 * Heads up ... Set up the CPU / MMU / TLB functions.
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/*
	 * We are currently running with the MMU enabled and the
	 * entire address space mapped VA==PA.  Memory conveniently
	 * starts at 0xc0000000, which is where we want it.  Certain
	 * on-board devices have already been mapped where we want
	 * them to be.  There is an L1 page table at 0xc0004000.
	 */

	becc_icu_init();

	/*
	 * Memory always starts at 0xc0000000 on a BRH, and the
	 * memory size is always 128M.
	 */
	memstart = 0xc0000000UL;
	memsize = (128UL * 1024 * 1024);

	printf("initarm: Configuring system ...\n");

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xc0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the L1 table that we set up, we
	 * will panic.  We will update physical_freestart and
	 * physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xc0009000UL;
	physical_freeend = 0xc0200000UL;

	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
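/*
 * alloc_pages() takes (np) pages off the top of the bootstrap free
 * region by moving physical_freeend downwards and zeroes them (valid
 * because we are still running VA==PA); valloc_pages() additionally
 * records the virtual address the pages will have once the kernel is
 * running at KERNEL_BASE.
 */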
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define	alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	kernel_l1pt.pv_pa = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
#ifdef ARM32_PMAP_NEW
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
#else
			alloc_pages(kernel_pt_table[loop1].pv_pa,
			    L2_TABLE_SIZE / PAGE_SIZE);
			kernel_pt_table[loop1].pv_va =
			    kernel_pt_table[loop1].pv_pa;
#endif
			++loop1;
		}
	}

	/* This should never be able to happen, but check anyway. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory\n");

	/*
	 * Allocate a page for the system vector page.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate a page for the page table to map kernel page tables. */
	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifndef ARM32_PMAP_NEW
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}
#endif

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the page table that maps the kernel pages */
	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
#ifndef ARM32_PMAP_NEW
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
#endif

	/*
	 * Map entries in the page table used to map PTE's
	 * Basically every kernel page table gets mapped here
	 */
	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++) {
		pmap_map_entry(l1pagetable,
		    PTE_BASE + ((KERNEL_BASE +
		    (loop * 0x00400000)) >> (PGSHIFT-2)),
		    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
#ifndef ARM32_PMAP_NEW
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
#endif
	}
	pmap_map_entry(l1pagetable,
	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
#ifndef ARM32_PMAP_NEW
	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
#endif
	pmap_map_entry(l1pagetable,
	    trunc_page(PTE_BASE + (ARM_VECTORS_HIGH >> (PGSHIFT-2))),
	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_map_entry(l1pagetable,
		    PTE_BASE + ((KERNEL_VM_BASE +
		    (loop * 0x00400000)) >> (PGSHIFT-2)),
		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/*
	 * Map devices we can map w/ section mappings.
	 */
	loop = 0;
	while (l1_sec_table[loop].size) {
		vm_size_t sz;

#ifdef VERBOSE_INIT_ARM
		printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
		    l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
		    l1_sec_table[loop].va);
#endif
		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
			pmap_map_section(l1pagetable,
			    l1_sec_table[loop].va + sz,
			    l1_sec_table[loop].pa + sz,
			    l1_sec_table[loop].prot,
			    l1_sec_table[loop].cache);
		++loop;
	}

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff500000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff500000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...", kernel_l1pt.pv_pa);
#endif
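	/*
	 * While switching to the new L1 table, grant client access to
	 * both the kernel's pmap domain and domain 0 (which the
	 * bootstrap VA==PA mappings are presumed to use); once the
	 * switch is complete, access is restricted to the kernel
	 * domain alone.
	 */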
#ifdef ARM32_PMAP_NEW
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
#endif
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
#ifdef ARM32_PMAP_NEW
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;
#endif

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	/*
	 * Inform the BECC code where the BECC is mapped.
	 */
	becc_vaddr = BRH_BECC_VBASE;

	/*
	 * BECC <= Rev7 can only address 64M through the inbound
	 * PCI windows.  Limit memory to 64M on those revs.  (This
	 * problem was fixed in Rev8 of the BECC; get an FPGA upgrade.)
	 */
	{
		vaddr_t va = BRH_PCI_CONF_VBASE | (1U << BECC_IDSEL_BIT) |
		    PCI_CLASS_REG;
		uint32_t reg;

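		/*
		 * Read the BECC's PCI class code / revision ID register
		 * through the configuration window mapped above to
		 * determine which revision of the BECC we are running on.
		 */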
		reg = *(__volatile uint32_t *) va;
		becc_rev = PCI_REVISION(reg);
		if (becc_rev <= BECC_REV_V7 &&
		    memsize > (64UL * 1024 * 1024)) {
			memsize = (64UL * 1024 * 1024);
			bootconfig.dram[0].pages = memsize / PAGE_SIZE;
			physical_end = physical_start +
			    (bootconfig.dram[0].pages * PAGE_SIZE);
			printf("BECC <= Rev7: memory truncated to 64M\n");
		}
	}

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	physmem = (physical_end - physical_start) / PAGE_SIZE;

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for the different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc., we must set r13 to the top
	 * end of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Load memory into UVM. */
	printf("page ");
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
	printf("pmap ");
#ifdef ARM32_PMAP_NEW
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va);
#else
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);
#endif

	/* Set up the IRQ system */
	printf("irq ");
	becc_intr_init();
	printf("done.\n");

#ifdef IPKDB
	/* Initialise ipkdb */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

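/*
 * consinit:
 *
 *	Initialize the serial console.  May be called more than once
 *	during boot, hence the consinit_called guard.
 */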
void
consinit(void)
{
	static const bus_addr_t comcnaddrs[] = {
		BRH_UART1_BASE,		/* com0 */
		BRH_UART2_BASE,		/* com1 */
	};
	static int consinit_called;

	if (consinit_called != 0)
		return;

	consinit_called = 1;

#if NCOM > 0
	if (comcnattach(&obio_bs_tag, comcnaddrs[comcnunit], comcnspeed,
	    BECC_PERIPH_CLOCK, comcnmode))
		panic("can't init serial console @%lx", comcnaddrs[comcnunit]);
#else
	panic("serial console @%lx not configured", comcnaddrs[comcnunit]);
#endif
}