gemini_machdep.c revision 1.15.4.1
1 1.15 uebayasi /* $NetBSD: gemini_machdep.c,v 1.15.4.1 2011/03/05 20:50:05 rmind Exp $ */
2 1.1 matt
3 1.1 matt /* adapted from:
4 1.1 matt * NetBSD: sdp24xx_machdep.c,v 1.4 2008/08/27 11:03:10 matt Exp
5 1.1 matt */
6 1.1 matt
7 1.1 matt /*
8 1.1 matt * Machine dependent functions for kernel setup for the Gemini board.
9 1.1 matt * Based on lubbock_machdep.c which in turn was based on iq80310_machdep.c
10 1.1 matt *
11 1.1 matt * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
12 1.1 matt * Written by Hiroyuki Bessho for Genetec Corporation.
13 1.1 matt *
14 1.1 matt * Redistribution and use in source and binary forms, with or without
15 1.1 matt * modification, are permitted provided that the following conditions
16 1.1 matt * are met:
17 1.1 matt * 1. Redistributions of source code must retain the above copyright
18 1.1 matt * notice, this list of conditions and the following disclaimer.
19 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
20 1.1 matt * notice, this list of conditions and the following disclaimer in the
21 1.1 matt * documentation and/or other materials provided with the distribution.
22 1.1 matt * 3. The name of Genetec Corporation may not be used to endorse or
23 1.1 matt * promote products derived from this software without specific prior
24 1.1 matt * written permission.
25 1.1 matt *
26 1.1 matt * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
27 1.1 matt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
30 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
37 1.1 matt *
38 1.1 matt * Copyright (c) 2001 Wasabi Systems, Inc.
39 1.1 matt * All rights reserved.
40 1.1 matt *
41 1.1 matt * Written by Jason R. Thorpe for Wasabi Systems, Inc.
42 1.1 matt *
43 1.1 matt * Redistribution and use in source and binary forms, with or without
44 1.1 matt * modification, are permitted provided that the following conditions
45 1.1 matt * are met:
46 1.1 matt * 1. Redistributions of source code must retain the above copyright
47 1.1 matt * notice, this list of conditions and the following disclaimer.
48 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
49 1.1 matt * notice, this list of conditions and the following disclaimer in the
50 1.1 matt * documentation and/or other materials provided with the distribution.
51 1.1 matt * 3. All advertising materials mentioning features or use of this software
52 1.1 matt * must display the following acknowledgement:
53 1.1 matt * This product includes software developed for the NetBSD Project by
54 1.1 matt * Wasabi Systems, Inc.
55 1.1 matt * 4. The name of Wasabi Systems, Inc. may not be used to endorse
56 1.1 matt * or promote products derived from this software without specific prior
57 1.1 matt * written permission.
58 1.1 matt *
59 1.1 matt * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
60 1.1 matt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
61 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
62 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
63 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
69 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
70 1.1 matt *
71 1.1 matt * Copyright (c) 1997,1998 Mark Brinicombe.
72 1.1 matt * Copyright (c) 1997,1998 Causality Limited.
73 1.1 matt * All rights reserved.
74 1.1 matt *
75 1.1 matt * Redistribution and use in source and binary forms, with or without
76 1.1 matt * modification, are permitted provided that the following conditions
77 1.1 matt * are met:
78 1.1 matt * 1. Redistributions of source code must retain the above copyright
79 1.1 matt * notice, this list of conditions and the following disclaimer.
80 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
81 1.1 matt * notice, this list of conditions and the following disclaimer in the
82 1.1 matt * documentation and/or other materials provided with the distribution.
83 1.1 matt * 3. All advertising materials mentioning features or use of this software
84 1.1 matt * must display the following acknowledgement:
85 1.1 matt * This product includes software developed by Mark Brinicombe
86 1.1 matt * for the NetBSD Project.
87 1.1 matt * 4. The name of the company nor the name of the author may be used to
88 1.1 matt * endorse or promote products derived from this software without specific
89 1.1 matt * prior written permission.
90 1.1 matt *
91 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
92 1.1 matt * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
93 1.1 matt * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
94 1.1 matt * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
95 1.1 matt * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
96 1.1 matt * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
97 1.1 matt * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
98 1.1 matt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
99 1.1 matt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
100 1.1 matt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
101 1.1 matt * SUCH DAMAGE.
102 1.1 matt *
103 1.1 matt * Copyright (c) 2007 Microsoft
104 1.1 matt * All rights reserved.
105 1.1 matt *
106 1.1 matt * Redistribution and use in source and binary forms, with or without
107 1.1 matt * modification, are permitted provided that the following conditions
108 1.1 matt * are met:
109 1.1 matt * 1. Redistributions of source code must retain the above copyright
110 1.1 matt * notice, this list of conditions and the following disclaimer.
111 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
112 1.1 matt * notice, this list of conditions and the following disclaimer in the
113 1.1 matt * documentation and/or other materials provided with the distribution.
114 1.1 matt * 3. All advertising materials mentioning features or use of this software
115 1.1 matt * must display the following acknowledgement:
116 1.1 matt * This product includes software developed by Microsoft
117 1.1 matt *
118 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
119 1.1 matt * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
120 1.1 matt * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
121 1.1 matt * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
122 1.1 matt * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
123 1.1 matt * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
124 1.1 matt * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
125 1.1 matt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
126 1.1 matt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
127 1.1 matt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
128 1.1 matt * SUCH DAMAGE.
129 1.1 matt */
130 1.1 matt
131 1.1 matt #include <sys/cdefs.h>
132 1.15 uebayasi __KERNEL_RCSID(0, "$NetBSD: gemini_machdep.c,v 1.15.4.1 2011/03/05 20:50:05 rmind Exp $");
133 1.1 matt
134 1.1 matt #include "opt_machdep.h"
135 1.1 matt #include "opt_ddb.h"
136 1.1 matt #include "opt_kgdb.h"
137 1.1 matt #include "opt_ipkdb.h"
138 1.1 matt #include "opt_md.h"
139 1.1 matt #include "opt_com.h"
140 1.1 matt #include "opt_gemini.h"
141 1.1 matt #include "geminiwdt.h"
142 1.11 cliff #include "geminiipm.h"
143 1.1 matt
144 1.1 matt #include <sys/param.h>
145 1.1 matt #include <sys/device.h>
146 1.1 matt #include <sys/systm.h>
147 1.1 matt #include <sys/kernel.h>
148 1.1 matt #include <sys/exec.h>
149 1.1 matt #include <sys/proc.h>
150 1.1 matt #include <sys/msgbuf.h>
151 1.1 matt #include <sys/reboot.h>
152 1.1 matt #include <sys/termios.h>
153 1.1 matt #include <sys/ksyms.h>
154 1.1 matt
155 1.1 matt #include <uvm/uvm_extern.h>
156 1.1 matt
157 1.1 matt #include <sys/conf.h>
158 1.1 matt #include <dev/cons.h>
159 1.1 matt #include <dev/md.h>
160 1.1 matt
161 1.1 matt #include <machine/db_machdep.h>
162 1.1 matt #include <ddb/db_sym.h>
163 1.1 matt #include <ddb/db_extern.h>
164 1.1 matt #ifdef KGDB
165 1.1 matt #include <sys/kgdb.h>
166 1.1 matt #endif
167 1.1 matt
168 1.1 matt #include <machine/bootconfig.h>
169 1.1 matt #include <machine/bus.h>
170 1.1 matt #include <machine/cpu.h>
171 1.1 matt #include <machine/frame.h>
172 1.1 matt #include <arm/armreg.h>
173 1.1 matt #include <arm/undefined.h>
174 1.1 matt
175 1.1 matt #include <arm/arm32/machdep.h>
176 1.1 matt
177 1.1 matt #include <arm/gemini/gemini_reg.h>
178 1.1 matt #include <arm/gemini/gemini_var.h>
179 1.1 matt #include <arm/gemini/gemini_wdtvar.h>
180 1.1 matt #include <arm/gemini/gemini_com.h>
181 1.6 cliff #include <arm/gemini/lpc_com.h>
182 1.1 matt
183 1.1 matt #include <evbarm/gemini/gemini.h>
184 1.1 matt
185 1.6 cliff #if defined(VERBOSE_INIT_ARM)
186 1.5 cliff # define GEMINI_PUTCHAR(c) gemini_putchar(c)
187 1.5 cliff # define GEMINI_PUTHEX(n) gemini_puthex(n)
188 1.5 cliff #else /* VERBOSE_INIT_ARM */
189 1.5 cliff # define GEMINI_PUTCHAR(c)
190 1.5 cliff # define GEMINI_PUTHEX(n)
191 1.5 cliff #endif /* VERBOSE_INIT_ARM */
192 1.5 cliff
193 1.1 matt /*
194 1.1 matt * Address to call from cpu_reset() to reset the machine.
195 1.1 matt * This is machine architecture dependent as it varies depending
196 1.1 matt * on where the ROM appears when you turn the MMU off.
197 1.1 matt */
198 1.1 matt
199 1.1 matt u_int cpu_reset_address = 0;
200 1.1 matt
201 1.1 matt /* Define various stack sizes in pages */
202 1.1 matt #define IRQ_STACK_SIZE 1
203 1.1 matt #define FIQ_STACK_SIZE 1
204 1.1 matt #define ABT_STACK_SIZE 1
205 1.1 matt #ifdef IPKDB
206 1.1 matt #define UND_STACK_SIZE 2
207 1.1 matt #else
208 1.1 matt #define UND_STACK_SIZE 1
209 1.1 matt #endif
210 1.1 matt
211 1.1 matt BootConfig bootconfig; /* Boot config storage */
212 1.1 matt char *boot_args = NULL;
213 1.1 matt char *boot_file = NULL;
214 1.1 matt
215 1.1 matt /* Physical address of the beginning of SDRAM. */
216 1.1 matt paddr_t physical_start;
217 1.1 matt /* Physical address of the first byte after the end of SDRAM. */
218 1.1 matt paddr_t physical_end;
219 1.1 matt
220 1.1 matt /* Same things, but for the free (unused by the kernel) memory. */
221 1.1 matt static paddr_t physical_freestart, physical_freeend;
222 1.1 matt static u_int free_pages;
223 1.1 matt
224 1.1 matt /* Physical and virtual addresses for some global pages */
225 1.1 matt pv_addr_t fiqstack;
226 1.1 matt pv_addr_t irqstack;
227 1.1 matt pv_addr_t undstack;
228 1.1 matt pv_addr_t abtstack;
229 1.1 matt pv_addr_t kernelstack; /* stack for SVC mode */
230 1.1 matt
231 1.1 matt /* Physical address of the message buffer. */
232 1.1 matt paddr_t msgbufphys;
233 1.1 matt
234 1.1 matt extern u_int data_abort_handler_address;
235 1.1 matt extern u_int prefetch_abort_handler_address;
236 1.1 matt extern u_int undefined_handler_address;
237 1.1 matt extern char KERNEL_BASE_phys[];
238 1.1 matt extern char KERNEL_BASE_virt[];
239 1.1 matt extern char etext[], __data_start[], _edata[], __bss_start[], __bss_end__[];
240 1.1 matt extern char _end[];
241 1.1 matt
242 1.1 matt #define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */
243 1.1 matt #define KERNEL_PT_KERNEL 1 /* Page table for mapping kernel */
244 1.1 matt #define KERNEL_PT_KERNEL_NUM 4
245 1.1 matt #define KERNEL_PT_VMDATA (KERNEL_PT_KERNEL+KERNEL_PT_KERNEL_NUM)
246 1.1 matt /* Page tables for mapping kernel VM */
247 1.1 matt #define KERNEL_PT_VMDATA_NUM 4 /* start with 16MB of KVM */
248 1.1 matt #define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
249 1.1 matt
250 1.1 matt pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
251 1.1 matt
252 1.11 cliff
253 1.11 cliff #if (NGEMINIIPM > 0)
254 1.11 cliff pv_addr_t ipmq_pt; /* L2 Page table for mapping IPM queues */
255 1.11 cliff #if defined(DEBUG) || 1
256 1.11 cliff unsigned long gemini_ipmq_pbase = GEMINI_IPMQ_PBASE;
257 1.11 cliff unsigned long gemini_ipmq_vbase = GEMINI_IPMQ_VBASE;
258 1.11 cliff #endif /* DEBUG */
259 1.11 cliff #endif /* NGEMINIIPM > 0 */
260 1.11 cliff
261 1.11 cliff
262 1.1 matt /*
263 1.1 matt * Macros to translate between physical and virtual for a subset of the
264 1.1 matt * kernel address space. *Not* for general use.
265 1.1 matt */
266 1.1 matt #define KERNEL_BASE_PHYS ((paddr_t)&KERNEL_BASE_phys)
267 1.1 matt
268 1.1 matt #define KERN_VTOPHYS(va) \
269 1.1 matt ((paddr_t)((vaddr_t)va - KERNEL_BASE + GEMINI_DRAM_BASE))
270 1.1 matt #define KERN_PHYSTOV(pa) \
271 1.1 matt ((vaddr_t)((paddr_t)pa - GEMINI_DRAM_BASE + KERNEL_BASE))
272 1.1 matt
273 1.1 matt /* Prototypes */
274 1.1 matt
275 1.1 matt void gemini_intr_init(bus_space_tag_t);
276 1.1 matt void consinit(void);
277 1.1 matt #ifdef KGDB
278 1.1 matt static void kgdb_port_init(void);
279 1.1 matt #endif
280 1.1 matt
281 1.1 matt static void setup_real_page_tables(void);
282 1.1 matt static void init_clocks(void);
283 1.1 matt
284 1.1 matt bs_protos(bs_notimpl);
285 1.1 matt
286 1.1 matt #include "com.h"
287 1.1 matt #if NCOM > 0
288 1.1 matt #include <dev/ic/comreg.h>
289 1.1 matt #include <dev/ic/comvar.h>
290 1.1 matt #endif
291 1.1 matt
292 1.3 cliff
293 1.3 cliff static void gemini_global_reset(void) __attribute__ ((noreturn));
294 1.8 cliff static void gemini_cpu1_start(void);
295 1.9 cliff static void gemini_memchk(void);
296 1.3 cliff
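/*
 * gemini_global_reset:
 *
 * Assert the global soft-reset bit in the global reset control
 * register (MASTER/SINGLE configurations only) and spin until the
 * reset takes effect.  Never returns.
 */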
297 1.3 cliff static void
298 1.3 cliff gemini_global_reset(void)
299 1.3 cliff {
300 1.8 cliff #if defined(GEMINI_MASTER) || defined(GEMINI_SINGLE)
301 1.3 cliff volatile uint32_t *rp;
302 1.3 cliff uint32_t r;
303 1.3 cliff
304 1.3 cliff rp = (volatile uint32_t *)
305 1.3 cliff (GEMINI_GLOBAL_VBASE + GEMINI_GLOBAL_RESET_CTL);
306 1.3 cliff r = *rp;
307 1.3 cliff r |= GLOBAL_RESET_GLOBAL;
308 1.3 cliff *rp = r;
309 1.8 cliff #endif
310 1.3 cliff for(;;);
311 1.3 cliff /* NOTREACHED */
312 1.3 cliff }
313 1.3 cliff
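/*
 * gemini_cpu1_start:
 *
 * Release the second core (CPU#1) from reset by clearing its bit in
 * the global reset control register.  Only the MASTER configuration
 * does this; otherwise this is a no-op.
 */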
314 1.8 cliff static void
315 1.8 cliff gemini_cpu1_start(void)
316 1.8 cliff {
317 1.8 cliff #ifdef GEMINI_MASTER
318 1.8 cliff volatile uint32_t *rp;
319 1.8 cliff uint32_t r;
320 1.8 cliff
321 1.8 cliff rp = (volatile uint32_t *)
322 1.8 cliff (GEMINI_GLOBAL_VBASE + GEMINI_GLOBAL_RESET_CTL);
323 1.8 cliff r = *rp;
324 1.8 cliff r &= ~GLOBAL_RESET_CPU1;
325 1.8 cliff *rp = r;
326 1.8 cliff #endif
327 1.8 cliff }
328 1.8 cliff
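/*
 * gemini_memchk:
 *
 * Read the DRAM controller's remap control register, sanity-check the
 * remap base/size against the MEMSIZE configured for this kernel
 * (SINGLE, MASTER or SLAVE), panicking on a mismatch, and report the
 * remap settings.
 */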
329 1.9 cliff static void
330 1.9 cliff gemini_memchk(void)
331 1.9 cliff {
332 1.9 cliff volatile uint32_t *rp;
333 1.9 cliff uint32_t r;
334 1.9 cliff uint32_t base;
335 1.9 cliff uint32_t size;
336 1.9 cliff
337 1.9 cliff rp = (volatile uint32_t *)
338 1.9 cliff (GEMINI_DRAMC_VBASE + GEMINI_DRAMC_RMCR);
339 1.9 cliff r = *rp;
340 1.9 cliff base = (r & DRAMC_RMCR_RMBAR) >> DRAMC_RMCR_RMBAR_SHFT;
341 1.9 cliff size = (r & DRAMC_RMCR_RMSZR) >> DRAMC_RMCR_RMSZR_SHFT;
342 1.10 cliff #if defined(GEMINI_SINGLE)
343 1.10 cliff if (r != 0)
344 1.10 cliff panic("%s: RMCR %#x, MEMSIZE %d mismatch\n",
345 1.10 cliff __FUNCTION__, r, MEMSIZE);
346 1.10 cliff #elif defined(GEMINI_MASTER)
347 1.9 cliff if (base != MEMSIZE)
348 1.9 cliff panic("%s: RMCR %#x, MEMSIZE %d mismatch\n",
349 1.9 cliff __FUNCTION__, r, MEMSIZE);
350 1.10 cliff #elif defined(GEMINI_SLAVE)
351 1.9 cliff if (size != MEMSIZE)
352 1.9 cliff panic("%s: RMCR %#x, MEMSIZE %d mismatch\n",
353 1.9 cliff __FUNCTION__, r, MEMSIZE);
354 1.9 cliff #endif
355 1.9 cliff #if defined(VERBOSE_INIT_ARM) || 1
356 1.9 cliff printf("DRAM Remap: base=%dMB, size=%dMB\n", base, size);
357 1.9 cliff #endif
358 1.9 cliff }
359 1.9 cliff
360 1.1 matt /*
361 1.1 matt * void cpu_reboot(int howto, char *bootstr)
362 1.1 matt *
363 1.1 matt * Reboots the system
364 1.1 matt *
365 1.1 matt * Deal with any syncing, unmounting, dumping and shutdown hooks,
366 1.1 matt * then reset the CPU.
367 1.1 matt */
368 1.1 matt void
369 1.1 matt cpu_reboot(int howto, char *bootstr)
370 1.1 matt {
371 1.5 cliff extern struct geminitmr_softc *ref_sc;
372 1.5 cliff
373 1.1 matt #ifdef DIAGNOSTIC
374 1.1 matt /* info */
375 1.1 matt printf("boot: howto=%08x curproc=%p\n", howto, curproc);
376 1.1 matt #endif
377 1.1 matt
378 1.1 matt /*
379 1.1 matt * If we are still cold then hit the air brakes
380 1.1 matt * and crash to earth fast
381 1.1 matt */
382 1.1 matt if (cold) {
383 1.1 matt doshutdownhooks();
384 1.7 dyoung pmf_system_shutdown(boothowto);
385 1.1 matt printf("The operating system has halted.\n");
386 1.1 matt printf("Please press any key to reboot.\n\n");
387 1.1 matt cngetc();
388 1.1 matt printf("rebooting...\n");
389 1.5 cliff if (ref_sc != NULL)
390 1.5 cliff delay(2000); /* cnflush(); */
391 1.3 cliff gemini_global_reset();
392 1.1 matt /*NOTREACHED*/
393 1.1 matt }
394 1.1 matt
395 1.1 matt /* Disable console buffering */
396 1.8 cliff cnpollc(1);
397 1.1 matt
398 1.1 matt /*
399 1.1 matt * If RB_NOSYNC was not specified sync the discs.
400 1.1 matt * Note: Unless cold is set to 1 here, syslogd will die during the
401 1.1 matt * unmount. It looks like syslogd is getting woken up only to find
402 1.1 matt * that it cannot page part of the binary in as the filesystem has
403 1.1 matt * been unmounted.
404 1.1 matt */
405 1.1 matt if (!(howto & RB_NOSYNC))
406 1.1 matt bootsync();
407 1.1 matt
408 1.1 matt /* Say NO to interrupts */
409 1.1 matt splhigh();
410 1.1 matt
411 1.1 matt /* Do a dump if requested. */
412 1.1 matt if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
413 1.1 matt dumpsys();
414 1.1 matt
415 1.1 matt /* Run any shutdown hooks */
416 1.1 matt doshutdownhooks();
417 1.1 matt
418 1.7 dyoung pmf_system_shutdown(boothowto);
419 1.7 dyoung
420 1.1 matt /* Make sure IRQ's are disabled */
421 1.1 matt IRQdisable;
422 1.1 matt
423 1.1 matt if (howto & RB_HALT) {
424 1.1 matt printf("The operating system has halted.\n");
425 1.1 matt printf("Please press any key to reboot.\n\n");
426 1.1 matt cngetc();
427 1.1 matt }
428 1.1 matt
429 1.1 matt printf("rebooting...\n");
430 1.5 cliff if (ref_sc != NULL)
431 1.5 cliff delay(2000); /* cnflush(); */
432 1.3 cliff gemini_global_reset();
433 1.1 matt /*NOTREACHED*/
434 1.1 matt }
435 1.1 matt
436 1.1 matt /*
437 1.1 matt * Static device mappings. These peripheral registers are mapped at
438 1.1 matt * fixed virtual addresses very early in initarm() so that we can use
439 1.1 matt * them while booting the kernel, and stay at the same address
440 1.1 matt * throughout the whole kernel's lifetime.
441 1.1 matt *
442 1.1 matt * We use this table twice; once with bootstrap page table, and once
443 1.1 matt * with kernel's page table which we build up in initarm().
444 1.1 matt *
445 1.1 matt * Since we map these registers into the bootstrap page table using
446 1.1 matt * pmap_devmap_bootstrap() which calls pmap_map_chunk(), we map
447 1.1 matt * registers segment-aligned and segment-rounded in order to avoid
448 1.1 matt * using second-level (L2) page tables.
449 1.1 matt */
450 1.1 matt
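/*
 * _A() truncates an address down to an L1 section boundary;
 * _S() rounds a size up to a whole number of L1 sections.
 */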
451 1.1 matt #define _A(a) ((a) & ~L1_S_OFFSET)
452 1.1 matt #define _S(s) (((s) + L1_S_SIZE - 1) & ~(L1_S_SIZE-1))
453 1.1 matt
454 1.1 matt static const struct pmap_devmap devmap[] = {
455 1.3 cliff /* Global regs */
456 1.3 cliff {
457 1.3 cliff .pd_va = _A(GEMINI_GLOBAL_VBASE),
458 1.3 cliff .pd_pa = _A(GEMINI_GLOBAL_BASE),
459 1.3 cliff .pd_size = _S(L1_S_SIZE),
460 1.3 cliff .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
461 1.3 cliff .pd_cache = PTE_NOCACHE
462 1.3 cliff },
463 1.3 cliff
464 1.1 matt /* Watchdog */
465 1.1 matt {
466 1.1 matt .pd_va = _A(GEMINI_WATCHDOG_VBASE),
467 1.1 matt .pd_pa = _A(GEMINI_WATCHDOG_BASE),
468 1.1 matt .pd_size = _S(L1_S_SIZE),
469 1.1 matt .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
470 1.1 matt .pd_cache = PTE_NOCACHE
471 1.1 matt },
472 1.1 matt
473 1.1 matt /* UART */
474 1.1 matt {
475 1.6 cliff .pd_va = _A(GEMINI_UART_VBASE),
476 1.6 cliff .pd_pa = _A(GEMINI_UART_BASE),
477 1.1 matt .pd_size = _S(L1_S_SIZE),
478 1.1 matt .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
479 1.1 matt .pd_cache = PTE_NOCACHE
480 1.1 matt },
481 1.1 matt
482 1.5 cliff /* LPCHC */
483 1.5 cliff {
484 1.5 cliff .pd_va = _A(GEMINI_LPCHC_VBASE),
485 1.5 cliff .pd_pa = _A(GEMINI_LPCHC_BASE),
486 1.5 cliff .pd_size = _S(L1_S_SIZE),
487 1.5 cliff .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
488 1.5 cliff .pd_cache = PTE_NOCACHE
489 1.5 cliff },
490 1.5 cliff
491 1.5 cliff /* LPCIO */
492 1.5 cliff {
493 1.5 cliff .pd_va = _A(GEMINI_LPCIO_VBASE),
494 1.5 cliff .pd_pa = _A(GEMINI_LPCIO_BASE),
495 1.5 cliff .pd_size = _S(L1_S_SIZE),
496 1.5 cliff .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
497 1.5 cliff .pd_cache = PTE_NOCACHE
498 1.5 cliff },
499 1.5 cliff
500 1.1 matt /* Timers */
501 1.1 matt {
502 1.1 matt .pd_va = _A(GEMINI_TIMER_VBASE),
503 1.1 matt .pd_pa = _A(GEMINI_TIMER_BASE),
504 1.1 matt .pd_size = _S(L1_S_SIZE),
505 1.1 matt .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
506 1.1 matt .pd_cache = PTE_NOCACHE
507 1.1 matt },
508 1.1 matt
509 1.9 cliff /* DRAM Controller */
510 1.9 cliff {
511 1.9 cliff .pd_va = _A(GEMINI_DRAMC_VBASE),
512 1.9 cliff .pd_pa = _A(GEMINI_DRAMC_BASE),
513 1.9 cliff .pd_size = _S(L1_S_SIZE),
514 1.9 cliff .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
515 1.9 cliff .pd_cache = PTE_NOCACHE
516 1.9 cliff },
517 1.9 cliff
518 1.5 cliff #if defined(MEMORY_DISK_DYNAMIC)
519 1.5 cliff /* Ramdisk */
520 1.5 cliff {
521 1.5 cliff .pd_va = _A(GEMINI_RAMDISK_VBASE),
522 1.5 cliff .pd_pa = _A(GEMINI_RAMDISK_PBASE),
523 1.5 cliff .pd_size = _S(GEMINI_RAMDISK_SIZE),
524 1.5 cliff .pd_prot = VM_PROT_READ|VM_PROT_WRITE,
525 1.5 cliff .pd_cache = PTE_NOCACHE
526 1.5 cliff },
527 1.5 cliff #endif
528 1.5 cliff
529 1.1 matt {0} /* list terminator */
530 1.1 matt };
531 1.1 matt
532 1.1 matt #undef _A
533 1.1 matt #undef _S
534 1.1 matt
535 1.1 matt #ifdef DDB
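/*
 * DDB entry/exit hook: disable the watchdog (if configured) while the
 * debugger is active and restore its previous state on exit.
 */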
536 1.1 matt static void gemini_db_trap(int where)
537 1.1 matt {
538 1.1 matt #if NGEMINIWDT > 0
539 1.1 matt static int oldwatchdogstate;
540 1.1 matt
541 1.1 matt if (where) {
542 1.1 matt oldwatchdogstate = geminiwdt_enable(0);
543 1.1 matt } else {
544 1.1 matt geminiwdt_enable(oldwatchdogstate);
545 1.1 matt }
546 1.1 matt #endif
547 1.1 matt }
548 1.1 matt #endif
549 1.1 matt
550 1.6 cliff #if defined(VERBOSE_INIT_ARM) || 1
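/*
 * Minimal polled output routines on the boot UART, usable before
 * consinit(); these back the GEMINI_PUTCHAR()/GEMINI_PUTHEX() early
 * debug macros defined above.
 */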
551 1.1 matt void gemini_putchar(char c);
552 1.1 matt void
553 1.1 matt gemini_putchar(char c)
554 1.1 matt {
555 1.6 cliff unsigned char *com0addr = (unsigned char *)GEMINI_UART_VBASE;
556 1.1 matt int timo = 150000;
557 1.1 matt
558 1.1 matt while ((com0addr[COM_REG_LSR * 4] & LSR_TXRDY) == 0)
559 1.1 matt if (--timo == 0)
560 1.1 matt break;
561 1.1 matt
562 1.1 matt com0addr[COM_REG_TXDATA] = c;
563 1.1 matt
564 1.1 matt while ((com0addr[COM_REG_LSR * 4] & LSR_TSRE) == 0)
565 1.1 matt if (--timo == 0)
566 1.1 matt break;
567 1.1 matt }
568 1.1 matt
569 1.1 matt void gemini_puthex(unsigned int);
570 1.1 matt void
571 1.1 matt gemini_puthex(unsigned int val)
572 1.1 matt {
573 1.1 matt char hexc[] = "0123456789abcdef";
574 1.1 matt
575 1.1 matt gemini_putchar('0');
576 1.1 matt gemini_putchar('x');
577 1.1 matt gemini_putchar(hexc[(val >> 28) & 0xf]);
578 1.1 matt gemini_putchar(hexc[(val >> 24) & 0xf]);
579 1.1 matt gemini_putchar(hexc[(val >> 20) & 0xf]);
580 1.1 matt gemini_putchar(hexc[(val >> 16) & 0xf]);
581 1.1 matt gemini_putchar(hexc[(val >> 12) & 0xf]);
582 1.1 matt gemini_putchar(hexc[(val >> 8) & 0xf]);
583 1.1 matt gemini_putchar(hexc[(val >> 4) & 0xf]);
584 1.1 matt gemini_putchar(hexc[(val >> 0) & 0xf]);
585 1.1 matt }
586 1.5 cliff #endif /* VERBOSE_INIT_ARM */
587 1.1 matt
588 1.1 matt /*
589 1.1 matt * u_int initarm(...)
590 1.1 matt *
591 1.1 matt * Initial entry point on startup. This gets called before main() is
592 1.1 matt * entered.
593 1.1 matt * It should be responsible for setting up everything that must be
594 1.1 matt * in place when main is called.
595 1.1 matt * This includes
596 1.1 matt * Taking a copy of the boot configuration structure.
597 1.1 matt * Initialising the physical console so characters can be printed.
598 1.1 matt * Setting up page tables for the kernel
599 1.1 matt * Relocating the kernel to the bottom of physical memory
600 1.1 matt */
601 1.1 matt u_int
602 1.1 matt initarm(void *arg)
603 1.1 matt {
604 1.5 cliff GEMINI_PUTCHAR('0');
605 1.3 cliff
606 1.1 matt /*
607 1.8 cliff * start cpu#1 now
608 1.8 cliff */
609 1.8 cliff gemini_cpu1_start();
610 1.8 cliff
611 1.8 cliff /*
612 1.1 matt * When we enter here, we are using a temporary first level
613 1.1 matt * translation table with section entries in it to cover the OBIO
614 1.1 matt * peripherals and SDRAM. The temporary first level translation table
615 1.1 matt * is at the end of SDRAM.
616 1.1 matt */
617 1.1 matt
618 1.1 matt /* Heads up ... Setup the CPU / MMU / TLB functions. */
619 1.5 cliff GEMINI_PUTCHAR('1');
620 1.1 matt if (set_cpufuncs())
621 1.1 matt panic("cpu not recognized!");
622 1.1 matt
623 1.5 cliff GEMINI_PUTCHAR('2');
624 1.1 matt init_clocks();
625 1.5 cliff GEMINI_PUTCHAR('3');
626 1.1 matt
627 1.1 matt /* The console is going to try to map things. Give pmap a devmap. */
628 1.1 matt pmap_devmap_register(devmap);
629 1.5 cliff GEMINI_PUTCHAR('4');
630 1.1 matt consinit();
631 1.5 cliff GEMINI_PUTCHAR('5');
632 1.1 matt #ifdef KGDB
633 1.1 matt kgdb_port_init();
634 1.1 matt #endif
635 1.1 matt
636 1.1 matt /* Talk to the user */
637 1.1 matt printf("\nNetBSD/evbarm (gemini) booting ...\n");
638 1.1 matt
639 1.1 matt #ifdef BOOT_ARGS
640 1.1 matt char mi_bootargs[] = BOOT_ARGS;
641 1.1 matt parse_mi_bootargs(mi_bootargs);
642 1.1 matt #endif
643 1.1 matt
644 1.1 matt #ifdef VERBOSE_INIT_ARM
645 1.1 matt printf("initarm: Configuring system ...\n");
646 1.1 matt #endif
647 1.1 matt
648 1.1 matt /*
649 1.1 matt * Set up the variables that define the availability of physical
650 1.1 matt * memory.
651 1.1 matt */
652 1.9 cliff gemini_memchk();
653 1.1 matt physical_start = GEMINI_DRAM_BASE;
654 1.1 matt #define MEMSIZE_BYTES (MEMSIZE * 1024 * 1024)
655 1.1 matt physical_end = (physical_start & ~(0x400000-1)) + MEMSIZE_BYTES;
656 1.1 matt physmem = (physical_end - physical_start) / PAGE_SIZE;
657 1.1 matt
658 1.1 matt /* Fake bootconfig structure for the benefit of pmap.c. */
659 1.1 matt bootconfig.dramblocks = 1;
660 1.1 matt bootconfig.dram[0].address = physical_start;
661 1.1 matt bootconfig.dram[0].pages = physmem;
662 1.1 matt
663 1.1 matt /*
664 1.1 matt * Our kernel is at the beginning of memory, so set our free space to
665 1.1 matt * all the memory after the kernel.
666 1.1 matt */
667 1.1 matt physical_freestart = KERN_VTOPHYS(round_page((vaddr_t) _end));
668 1.1 matt physical_freeend = physical_end;
669 1.1 matt free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;
670 1.1 matt
671 1.1 matt /*
672 1.1 matt * This is going to do all the hard work of setting up the first
673 1.1 matt * and second level page tables. Pages of memory will be allocated
674 1.1 matt * and mapped for other structures that are required for system
675 1.1 matt * operation. When it returns, physical_freestart and free_pages will
676 1.1 matt * have been updated to reflect the allocations that were made. In
677 1.1 matt * addition, kernel_l1pt, kernel_pt_table[], systempage, irqstack,
678 1.1 matt * abtstack, undstack, kernelstack, msgbufphys will be set to point to
679 1.1 matt * the memory that was allocated for them.
680 1.1 matt */
681 1.1 matt setup_real_page_tables();
682 1.1 matt
683 1.1 matt /*
684 1.1 matt * Moved from cpu_startup() as data_abort_handler() references
685 1.1 matt * this during uvm init.
686 1.1 matt */
687 1.14 rmind uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);
688 1.1 matt
689 1.1 matt #ifdef VERBOSE_INIT_ARM
690 1.1 matt printf("bootstrap done.\n");
691 1.1 matt #endif
692 1.1 matt
693 1.1 matt arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
694 1.1 matt
695 1.1 matt /*
696 1.1 matt * Pages were allocated during the secondary bootstrap for the
697 1.1 matt * stacks for different CPU modes.
698 1.1 matt * We must now set the r13 registers in the different CPU modes to
699 1.1 matt * point to these stacks.
700 1.1 matt * Since the ARM stacks use STMFD etc. we must set r13 to the top end
701 1.1 matt * of the stack memory.
702 1.1 matt */
703 1.1 matt #ifdef VERBOSE_INIT_ARM
704 1.1 matt printf("init subsystems: stacks ");
705 1.1 matt #endif
706 1.1 matt
707 1.1 matt set_stackptr(PSR_FIQ32_MODE, fiqstack.pv_va + FIQ_STACK_SIZE * PAGE_SIZE);
708 1.1 matt set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
709 1.1 matt set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
710 1.1 matt set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
711 1.1 matt
712 1.1 matt /*
713 1.1 matt * Well we should set a data abort handler.
714 1.1 matt * Once things get going this will change as we will need a proper
715 1.1 matt * handler.
716 1.1 matt * Until then we will use a handler that just panics but tells us
717 1.1 matt * why.
718 1.1 matt * Initialisation of the vectors will just panic on a data abort.
719 1.1 matt * This just fills in a slightly better one.
720 1.1 matt */
721 1.1 matt #ifdef VERBOSE_INIT_ARM
722 1.1 matt printf("vectors ");
723 1.1 matt #endif
724 1.1 matt data_abort_handler_address = (u_int)data_abort_handler;
725 1.1 matt prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
726 1.1 matt undefined_handler_address = (u_int)undefinedinstruction_bounce;
727 1.1 matt
728 1.1 matt /* Initialise the undefined instruction handlers */
729 1.1 matt #ifdef VERBOSE_INIT_ARM
730 1.1 matt printf("undefined ");
731 1.1 matt #endif
732 1.1 matt undefined_init();
733 1.1 matt
734 1.1 matt /* Load memory into UVM. */
735 1.1 matt #ifdef VERBOSE_INIT_ARM
736 1.1 matt printf("page ");
737 1.1 matt #endif
738 1.1 matt uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */
739 1.5 cliff
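/*
 * Hand the free pages to UVM: the memory above the kernel image
 * (split around the reserved physical range, if one is configured)
 * plus the DRAM below the kernel's load address.
 */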
740 1.11 cliff #if (GEMINI_RAM_RESV_PBASE != 0)
741 1.11 cliff uvm_page_physload(atop(physical_freestart), atop(GEMINI_RAM_RESV_PBASE),
742 1.11 cliff atop(physical_freestart), atop(GEMINI_RAM_RESV_PBASE),
743 1.5 cliff VM_FREELIST_DEFAULT);
744 1.11 cliff uvm_page_physload(atop(GEMINI_RAM_RESV_PEND), atop(physical_freeend),
745 1.11 cliff atop(GEMINI_RAM_RESV_PEND), atop(physical_freeend),
746 1.5 cliff VM_FREELIST_DEFAULT);
747 1.5 cliff #else
748 1.1 matt uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
749 1.1 matt atop(physical_freestart), atop(physical_freeend),
750 1.1 matt VM_FREELIST_DEFAULT);
751 1.5 cliff #endif
752 1.2 cliff uvm_page_physload(atop(GEMINI_DRAM_BASE), atop(KERNEL_BASE_phys),
753 1.2 cliff atop(GEMINI_DRAM_BASE), atop(KERNEL_BASE_phys),
754 1.2 cliff VM_FREELIST_DEFAULT);
755 1.1 matt
756 1.1 matt /* Boot strap pmap telling it where the kernel page table is */
757 1.1 matt #ifdef VERBOSE_INIT_ARM
758 1.1 matt printf("pmap ");
759 1.1 matt #endif
760 1.1 matt pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);
761 1.1 matt
762 1.1 matt #ifdef VERBOSE_INIT_ARM
763 1.1 matt printf("done.\n");
764 1.1 matt #endif
765 1.1 matt
766 1.1 matt #ifdef IPKDB
767 1.1 matt /* Initialise ipkdb */
768 1.1 matt ipkdb_init();
769 1.1 matt if (boothowto & RB_KDB)
770 1.1 matt ipkdb_connect(0);
771 1.1 matt #endif
772 1.1 matt
773 1.5 cliff #if defined(MEMORY_DISK_DYNAMIC)
774 1.5 cliff md_root_setconf((char *)GEMINI_RAMDISK_VBASE, GEMINI_RAMDISK_SIZE);
775 1.5 cliff #endif
776 1.5 cliff
777 1.1 matt #ifdef KGDB
778 1.1 matt if (boothowto & RB_KDB) {
779 1.1 matt kgdb_debug_init = 1;
780 1.1 matt kgdb_connect(1);
781 1.1 matt }
782 1.1 matt #endif
783 1.1 matt
784 1.1 matt #ifdef DDB
785 1.1 matt db_trap_callback = gemini_db_trap;
786 1.1 matt db_machine_init();
787 1.1 matt
788 1.1 matt /* Firmware doesn't load symbols. */
789 1.1 matt ddb_init(0, NULL, NULL);
790 1.1 matt
791 1.1 matt if (boothowto & RB_KDB)
792 1.1 matt Debugger();
793 1.1 matt #endif
794 1.1 matt printf("initarm done.\n");
795 1.1 matt
796 1.1 matt /* We return the new stack pointer address */
797 1.1 matt return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
798 1.1 matt }
799 1.1 matt
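/*
 * init_clocks:
 *
 * Empty placeholder; no clock setup is done this early in boot.
 */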
800 1.1 matt static void
801 1.1 matt init_clocks(void)
802 1.1 matt {
803 1.1 matt }
804 1.1 matt
805 1.1 matt #ifndef CONSADDR
806 1.1 matt #error Specify the address of the console UART with the CONSADDR option.
807 1.1 matt #endif
808 1.1 matt #ifndef CONSPEED
809 1.1 matt #define CONSPEED 19200
810 1.1 matt #endif
811 1.1 matt #ifndef CONMODE
812 1.1 matt #define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
813 1.1 matt #endif
814 1.1 matt
815 1.1 matt static const bus_addr_t consaddr = CONSADDR;
816 1.1 matt static const int conspeed = CONSPEED;
817 1.1 matt static const int conmode = CONMODE;
818 1.1 matt
819 1.6 cliff #if CONSADDR==0x42000000
820 1.6 cliff /*
821 1.6 cliff * console initialization for obio com console
822 1.6 cliff */
823 1.1 matt void
824 1.1 matt consinit(void)
825 1.1 matt {
826 1.1 matt static int consinit_called = 0;
827 1.1 matt
828 1.1 matt if (consinit_called != 0)
829 1.1 matt return;
830 1.1 matt consinit_called = 1;
831 1.1 matt
832 1.1 matt if (comcnattach(&gemini_a4x_bs_tag, consaddr, conspeed,
833 1.1 matt GEMINI_COM_FREQ, COM_TYPE_16550_NOERS, conmode))
834 1.1 matt panic("Serial console can not be initialized.");
835 1.6 cliff }
836 1.6 cliff
837 1.6 cliff #elif CONSADDR==0x478003f8
838 1.6 cliff # include <arm/gemini/gemini_lpcvar.h>
839 1.6 cliff /*
840 1.6 cliff * console initialization for lpc com console
841 1.6 cliff */
842 1.6 cliff void
843 1.6 cliff consinit(void)
844 1.6 cliff {
845 1.6 cliff static int consinit_called = 0;
846 1.6 cliff bus_space_tag_t iot = &gemini_bs_tag;
847 1.6 cliff bus_space_handle_t lpchc_ioh;
848 1.6 cliff bus_space_handle_t lpcio_ioh;
849 1.6 cliff bus_size_t sz = L1_S_SIZE;
850 1.6 cliff gemini_lpc_softc_t lpcsoftc;
851 1.6 cliff gemini_lpc_bus_ops_t *ops;
852 1.6 cliff void *lpctag = &lpcsoftc;
853 1.6 cliff uint32_t r;
854 1.6 cliff extern gemini_lpc_bus_ops_t gemini_lpc_bus_ops;
855 1.6 cliff
856 1.6 cliff ops = &gemini_lpc_bus_ops;
857 1.6 cliff
858 1.6 cliff if (consinit_called != 0)
859 1.6 cliff return;
860 1.6 cliff consinit_called = 1;
861 1.6 cliff
862 1.6 cliff if (bus_space_map(iot, GEMINI_LPCHC_BASE, sz, 0, &lpchc_ioh))
863 1.6 cliff panic("consinit: LPCHC can not be mapped.");
864 1.6 cliff
865 1.6 cliff if (bus_space_map(iot, GEMINI_LPCIO_BASE, sz, 0, &lpcio_ioh))
866 1.6 cliff panic("consinit: LPCIO can not be mapped.");
867 1.6 cliff
868 1.6 cliff /* enable the LPC bus */
869 1.6 cliff r = bus_space_read_4(iot, lpchc_ioh, GEMINI_LPCHC_CSR);
870 1.6 cliff r |= LPCHC_CSR_BEN;
871 1.6 cliff bus_space_write_4(iot, lpchc_ioh, GEMINI_LPCHC_CSR, r);
872 1.6 cliff
873 1.6 cliff memset(&lpcsoftc, 0, sizeof(lpcsoftc));
874 1.6 cliff lpcsoftc.sc_iot = iot;
875 1.6 cliff lpcsoftc.sc_ioh = lpcio_ioh;
876 1.6 cliff
877 1.6 cliff /* activate Serial Port 1 */
878 1.6 cliff (*ops->lpc_pnp_enter)(lpctag);
879 1.6 cliff (*ops->lpc_pnp_write)(lpctag, 1, 0x30, 0x01);
880 1.6 cliff (*ops->lpc_pnp_exit)(lpctag);
881 1.6 cliff
882 1.6 cliff if (comcnattach(iot, consaddr, conspeed,
883 1.6 cliff IT8712F_COM_FREQ, COM_TYPE_NORMAL, conmode)) {
884 1.6 cliff panic("Serial console can not be initialized.");
885 1.6 cliff }
886 1.1 matt
887 1.6 cliff bus_space_unmap(iot, lpcio_ioh, sz);
888 1.6 cliff bus_space_unmap(iot, lpchc_ioh, sz);
889 1.1 matt }
890 1.6 cliff #else
891 1.6 cliff # error unknown console
892 1.6 cliff #endif
893 1.1 matt
894 1.1 matt #ifdef KGDB
895 1.1 matt #ifndef KGDB_DEVADDR
896 1.1 matt #error Specify the address of the kgdb UART with the KGDB_DEVADDR option.
897 1.1 matt #endif
898 1.1 matt #ifndef KGDB_DEVRATE
899 1.1 matt #define KGDB_DEVRATE 19200
900 1.1 matt #endif
901 1.1 matt
902 1.1 matt #ifndef KGDB_DEVMODE
903 1.1 matt #define KGDB_DEVMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
904 1.1 matt #endif
905 1.1 matt static const vaddr_t comkgdbaddr = KGDB_DEVADDR;
906 1.1 matt static const int comkgdbspeed = KGDB_DEVRATE;
907 1.1 matt static const int comkgdbmode = KGDB_DEVMODE;
908 1.1 matt
909 1.1 matt static void
910 1.1 matt kgdb_port_init(void)
911 1.1 matt {
912 1.1 matt static int kgdbsinit_called = 0;
913 1.1 matt
914 1.1 matt if (kgdbsinit_called != 0)
915 1.1 matt return;
916 1.1 matt
917 1.1 matt kgdbsinit_called = 1;
918 1.1 matt
919 1.1 matt bus_space_handle_t bh;
920 1.1 matt if (bus_space_map(&gemini_a4x_bs_tag, comkgdbaddr,
921 1.1 matt GEMINI_UART_SIZE, 0, &bh))
922 1.1 matt panic("kgdb port can not be mapped.");
923 1.1 matt
924 1.1 matt if (com_kgdb_attach(&gemini_a4x_bs_tag, comkgdbaddr, comkgdbspeed,
925 1.1 matt GEMINI_UART_SIZE, COM_TYPE_16550_NOERS, comkgdbmode))
926 1.1 matt panic("KGDB uart can not be initialized.");
927 1.1 matt
928 1.1 matt bus_space_unmap(&gemini_a4x_bs_tag, bh, GEMINI_UART_SIZE);
929 1.1 matt }
930 1.1 matt #endif
931 1.1 matt
932 1.1 matt static void
933 1.1 matt setup_real_page_tables(void)
934 1.1 matt {
935 1.1 matt /*
936 1.1 matt * We need to allocate some fixed page tables to get the kernel going.
937 1.1 matt *
938 1.1 matt * We are going to allocate our bootstrap pages from the beginning of
939 1.1 matt * the free space that we just calculated. We allocate one page
940 1.1 matt * directory and a number of page tables and store the physical
941 1.1 matt * addresses in the kernel_pt_table array.
942 1.1 matt *
943 1.1 matt * The kernel page directory must be on a 16K boundary. The page
944 1.1 matt * tables must be on 4K boundaries. What we do is allocate the
945 1.1 matt * page directory on the first 16K boundary that we encounter, and
946 1.1 matt * the page tables on 4K boundaries otherwise. Since we allocate
947 1.1 matt * at least 3 L2 page tables, we are guaranteed to encounter at
948 1.1 matt * least one 16K aligned region.
949 1.1 matt */
950 1.1 matt
951 1.1 matt #ifdef VERBOSE_INIT_ARM
952 1.1 matt printf("Allocating page tables\n");
953 1.1 matt #endif
954 1.1 matt
955 1.1 matt /*
956 1.1 matt * Define a macro to simplify memory allocation. As we allocate the
957 1.1 matt * memory, make sure that we don't walk over our temporary first level
958 1.1 matt * translation table.
959 1.1 matt */
960 1.1 matt #define valloc_pages(var, np) \
961 1.1 matt (var).pv_pa = physical_freestart; \
962 1.1 matt physical_freestart += ((np) * PAGE_SIZE); \
963 1.1 matt if (physical_freestart > (physical_freeend - L1_TABLE_SIZE)) \
964 1.1 matt panic("initarm: out of memory"); \
965 1.1 matt free_pages -= (np); \
966 1.1 matt (var).pv_va = KERN_PHYSTOV((var).pv_pa); \
967 1.1 matt memset((char *)(var).pv_va, 0, ((np) * PAGE_SIZE));
968 1.1 matt
969 1.1 matt int loop, pt_index;
970 1.1 matt
971 1.1 matt pt_index = 0;
972 1.1 matt kernel_l1pt.pv_pa = 0;
973 1.1 matt kernel_l1pt.pv_va = 0;
974 1.5 cliff #ifdef VERBOSE_INIT_ARM
975 1.5 cliff printf("%s: physical_freestart %#lx\n", __func__, physical_freestart);
976 1.5 cliff #endif
977 1.1 matt for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
978 1.1 matt /* Are we 16KB aligned for an L1 ? */
979 1.1 matt if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0
980 1.1 matt && kernel_l1pt.pv_pa == 0) {
981 1.1 matt valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
982 1.1 matt } else {
983 1.1 matt valloc_pages(kernel_pt_table[pt_index],
984 1.1 matt L2_TABLE_SIZE / PAGE_SIZE);
985 1.1 matt ++pt_index;
986 1.1 matt }
987 1.1 matt }
988 1.5 cliff
989 1.11 cliff #if (NGEMINIIPM > 0)
990 1.11 cliff valloc_pages(ipmq_pt, L2_TABLE_SIZE / PAGE_SIZE);
991 1.11 cliff #endif
992 1.11 cliff
993 1.5 cliff #ifdef VERBOSE_INIT_ARM
994 1.5 cliff pt_index=0;
995 1.5 cliff printf("%s: kernel_l1pt: %#lx:%#lx\n",
996 1.5 cliff __func__, kernel_l1pt.pv_va, kernel_l1pt.pv_pa);
997 1.5 cliff printf("%s: kernel_pt_table:\n", __func__);
998 1.5 cliff for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
999 1.5 cliff printf("\t%#lx:%#lx\n", kernel_pt_table[pt_index].pv_va,
1000 1.5 cliff kernel_pt_table[pt_index].pv_pa);
1001 1.5 cliff ++pt_index;
1002 1.5 cliff }
1003 1.11 cliff #if (NGEMINIIPM > 0)
1004 1.11 cliff printf("%s: ipmq_pt:\n", __func__);
1005 1.11 cliff printf("\t%#lx:%#lx\n", ipmq_pt.pv_va, ipmq_pt.pv_pa);
1006 1.11 cliff #endif
1007 1.5 cliff #endif
1008 1.1 matt
1009 1.1 matt /* This should never happen, but better to confirm it. */
1010 1.1 matt if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
1011 1.1 matt panic("initarm: Failed to align the kernel page directory");
1012 1.1 matt
1013 1.1 matt /*
1014 1.1 matt * Allocate a page for the system page mapped to V0x00000000
1015 1.1 matt * This page will just contain the system vectors and can be
1016 1.1 matt * shared by all processes.
1017 1.1 matt */
1018 1.1 matt valloc_pages(systempage, 1);
1019 1.1 matt systempage.pv_va = ARM_VECTORS_HIGH;
1020 1.1 matt
1021 1.1 matt /* Allocate stacks for all modes */
1022 1.1 matt valloc_pages(fiqstack, FIQ_STACK_SIZE);
1023 1.1 matt valloc_pages(irqstack, IRQ_STACK_SIZE);
1024 1.1 matt valloc_pages(abtstack, ABT_STACK_SIZE);
1025 1.1 matt valloc_pages(undstack, UND_STACK_SIZE);
1026 1.1 matt valloc_pages(kernelstack, UPAGES);
1027 1.1 matt
1028 1.1 matt /* Allocate the message buffer. */
1029 1.1 matt pv_addr_t msgbuf;
1030 1.1 matt int msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
1031 1.1 matt valloc_pages(msgbuf, msgbuf_pgs);
1032 1.1 matt msgbufphys = msgbuf.pv_pa;
1033 1.1 matt
1034 1.1 matt /*
1035 1.1 matt * Ok we have allocated physical pages for the primary kernel
1036 1.1 matt * page tables
1037 1.1 matt */
1038 1.1 matt
1039 1.1 matt #ifdef VERBOSE_INIT_ARM
1040 1.1 matt printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
1041 1.1 matt #endif
1042 1.1 matt
1043 1.1 matt /*
1044 1.1 matt * Now we start construction of the L1 page table
1045 1.1 matt * We start by mapping the L2 page tables into the L1.
1046 1.1 matt * This means that we can replace L1 mappings later on if necessary
1047 1.1 matt */
1048 1.1 matt vaddr_t l1_va = kernel_l1pt.pv_va;
1049 1.1 matt paddr_t l1_pa = kernel_l1pt.pv_pa;
1050 1.1 matt
1051 1.1 matt /* Map the L2 page tables into the L1 page table */
1052 1.1 matt pmap_link_l2pt(l1_va, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
1053 1.1 matt &kernel_pt_table[KERNEL_PT_SYS]);
1054 1.1 matt for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
1055 1.1 matt pmap_link_l2pt(l1_va, KERNEL_BASE + loop * 0x00400000,
1056 1.1 matt &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
1057 1.1 matt for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
1058 1.1 matt pmap_link_l2pt(l1_va, KERNEL_VM_BASE + loop * 0x00400000,
1059 1.1 matt &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
1060 1.1 matt
1061 1.1 matt /* update the top of the kernel VM */
1062 1.1 matt pmap_curmaxkvaddr =
1063 1.1 matt KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
1064 1.1 matt
1065 1.11 cliff #if (NGEMINIIPM > 0)
1066 1.11 cliff printf("%s:%d: pmap_link_l2pt ipmq_pt\n", __FUNCTION__, __LINE__);
1067 1.11 cliff pmap_link_l2pt(l1_va, GEMINI_IPMQ_VBASE, &ipmq_pt);
1068 1.11 cliff #endif
1069 1.11 cliff
1070 1.1 matt #ifdef VERBOSE_INIT_ARM
1071 1.1 matt printf("Mapping kernel\n");
1072 1.1 matt #endif
1073 1.1 matt
1074 1.1 matt /* Now we fill in the L2 pagetable for the kernel static code/data */
1075 1.1 matt #define round_L_page(x) (((x) + L2_L_OFFSET) & L2_L_FRAME)
1076 1.1 matt size_t textsize = round_L_page(etext - KERNEL_BASE_virt);
1077 1.1 matt size_t totalsize = round_L_page(_end - KERNEL_BASE_virt);
1078 1.1 matt /* offset of kernel in RAM */
1079 1.1 matt u_int offset = (u_int)KERNEL_BASE_virt - KERNEL_BASE;
1080 1.1 matt
1081 1.9 cliff #ifdef DDB
1082 1.9 cliff /* Map text section read-write. */
1083 1.9 cliff offset += pmap_map_chunk(l1_va,
1084 1.9 cliff (vaddr_t)KERNEL_BASE + offset,
1085 1.9 cliff physical_start + offset, textsize,
1086 1.9 cliff VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
1087 1.9 cliff PTE_CACHE);
1088 1.9 cliff #else
1089 1.1 matt /* Map text section read-only. */
1090 1.1 matt offset += pmap_map_chunk(l1_va,
1091 1.1 matt (vaddr_t)KERNEL_BASE + offset,
1092 1.1 matt physical_start + offset, textsize,
1093 1.1 matt VM_PROT_READ|VM_PROT_EXECUTE, PTE_CACHE);
1094 1.9 cliff #endif
1095 1.1 matt /* Map data and bss sections read-write. */
1096 1.1 matt offset += pmap_map_chunk(l1_va,
1097 1.1 matt (vaddr_t)KERNEL_BASE + offset,
1098 1.1 matt physical_start + offset, totalsize - textsize,
1099 1.1 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1100 1.1 matt
1101 1.1 matt #ifdef VERBOSE_INIT_ARM
1102 1.1 matt printf("Constructing L2 page tables\n");
1103 1.1 matt #endif
1104 1.1 matt
1105 1.1 matt /* Map the stack pages */
1106 1.1 matt pmap_map_chunk(l1_va, fiqstack.pv_va, fiqstack.pv_pa,
1107 1.1 matt FIQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1108 1.1 matt pmap_map_chunk(l1_va, irqstack.pv_va, irqstack.pv_pa,
1109 1.1 matt IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1110 1.1 matt pmap_map_chunk(l1_va, abtstack.pv_va, abtstack.pv_pa,
1111 1.1 matt ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1112 1.1 matt pmap_map_chunk(l1_va, undstack.pv_va, undstack.pv_pa,
1113 1.1 matt UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1114 1.1 matt pmap_map_chunk(l1_va, kernelstack.pv_va, kernelstack.pv_pa,
1115 1.1 matt UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
1116 1.1 matt
1117 1.1 matt pmap_map_chunk(l1_va, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
1118 1.1 matt L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);
1119 1.1 matt
1120 1.1 matt for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
1121 1.1 matt pmap_map_chunk(l1_va, kernel_pt_table[loop].pv_va,
1122 1.1 matt kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
1123 1.1 matt VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
1124 1.1 matt }
1125 1.1 matt
1126 1.1 matt /* Map the vector page. */
1127 1.1 matt pmap_map_entry(l1_va, ARM_VECTORS_HIGH, systempage.pv_pa,
1128 1.1 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1129 1.1 matt
1130 1.11 cliff #if (NGEMINIIPM > 0)
1131 1.11 cliff /* Map the IPM queue l2pt */
1132 1.11 cliff pmap_map_chunk(l1_va, ipmq_pt.pv_va, ipmq_pt.pv_pa,
1133 1.11 cliff L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
1134 1.11 cliff
1135 1.11 cliff /* Map the IPM queue pages */
1136 1.11 cliff pmap_map_chunk(l1_va, GEMINI_IPMQ_VBASE, GEMINI_IPMQ_PBASE,
1137 1.11 cliff GEMINI_IPMQ_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
1138 1.11 cliff
1139 1.11 cliff #ifdef GEMINI_SLAVE
1140 1.11 cliff /*
1141 1.11 cliff * Map all memory, including that owned by the other core.
1142 1.11 cliff * Take the RAM remap into account so that the view in this
1143 1.11 cliff * region is consistent with the MASTER.
1144 1.11 cliff */
1145 1.11 cliff pmap_map_chunk(l1_va,
1146 1.11 cliff GEMINI_ALLMEM_VBASE,
1147 1.11 cliff GEMINI_ALLMEM_PBASE + ((GEMINI_ALLMEM_SIZE - MEMSIZE) * 1024 * 1024),
1148 1.11 cliff (GEMINI_ALLMEM_SIZE - MEMSIZE) * 1024 * 1024,
1149 1.11 cliff VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1150 1.11 cliff pmap_map_chunk(l1_va,
1151 1.11 cliff GEMINI_ALLMEM_VBASE + GEMINI_BUSBASE * 1024 * 1024,
1152 1.11 cliff GEMINI_ALLMEM_PBASE,
1153 1.11 cliff (MEMSIZE * 1024 * 1024),
1154 1.11 cliff VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1155 1.11 cliff #else
1156 1.11 cliff /* Map all memory, including that owned by the other core */
1157 1.11 cliff pmap_map_chunk(l1_va, GEMINI_ALLMEM_VBASE, GEMINI_ALLMEM_PBASE,
1158 1.11 cliff GEMINI_ALLMEM_SIZE * 1024 * 1024, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
1159 1.11 cliff #endif /* GEMINI_SLAVE */
1160 1.11 cliff #endif /* NGEMINIIPM */
1161 1.11 cliff
1162 1.1 matt /*
1163 1.1 matt * Map integrated peripherals at same address in first level page
1164 1.1 matt * table so that we can continue to use console.
1165 1.1 matt */
1166 1.1 matt pmap_devmap_bootstrap(l1_va, devmap);
1167 1.1 matt
1168 1.1 matt
1169 1.1 matt #ifdef VERBOSE_INIT_ARM
1170 1.1 matt /* Tell the user about where all the bits and pieces live. */
1171 1.1 matt printf("%22s Physical Virtual Num\n", " ");
1172 1.1 matt printf("%22s Starting Ending Starting Ending Pages\n", " ");
1173 1.1 matt
1174 1.1 matt static const char mem_fmt[] =
1175 1.1 matt "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %d\n";
1176 1.1 matt static const char mem_fmt_nov[] =
1177 1.1 matt "%20s: 0x%08lx 0x%08lx %d\n";
1178 1.1 matt
1179 1.1 matt printf(mem_fmt, "SDRAM", physical_start, physical_end-1,
1180 1.1 matt KERN_PHYSTOV(physical_start), KERN_PHYSTOV(physical_end-1),
1181 1.1 matt physmem);
1182 1.1 matt printf(mem_fmt, "text section",
1183 1.1 matt KERN_VTOPHYS(KERNEL_BASE_virt), KERN_VTOPHYS(etext-1),
1184 1.1 matt (vaddr_t)KERNEL_BASE_virt, (vaddr_t)etext-1,
1185 1.1 matt (int)(textsize / PAGE_SIZE));
1186 1.1 matt printf(mem_fmt, "data section",
1187 1.1 matt KERN_VTOPHYS(__data_start), KERN_VTOPHYS(_edata),
1188 1.1 matt (vaddr_t)__data_start, (vaddr_t)_edata,
1189 1.1 matt (int)((round_page((vaddr_t)_edata)
1190 1.1 matt - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
1191 1.1 matt printf(mem_fmt, "bss section",
1192 1.1 matt KERN_VTOPHYS(__bss_start), KERN_VTOPHYS(__bss_end__),
1193 1.1 matt (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
1194 1.1 matt (int)((round_page((vaddr_t)__bss_end__)
1195 1.1 matt - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
1196 1.1 matt printf(mem_fmt, "L1 page directory",
1197 1.1 matt kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
1198 1.1 matt kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
1199 1.1 matt L1_TABLE_SIZE / PAGE_SIZE);
1200 1.1 matt printf(mem_fmt, "Exception Vectors",
1201 1.1 matt systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
1202 1.1 matt (vaddr_t)ARM_VECTORS_HIGH, (vaddr_t)ARM_VECTORS_HIGH + PAGE_SIZE - 1,
1203 1.1 matt 1);
1204 1.1 matt printf(mem_fmt, "FIQ stack",
1205 1.1 matt fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
1206 1.1 matt fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
1207 1.1 matt FIQ_STACK_SIZE);
1208 1.1 matt printf(mem_fmt, "IRQ stack",
1209 1.1 matt irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
1210 1.1 matt irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
1211 1.1 matt IRQ_STACK_SIZE);
1212 1.1 matt printf(mem_fmt, "ABT stack",
1213 1.1 matt abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
1214 1.1 matt abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
1215 1.1 matt ABT_STACK_SIZE);
1216 1.1 matt printf(mem_fmt, "UND stack",
1217 1.1 matt undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
1218 1.1 matt undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
1219 1.1 matt UND_STACK_SIZE);
1220 1.1 matt printf(mem_fmt, "SVC stack",
1221 1.1 matt kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
1222 1.1 matt kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
1223 1.1 matt UPAGES);
1224 1.1 matt printf(mem_fmt_nov, "Message Buffer",
1225 1.1 matt msgbufphys, msgbufphys + msgbuf_pgs * PAGE_SIZE - 1, msgbuf_pgs);
1226 1.1 matt printf(mem_fmt, "Free Memory", physical_freestart, physical_freeend-1,
1227 1.1 matt KERN_PHYSTOV(physical_freestart), KERN_PHYSTOV(physical_freeend-1),
1228 1.1 matt free_pages);
1229 1.1 matt #endif
1230 1.1 matt
1231 1.1 matt /*
1232 1.1 matt * Now we have the real page tables in place so we can switch to them.
1233 1.1 matt * Once this is done we will be running with the REAL kernel page
1234 1.1 matt * tables.
1235 1.1 matt */
1236 1.1 matt
1237 1.1 matt /* Switch tables */
1238 1.1 matt #ifdef VERBOSE_INIT_ARM
1239 1.1 matt printf("switching to new L1 page table @%#lx...", l1_pa);
1240 1.1 matt #endif
1241 1.1 matt
1242 1.1 matt cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
1243 1.15 uebayasi cpu_setttb(l1_pa);
1244 1.1 matt cpu_tlb_flushID();
1245 1.1 matt cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
1246 1.1 matt
1247 1.1 matt #ifdef VERBOSE_INIT_ARM
1248 1.1 matt printf("OK.\n");
1249 1.1 matt #endif
1250 1.1 matt }