/*	$Id: imx23_olinuxino_machdep.c,v 1.2.4.2 2013/02/25 00:28:36 tls Exp $	*/

/*
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Petri Laakso.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/device.h>
#include <sys/lwp.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/rnd.h>
#include <sys/systm.h>
#include <sys/termios.h>
#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_pmap.h>

#include <machine/db_machdep.h>
#include <machine/bootconfig.h>
#include <machine/frame.h>
#include <machine/param.h>
#include <machine/pcb.h>
#include <machine/pmap.h>

#include <arm/undefined.h>
#include <arm/arm32/machdep.h>

#include <arm/imx/imx23_digctlreg.h>
#include <arm/imx/imx23_clkctrlreg.h>
#include <arm/imx/imx23_rtcreg.h>
#include <arm/imx/imx23_uartdbgreg.h>
#include <arm/imx/imx23var.h>

#include "plcom.h"
#if (NPLCOM > 0)
#include <evbarm/dev/plcomreg.h>
#include <evbarm/dev/plcomvar.h>
#endif

#include "opt_evbarm_boardtype.h"

static vaddr_t get_ttb(void);
static void setup_real_page_tables(void);
//static void entropy_init(void);

/*
 * Static device map for i.MX23 peripheral address space.
 */
#define _A(a)	((a) & ~L1_S_OFFSET)
#define _S(s)	(((s) + L1_S_SIZE - 1) & ~(L1_S_SIZE-1))
static const struct pmap_devmap imx23_devmap[] = {
	{
		_A(APBH_BASE),			/* Virtual address. */
		_A(APBH_BASE),			/* Physical address. */
		_S(APBH_SIZE + APBX_SIZE),	/* APBX located after APBH. */
		VM_PROT_READ|VM_PROT_WRITE,	/* Protection bits. */
		PTE_NOCACHE			/* Cache attributes. */
	},
	{ 0, 0, 0, 0, 0 }
};
#undef _A
#undef _S

static vm_offset_t physical_freestart;
static vm_offset_t physical_freeend;
static u_int free_pages;

BootConfig bootconfig;
vm_offset_t physical_start;
vm_offset_t physical_end;
static char kernel_boot_args[MAX_BOOT_STRING];
char *boot_args;
paddr_t msgbufphys;

extern char KERNEL_BASE_phys;
extern char KERNEL_BASE_virt;
extern char _end[];
extern char __data_start[];
extern char _edata[];
extern char __bss_start[];
extern char __bss_end__[];
extern pv_addr_t kernelstack;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;

/* Define various stack sizes in pages. */
#define FIQ_STACK_SIZE	1
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

/* Macros to translate between physical and virtual addresses. */
#define KERNEL_BASE_PHYS ((paddr_t)&KERNEL_BASE_phys)
#define KERNEL_BASE_VIRT ((vaddr_t)&KERNEL_BASE_virt)
#define KERN_VTOPHYS(va) \
	((paddr_t)((vaddr_t)va - KERNEL_BASE_VIRT + KERNEL_BASE_PHYS))
#define KERN_PHYSTOV(pa) \
	((vaddr_t)((paddr_t)pa - KERNEL_BASE_PHYS + KERNEL_BASE_VIRT))

#define KERNEL_PT_SYS		0	/* L2 table for mapping vectors page. */
#define KERNEL_PT_KERNEL	1	/* L2 table for mapping kernel. */
#define KERNEL_PT_KERNEL_NUM	4

#define KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
/* Page tables for mapping kernel VM */
#define KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

#define KERNEL_VM_BASE	(KERNEL_BASE + 0x01000000)
#define KERNEL_VM_SIZE	(0xf0000000 - KERNEL_VM_BASE)

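/*
 * bootimx23 is assumed to have built its temporary L1 translation table at
 * the very top of DRAM and to have placed the boot argument string just
 * below it; the addresses below mirror that layout.
 */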
#define L1_PAGE_TABLE	(DRAM_BASE + MEMSIZE * 1024 * 1024 - L1_TABLE_SIZE)
#define BOOTIMX23_ARGS	(L1_PAGE_TABLE - MAX_BOOT_STRING - 1)

#define REG_RD(reg)	*(volatile uint32_t *)(reg)
#define REG_WR(reg, val)						\
do {									\
	*(volatile uint32_t *)((reg)) = val;				\
} while (0)

/*
 * Initialize everything and return new svc stack pointer.
 */
u_int
initarm(void *arg)
{

	if (set_cpufuncs())
		panic("set_cpufuncs failed");

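	/*
	 * Map the on-chip peripherals into the translation table we are
	 * currently running on (presumably the one set up by bootimx23) so
	 * that the debug console is usable before the real page tables are
	 * built.
	 */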
	pmap_devmap_bootstrap(get_ttb(), imx23_devmap);

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);

	consinit();

	/* Talk to the user. */
#define BDSTR(s)	_BDSTR(s)
#define _BDSTR(s)	#s
	printf("\nNetBSD/evbarm (" BDSTR(EVBARM_BOARDTYPE) ") booting ...\n");
#undef BDSTR
#undef _BDSTR

	/* Copy boot arguments passed from bootimx23. */
	boot_args = (char *)BOOTIMX23_ARGS;
	memcpy(kernel_boot_args, boot_args, MAX_BOOT_STRING);
	boot_args = kernel_boot_args;
#ifdef VERBOSE_INIT_ARM
	printf("boot_args: %s\n", boot_args);
#endif
	parse_mi_bootargs(boot_args);

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	physical_start = DRAM_BASE;
	physical_end = DRAM_BASE + MEMSIZE * 1024 * 1024;
	physmem = (physical_end - physical_start) / PAGE_SIZE;

	/* bootconfig is used by cpu_dump() and cousins. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = DRAM_BASE;
	bootconfig.dram[0].pages = physmem;

	/*
	 * Our kernel is at the beginning of the DRAM, so set our free space to
	 * all the memory after the kernel.
	 */
	physical_freestart = KERN_VTOPHYS(round_page((vaddr_t) _end));
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory. */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Set up first and second level page tables. Pages of memory will be
	 * allocated and mapped for structures required for system operation.
	 * kernel_l1pt, kernel_pt_table[], systempage, irqstack, abtstack,
	 * undstack, kernelstack, msgbufphys will be set to point to the memory
	 * that was allocated for them.
	 */
	setup_real_page_tables();

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	/* Copy vectors from page0 to vectors page. */
	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_FIQ32_MODE,
	    fiqstack.pv_va + FIQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();
	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Bootstrap the pmap, telling it where the kernel page table is. */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef __HAVE_MEMORY_DISK__
	md_root_setconf(memory_disk, sizeof memory_disk);
#endif

#ifdef BOOTHOWTO
	boothowto |= BOOTHOWTO;
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	return kernelstack.pv_va + USPACE_SVC_STACK_TOP;
}

/*
 * Return TTBR (Translation Table Base Register) value from coprocessor.
 */
static vaddr_t
get_ttb(void)
{
	vaddr_t ttb;

	__asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb));

	return ttb;
}

/*
 * valloc_pages() is used to allocate free memory to be used for kernel pages.
 * Virtual and physical addresses of the allocated memory are saved for later
 * use in the structures:
 *
 * - kernel_l1pt, which holds the address of the kernel's L1 translation table.
 * - kernel_pt_table[], which holds the addresses of the kernel's L2 page
 *   tables.
 *
 * pmap_link_l2pt() is used to create a link from an L1 table entry to an L2
 * page table.  The link is a reference to a coarse page table with 256
 * entries, splitting the 1MB that the table describes into 4kB blocks.
 *
 * pmap_map_entry() updates the PTE in an L2 page table for a VA to point to a
 * single physical page previously allocated.
 *
 * pmap_map_chunk() maps a chunk of memory using the most efficient mapping
 * possible (section, large page, small page) into the provided L1 and L2
 * tables at the specified virtual address.  pmap_map_chunk() expects the
 * linking to be done before it is called for chunks smaller than a section.
 */
static void
setup_real_page_tables(void)
{
	/*
	 * Define a macro to simplify memory allocation. As we allocate the
	 * memory, make sure that we don't walk over our temporary first level
	 * translation table.
	 */
#define valloc_pages(var, np)						\
	(var).pv_pa = physical_freestart;				\
	physical_freestart += ((np) * PAGE_SIZE);			\
	if (physical_freestart > (physical_freeend - L1_TABLE_SIZE))	\
		panic("%s: out of memory", __func__);			\
	free_pages -= (np);						\
	(var).pv_va = KERN_PHYSTOV((var).pv_pa);			\
	memset((char *)(var).pv_va, 0, ((np) * PAGE_SIZE));

	int loop, pt_index;

	pt_index = 0;
	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;
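	/*
	 * One pass of this loop is consumed by the L1 table allocation, done
	 * as soon as physical_freestart reaches 16kB alignment; every other
	 * pass allocates one of the NUM_KERNEL_PTS L2 tables, hence the
	 * inclusive loop bound.
	 */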
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16kB aligned for an L1? */
		if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0 &&
		    kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[pt_index],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++pt_index;
		}
	}

	/* Make sure L1 page table is aligned to 16kB. */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)
		panic("%s: Failed to align the kernel page directory",
		    __func__);

	/*
	 * Allocate a page for the system page mapped to ARM_VECTORS_HIGH.
	 * This page will just contain the system vectors and can be shared by
	 * all processes.
	 */
	valloc_pages(systempage, 1);
	systempage.pv_va = ARM_VECTORS_HIGH;

	/* Allocate stacks for all modes. */
	valloc_pages(fiqstack, FIQ_STACK_SIZE);
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate the message buffer. */
	pv_addr_t msgbuf;
	int msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(msgbuf, msgbuf_pgs);
	msgbufphys = msgbuf.pv_pa;

	vaddr_t l1_va = kernel_l1pt.pv_va;
	vaddr_t l1_pa = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table. */

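	/*
	 * Each pmap_link_l2pt() call below is assumed to hook one page worth
	 * of L2 descriptors into the L1 table, covering a 4MB (0x00400000)
	 * region of virtual address space.
	 */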
	pmap_link_l2pt(l1_va, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);

	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1_va, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);

	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1_va, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* Update the top of the kernel VM. */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

	extern char etext[];
	size_t textsize = (uintptr_t)etext - KERNEL_BASE;
	size_t totalsize = (uintptr_t)_end - KERNEL_BASE;
	u_int logical;

	textsize = (textsize + PGOFSET) & ~PGOFSET;
	totalsize = (totalsize + PGOFSET) & ~PGOFSET;

	logical = 0x00000000;	/* offset of kernel in RAM */

	logical += pmap_map_chunk(l1_va, KERNEL_BASE + logical,
	    physical_start + logical, textsize,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	logical += pmap_map_chunk(l1_va, KERNEL_BASE + logical,
	    physical_start + logical, totalsize - textsize,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the stack pages. */
	pmap_map_chunk(l1_va, fiqstack.pv_va, fiqstack.pv_pa,
	    FIQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop)
		pmap_map_chunk(l1_va, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map the vector page. */
	pmap_map_entry(l1_va, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_devmap_bootstrap(l1_va, imx23_devmap);

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about where all the bits and pieces live. */
	printf("%22s Physical Virtual Num\n", " ");
	printf("%22s Starting Ending Starting Ending Pages\n", " ");

	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %d\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx %d\n";

	printf(mem_fmt, "SDRAM", physical_start, physical_end-1,
	    KERN_PHYSTOV(physical_start), KERN_PHYSTOV(physical_end-1),
	    physmem);
	printf(mem_fmt, "text section",
	    KERN_VTOPHYS(KERNEL_BASE), KERN_VTOPHYS(etext-1),
	    (vaddr_t)KERNEL_BASE, (vaddr_t)etext-1,
	    (int)(textsize / PAGE_SIZE));
	printf(mem_fmt, "data section",
	    KERN_VTOPHYS(__data_start), KERN_VTOPHYS(_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
	    - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	printf(mem_fmt, "bss section",
	    KERN_VTOPHYS(__bss_start), KERN_VTOPHYS(__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
	    - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	printf(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	printf(mem_fmt, "Exception Vectors",
	    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
	    (vaddr_t)ARM_VECTORS_HIGH,
	    (vaddr_t)ARM_VECTORS_HIGH + PAGE_SIZE - 1, 1);
	printf(mem_fmt, "FIQ stack",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	printf(mem_fmt, "IRQ stack",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	printf(mem_fmt, "ABT stack",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	printf(mem_fmt, "UND stack",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	printf(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt_nov, "Message Buffer",
	    msgbufphys, msgbufphys + msgbuf_pgs * PAGE_SIZE - 1, msgbuf_pgs);
	printf(mem_fmt, "Free Memory", physical_freestart, physical_freeend-1,
	    KERN_PHYSTOV(physical_freestart), KERN_PHYSTOV(physical_freeend-1),
	    free_pages);
#endif

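	/*
	 * Switch from the bootstrap translation table to the newly built
	 * kernel L1 table: open up the domains, load the new TTB, flush the
	 * TLBs, and finally restrict access to the kernel domain only.
	 */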
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(l1_pa, FALSE);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	return;
}

/*
 * Initialize console.
 */
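/*
 * The i.MX23 debug UART is compatible with the ARM PrimeCell PL011, so the
 * plcom(4) driver is used for the console.
 */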
static struct plcom_instance imx23_pi = {
	.pi_type = PLCOM_TYPE_PL011,
	.pi_iot = &imx23_bus_space,
	.pi_size = PL011COM_UART_SIZE,
	.pi_iobase = HW_UARTDBG_BASE
};

#define PLCONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
#define PLCONSPEED 115200
void
consinit(void)
{
	/* consinit() is also called from main(). */
	static int consinit_called = 0;

	if (consinit_called)
		return;

	plcomcnattach(&imx23_pi, PLCONSPEED, IMX23_UART_CLK, PLCONMODE, 0);

	consinit_called = 1;

	return;
}

/*
 * Reboot or halt the system.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	static int cpu_reboot_called = 0;

	boothowto |= howto;

	/*
	 * If this is the first invocation of cpu_reboot() and the RB_NOSYNC
	 * flag is not set in howto, sync and unmount the system disks by
	 * calling vfs_shutdown(9) and set the time-of-day clock by calling
	 * resettodr(9).
	 */
	if (!cpu_reboot_called && !(boothowto & RB_NOSYNC)) {
		vfs_shutdown();
		resettodr();
	}

	cpu_reboot_called = 1;

	IRQdisable;	/* FIQs stay on because they are special. */

	/*
	 * If rebooting after a crash (i.e., if RB_DUMP is set in howto, but
	 * RB_HALT is not), save a system crash dump.
	 */
	if ((boothowto & RB_DUMP) && !(boothowto & RB_HALT))
		panic("please implement crash dump!"); // XXX

	/* Run any shutdown hooks by calling pmf_system_shutdown(9). */
	pmf_system_shutdown(boothowto);

	printf("system %s.\n", boothowto & RB_HALT ? "halted" : "rebooted");

	if (boothowto & RB_HALT) {
		/* Enable i.MX233 wait-for-interrupt mode. */
		REG_WR(HW_CLKCTRL_BASE + HW_CLKCTRL_CPU,
		    (REG_RD(HW_CLKCTRL_BASE + HW_CLKCTRL_CPU) |
		    HW_CLKCTRL_CPU_INTERRUPT_WAIT));

		/* Disable FIQs and wait for interrupt (which never arrives) */
		__asm volatile(
		    "mrs r0, cpsr\n\t"		/* Read the current CPSR. */
		    "orr r0, #0x40\n\t"		/* Set the F bit to mask FIQs. */
		    "msr cpsr_c, r0\n\t"	/* Write it back. */
		    "mov r0, #0\n\t"
		    "mcr p15, 0, r0, c7, c0, 4\n\t" /* CP15 Wait For Interrupt. */
		);

		for(;;);

		/* NOT REACHED */
	}

	/*
	 * Reboot the system: load the RTC watchdog counter, enable the
	 * watchdog, and then force the count back to zero so that it should
	 * expire immediately and reset the chip.
	 */
	REG_WR(HW_RTC_BASE + HW_RTC_WATCHDOG, 10000);
	REG_WR(HW_RTC_BASE + HW_RTC_CTRL_SET, HW_RTC_CTRL_WATCHDOGEN);
	REG_WR(HW_RTC_BASE + HW_RTC_WATCHDOG, 0);

	for(;;);

	/* NOT REACHED */
}

/*
 * Delay us microseconds.
 */
void
delay(unsigned int us)
{
	uint32_t start;
	uint32_t now;
	uint32_t elapsed;
	uint32_t total;
	uint32_t last;

	total = 0;
	last = 0;
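	/*
	 * HW_DIGCTL_MICROSECONDS is a free-running counter that is expected
	 * to advance once per microsecond; poll it until the requested number
	 * of microseconds has elapsed.
	 */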
	start = REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_MICROSECONDS);

	do {
		now = REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_MICROSECONDS);

		if (start <= now)
			elapsed = now - start;
		else	/* Take care of overflow. */
			elapsed = (UINT32_MAX - start) + 1 + now;

		total += elapsed - last;
		last = elapsed;

	} while (total < us);

	return;
}