/* $Id: imx23_olinuxino_machdep.c,v 1.1.2.2 2013/01/16 05:32:53 yamt Exp $ */

/*
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Petri Laakso.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/device.h>
#include <sys/lwp.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/rnd.h>
#include <sys/termios.h>
#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_pmap.h>

#include <machine/db_machdep.h>
#include <machine/bootconfig.h>
#include <machine/frame.h>
#include <machine/param.h>
#include <machine/pcb.h>
#include <machine/pmap.h>

#include <arm/undefined.h>
#include <arm/arm32/machdep.h>

#include <arm/imx/imx23_digctlreg.h>
#include <arm/imx/imx23_clkctrlreg.h>
#include <arm/imx/imx23_rtcreg.h>
#include <arm/imx/imx23_uartdbgreg.h>
#include <arm/imx/imx23var.h>

#include "plcom.h"
#if (NPLCOM > 0)
#include <evbarm/dev/plcomreg.h>
#include <evbarm/dev/plcomvar.h>
#endif

#include "opt_evbarm_boardtype.h"

static vaddr_t get_ttb(void);
static void setup_real_page_tables(void);
//static void entropy_init(void);

/*
 * Static device map for i.MX23 peripheral address space.
 */
#define _A(a) ((a) & ~L1_S_OFFSET)
#define _S(s) (((s) + L1_S_SIZE - 1) & ~(L1_S_SIZE-1))
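/*
 * _A() truncates an address down to a 1MB L1 section boundary and _S() rounds
 * a size up to a whole number of sections, so that each pmap_devmap entry
 * below covers whole 1MB L1 sections.
 */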
static const struct pmap_devmap imx23_devmap[] = {
	{
		_A(APBH_BASE),			/* Virtual address. */
		_A(APBH_BASE),			/* Physical address. */
		_S(APBH_SIZE + APBX_SIZE),	/* APBX located after APBH. */
		VM_PROT_READ|VM_PROT_WRITE,	/* Protection bits. */
		PTE_NOCACHE			/* Cache attributes. */
	},
	{ 0, 0, 0, 0, 0 }
};
#undef _A
#undef _S

static vm_offset_t physical_freestart;
static vm_offset_t physical_freeend;
static u_int free_pages;
//static rndsave_t imx23_boot_rsp;

BootConfig bootconfig;
vm_offset_t physical_start;
vm_offset_t physical_end;
char *boot_args;
paddr_t msgbufphys;

extern char KERNEL_BASE_phys;
extern char KERNEL_BASE_virt;
extern char _end[];
extern char __data_start[];
extern char _edata[];
extern char __bss_start[];
extern char __bss_end__[];
extern pv_addr_t kernelstack;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;

/* Define various stack sizes in pages. */
#define FIQ_STACK_SIZE	1
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

/* Macros to translate between physical and virtual addresses. */
#define KERNEL_BASE_PHYS ((paddr_t)&KERNEL_BASE_phys)
#define KERNEL_BASE_VIRT ((vaddr_t)&KERNEL_BASE_virt)
#define KERN_VTOPHYS(va) \
	((paddr_t)((vaddr_t)va - KERNEL_BASE_VIRT + KERNEL_BASE_PHYS))
#define KERN_PHYSTOV(pa) \
	((vaddr_t)((paddr_t)pa - KERNEL_BASE_PHYS + KERNEL_BASE_VIRT))
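/*
 * These translations are only valid for the statically mapped kernel image
 * (KERNEL_BASE_virt .. _end): they apply the constant offset between the
 * kernel's physical load address and its virtual link address.
 */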

#define KERNEL_PT_SYS		0 /* L2 table for mapping vectors page. */
#define KERNEL_PT_KERNEL	1 /* L2 table for mapping kernel. */
#define KERNEL_PT_KERNEL_NUM	4

#define KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
/* Page tables for mapping kernel VM */
#define KERNEL_PT_VMDATA_NUM	4 /* start with 16MB of KVM */
#define NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

#define KERNEL_VM_BASE	(KERNEL_BASE + 0x01000000)
#define KERNEL_VM_SIZE	(0xf0000000 - KERNEL_VM_BASE)

#define REG_RD(reg) *(volatile uint32_t *)(reg)
#define REG_WR(reg, val)					\
do {								\
	*(volatile uint32_t *)((reg)) = (val);			\
} while (0)
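
/*
 * Example (see cpu_reboot() below): the RTC watchdog count register can be
 * written directly with
 *
 *	REG_WR(HW_RTC_BASE + HW_RTC_WATCHDOG, 10000);
 *
 * Using the physical register base addresses works here because imx23_devmap
 * maps the APBH/APBX peripheral space with virtual address == physical
 * address.
 */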

/*
 * Initialize everything and return the new SVC stack pointer.
 */
u_int
initarm(void *arg)
{

	if (set_cpufuncs())
		panic("set_cpufuncs failed");
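
	/*
	 * Enter the static device mappings into the currently active L1
	 * translation table (its address is read back from the TTB register),
	 * so that the debug UART used by consinit() is already mapped before
	 * the real page tables are built in setup_real_page_tables().
	 */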
	pmap_devmap_bootstrap(get_ttb(), imx23_devmap);

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);

	consinit();
	//entropy_init();

	/* Talk to the user. */
#define BDSTR(s)	_BDSTR(s)
#define _BDSTR(s)	#s
	printf("\nNetBSD/evbarm (" BDSTR(EVBARM_BOARDTYPE) ") booting ...\n");
#undef BDSTR
#undef _BDSTR

	boot_args[0] = '\0';

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	physical_start = DRAM_BASE;
	physical_end = DRAM_BASE + MEMSIZE * 1024 * 1024;
	physmem = (physical_end - physical_start) / PAGE_SIZE;

	/* bootconfig is used by cpu_dump() and cousins. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = DRAM_BASE;
	bootconfig.dram[0].pages = physmem;

	/*
	 * Our kernel is at the beginning of the DRAM, so set our free space
	 * to all the memory after the kernel.
	 */
	physical_freestart = KERN_VTOPHYS(round_page((vaddr_t) _end));
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory. */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Set up first and second level page tables.  Pages of memory will be
	 * allocated and mapped for structures required for system operation.
	 * kernel_l1pt, kernel_pt_table[], systempage, irqstack, abtstack,
	 * undstack, kernelstack, and msgbufphys will be set to point to the
	 * memory that was allocated for them.
	 */
	setup_real_page_tables();

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	/* Copy vectors from page0 to the vectors page. */
	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_FIQ32_MODE,
	    fiqstack.pv_va + FIQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();
	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();
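
	/*
	 * physical_freestart has been advanced past the kernel image and past
	 * everything allocated by setup_real_page_tables() above, so only
	 * truly free pages are handed to UVM here.
	 */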
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Bootstrap pmap, telling it where the kernel page table is. */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef __HAVE_MEMORY_DISK__
	md_root_setconf(memory_disk, sizeof memory_disk);
#endif

#ifdef BOOTHOWTO
	boothowto |= BOOTHOWTO;
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	return kernelstack.pv_va + USPACE_SVC_STACK_TOP;
}

/*
 * Return the TTBR (Translation Table Base Register) value from the CP15
 * coprocessor.
 */
static vaddr_t
get_ttb(void)
{
	vaddr_t ttb;

	__asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb));

	return ttb;
}

/*
 * valloc_pages() is used to allocate free memory to be used for kernel pages.
 * Virtual and physical addresses of the allocated memory are saved for later
 * use in the following structures:
 *
 * - kernel_l1pt, which holds the address of the kernel's L1 translation table.
 * - kernel_pt_table[], which holds the addresses of the kernel's L2 page
 *   tables.
 *
 * pmap_link_l2pt() is used to create a link from an L1 table entry to an L2
 * page table.  The link is a reference to a coarse page table with 256
 * entries, splitting the 1MB that the L1 entry describes into 4kB pages
 * (256 * 4kB = 1MB).
 *
 * pmap_map_entry() updates the PTE in an L2 page table so that a VA points to
 * a single, previously allocated physical page.
 *
 * pmap_map_chunk() maps a chunk of memory using the most efficient mapping
 * possible (section, large page, small page) into the provided L1 and L2
 * tables at the specified virtual address.  pmap_map_chunk() expects the
 * linking to be done before it is called for chunks smaller than a section.
 */
static void
setup_real_page_tables(void)
{
	/*
	 * Define a macro to simplify memory allocation.  As we allocate the
	 * memory, make sure that we don't walk over our temporary first level
	 * translation table.
	 */
#define valloc_pages(var, np)						\
	(var).pv_pa = physical_freestart;				\
	physical_freestart += ((np) * PAGE_SIZE);			\
	if (physical_freestart > (physical_freeend - L1_TABLE_SIZE))	\
		panic("%s: out of memory", __func__);			\
	free_pages -= (np);						\
	(var).pv_va = KERN_PHYSTOV((var).pv_pa);			\
	memset((char *)(var).pv_va, 0, ((np) * PAGE_SIZE));

	int loop, pt_index;

	pt_index = 0;
	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;
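
	/*
	 * The loop below intentionally runs NUM_KERNEL_PTS + 1 times: exactly
	 * one iteration grabs the 16kB-aligned L1 translation table once
	 * physical_freestart reaches a 16kB boundary, and the remaining
	 * iterations allocate the NUM_KERNEL_PTS L2 page tables.
	 */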
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16kB aligned for an L1? */
		if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0 &&
		    kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[pt_index],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++pt_index;
		}
	}

	/* Make sure the L1 page table is aligned to 16kB. */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)
		panic("%s: Failed to align the kernel page directory",
		    __func__);

	/*
	 * Allocate a page for the system page mapped to ARM_VECTORS_HIGH.
	 * This page will just contain the system vectors and can be shared by
	 * all processes.
	 */
	valloc_pages(systempage, 1);
	systempage.pv_va = ARM_VECTORS_HIGH;

	/* Allocate stacks for all modes. */
	valloc_pages(fiqstack, FIQ_STACK_SIZE);
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate the message buffer. */
	pv_addr_t msgbuf;
	int msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(msgbuf, msgbuf_pgs);
	msgbufphys = msgbuf.pv_pa;

	vaddr_t l1_va = kernel_l1pt.pv_va;
	vaddr_t l1_pa = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table. */

	pmap_link_l2pt(l1_va, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);

	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1_va, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);

	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1_va, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* Update the top of the kernel VM. */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

	extern char etext[];
	size_t textsize = (uintptr_t)etext - KERNEL_BASE;
	size_t totalsize = (uintptr_t)_end - KERNEL_BASE;
	u_int logical;

	textsize = (textsize + PGOFSET) & ~PGOFSET;
	totalsize = (totalsize + PGOFSET) & ~PGOFSET;

	logical = 0x00000000;	/* offset of kernel in RAM */

	logical += pmap_map_chunk(l1_va, KERNEL_BASE + logical,
	    physical_start + logical, textsize,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	logical += pmap_map_chunk(l1_va, KERNEL_BASE + logical,
	    physical_start + logical, totalsize - textsize,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the stack pages. */
	pmap_map_chunk(l1_va, fiqstack.pv_va, fiqstack.pv_pa,
	    FIQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1_va, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop)
		pmap_map_chunk(l1_va, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map the vector page. */
	pmap_map_entry(l1_va, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_devmap_bootstrap(l1_va, imx23_devmap);

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about where all the bits and pieces live. */
	printf("%22s  Physical              Virtual        Num\n", " ");
	printf("%22s  Starting    Ending    Starting    Ending   Pages\n", " ");

	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %d\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx                       %d\n";

	printf(mem_fmt, "SDRAM", physical_start, physical_end - 1,
	    KERN_PHYSTOV(physical_start), KERN_PHYSTOV(physical_end - 1),
	    physmem);
	printf(mem_fmt, "text section",
	    KERN_VTOPHYS(KERNEL_BASE), KERN_VTOPHYS(etext - 1),
	    (vaddr_t)KERNEL_BASE, (vaddr_t)etext - 1,
	    (int)(textsize / PAGE_SIZE));
	printf(mem_fmt, "data section",
	    KERN_VTOPHYS(__data_start), KERN_VTOPHYS(_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
	    - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	printf(mem_fmt, "bss section",
	    KERN_VTOPHYS(__bss_start), KERN_VTOPHYS(__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
	    - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	printf(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	printf(mem_fmt, "Exception Vectors",
	    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
	    (vaddr_t)ARM_VECTORS_HIGH,
	    (vaddr_t)ARM_VECTORS_HIGH + PAGE_SIZE - 1, 1);
	printf(mem_fmt, "FIQ stack",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	printf(mem_fmt, "IRQ stack",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	printf(mem_fmt, "ABT stack",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	printf(mem_fmt, "UND stack",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	printf(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt_nov, "Message Buffer",
	    msgbufphys, msgbufphys + msgbuf_pgs * PAGE_SIZE - 1, msgbuf_pgs);
	printf(mem_fmt, "Free Memory", physical_freestart, physical_freeend - 1,
	    KERN_PHYSTOV(physical_freestart), KERN_PHYSTOV(physical_freeend - 1),
	    free_pages);
#endif

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(l1_pa, FALSE);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	return;
}

/*
 * Generate initial random bits for rnd_init().
 */
#ifdef notyet
static void
entropy_init(void)
{
	uint32_t tmp;
	int loop, index;

	/* Test if HW_DIGCTL_ENTROPY is feeding random numbers. */
	tmp = REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_ENTROPY);
	if (tmp == REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_ENTROPY))
		return;

	index = 0;
	for (loop = 0; loop < RND_SAVEWORDS; loop++) {
		imx23_boot_rsp.data[index++] = (uint8_t)(tmp);
		imx23_boot_rsp.data[index++] = (uint8_t)(tmp>>8);
		imx23_boot_rsp.data[index++] = (uint8_t)(tmp>>16);
		imx23_boot_rsp.data[index++] = (uint8_t)(tmp>>24);
		imx23_boot_rsp.entropy += 32;
		tmp = REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_ENTROPY);
	}

	extern rndsave_t *boot_rsp;
	boot_rsp = &imx23_boot_rsp;

	return;
}
#endif

/*
 * Initialize the console.
 */
static struct plcom_instance imx23_pi = {
	.pi_type = PLCOM_TYPE_PL011,
	.pi_iot = &imx23_bus_space,
	.pi_size = PL011COM_UART_SIZE,
	.pi_iobase = HW_UARTDBG_BASE
};

#define PLCONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
#define PLCONSPEED 115200
void
consinit(void)
{
	/* consinit() is also called from main(). */
	static int consinit_called = 0;

	if (consinit_called)
		return;

	plcomcnattach(&imx23_pi, PLCONSPEED, IMX23_UART_CLK, PLCONMODE, 0);

	consinit_called = 1;

	return;
}

/*
 * Reboot or halt the system.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	static int cpu_reboot_called = 0;

	boothowto |= howto;

	/*
	 * If this is the first invocation of cpu_reboot() and the RB_NOSYNC
	 * flag is not set in howto, sync and unmount the system disks by
	 * calling vfs_shutdown(9), and set the time-of-day clock by calling
	 * resettodr(9).
	 */
	if (!cpu_reboot_called && !(boothowto & RB_NOSYNC)) {
		vfs_shutdown();
		resettodr();
	}

	cpu_reboot_called = 1;

	IRQdisable;	/* FIQs stay on because they are special. */

	/*
	 * If rebooting after a crash (i.e., if RB_DUMP is set in howto, but
	 * RB_HALT is not), save a system crash dump.
	 */
	if ((boothowto & RB_DUMP) && !(boothowto & RB_HALT))
		panic("please implement crash dump!"); // XXX

	/* Run any shutdown hooks by calling pmf_system_shutdown(9). */
	pmf_system_shutdown(boothowto);

	printf("system %s.\n", boothowto & RB_HALT ? "halted" : "rebooted");

	if (boothowto & RB_HALT) {
		/* Enable i.MX233 wait-for-interrupt mode. */
		REG_WR(HW_CLKCTRL_BASE + HW_CLKCTRL_CPU,
		    (REG_RD(HW_CLKCTRL_BASE + HW_CLKCTRL_CPU) |
		    HW_CLKCTRL_CPU_INTERRUPT_WAIT));

		/* Disable FIQs and wait for an interrupt (which never arrives). */
		__asm volatile(
		    "mrs r0, cpsr\n\t"
		    "orr r0, #0x40\n\t"			/* Set the F bit to mask FIQs. */
		    "msr cpsr_c, r0\n\t"
		    "mov r0, #0\n\t"
		    "mcr p15, 0, r0, c7, c0, 4\n\t"	/* CP15 Wait For Interrupt. */
		);

		for (;;);

		/* NOT REACHED */
	}

	/*
	 * Reboot the system: arm and enable the RTC watchdog, then force its
	 * count to zero so that it expires and resets the chip.
	 */
	REG_WR(HW_RTC_BASE + HW_RTC_WATCHDOG, 10000);
	REG_WR(HW_RTC_BASE + HW_RTC_CTRL_SET, HW_RTC_CTRL_WATCHDOGEN);
	REG_WR(HW_RTC_BASE + HW_RTC_WATCHDOG, 0);

	for (;;);

	/* NOT REACHED */
}

/*
 * Delay for the given number of microseconds.
 */
void
delay(unsigned int us)
{
	uint32_t start;
	uint32_t now;
	uint32_t elapsed;
	uint32_t total;
	uint32_t last;

	total = 0;
	last = 0;
	start = REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_MICROSECONDS);

	do {
		now = REG_RD(HW_DIGCTL_BASE + HW_DIGCTL_MICROSECONDS);

		if (start <= now)
			elapsed = now - start;
		else	/* Take care of overflow. */
			elapsed = (UINT32_MAX - start) + 1 + now;

		total += elapsed - last;
		last = elapsed;

	} while (total < us);

	return;
}