/*	$NetBSD: nslu2_start.S,v 1.1.22.2 2006/09/09 02:38:55 rpaulo Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA, Jason R. Thorpe, and Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
38
39 #include <machine/asm.h>
40 #include <arm/armreg.h>
41 #include <arm/arm32/pte.h>
42
43 #include <arm/xscale/ixp425reg.h>
44
45 .section .start,"ax",%progbits
46
47 .global _C_LABEL(nslu2_start)
48 _C_LABEL(nslu2_start):
49 /*
50 * We will go ahead and disable the MMU here so that we don't
51 * have to worry about flushing caches, etc.
52 *
53 * Note that we may not currently be running VA==PA, which means
54 * we'll need to leap to the next insn after disabing the MMU.
55 */
56 adr r8, Lunmapped
57 bic r8, r8, #0xff000000 /* clear upper 8 bits */
58 orr r8, r8, #0x10000000 /* OR in physical base address */
59
60 mrc p15, 0, r2, c1, c0, 0
61 bic r2, r2, #CPU_CONTROL_MMU_ENABLE
62 orr r2, r2, #CPU_CONTROL_BEND_ENABLE
63 mcr p15, 0, r2, c1, c0, 0
64
65 nop
66 nop
67 nop
68
69 mov pc, r8 /* Heave-ho! */
70
71 Lunmapped:
72 /*
73 * We want to construct a memory map that maps us
74 * VA==PA (SDRAM at 0x10000000). We create these
75 * mappings uncached and unbuffered to be safe.
76 */
77
78 /*
79 * Step 1: Map the entire address space VA==PA.
80 */
81 adr r0, Ltable
82 ldr r0, [r0] /* r0 = &l1table */
83
84 mov r1, #(L1_TABLE_SIZE / 4) /* 4096 entry */
85 mov r2, #(L1_S_SIZE) /* 1MB / section */
86 mov r3, #(L1_S_AP(AP_KRW)) /* kernel read/write */
87 orr r3, r3, #(L1_TYPE_S) /* L1 entry is section */
88 1:
89 str r3, [r0], #0x04
90 add r3, r3, r2
91 subs r1, r1, #1
92 bgt 1b
93
94 /*
95 * Step 2: Map VA 0xc0000000->0xc3ffffff to PA 0x10000000->0x13ffffff.
96 */
97 adr r0, Ltable /* r0 = &l1table */
98 ldr r0, [r0]
99
100 mov r3, #(L1_S_AP(AP_KRW))
101 orr r3, r3, #(L1_TYPE_S)
102 orr r3, r3, #0x10000000
103 add r0, r0, #(0xc00 * 4) /* offset to 0xc00xxxxx */
104 mov r1, #0x40 /* 64MB */
105 1:
106 str r3, [r0], #0x04
107 add r3, r3, r2
108 subs r1, r1, #1
109 bgt 1b
110
111 /*
112 * Step 3: Map VA 0xf0000000->0xf0100000 to PA 0xc8000000->0xc8100000.
113 */
114 adr r0, Ltable /* r0 = &l1table */
115 ldr r0, [r0]
116
117 add r0, r0, #(0xf00 * 4) /* offset to 0xf0000000 */
118 mov r3, #0xc8000000
119 add r3, r3, #0x00100000
120 orr r3, r3, #(L1_S_AP(AP_KRW))
121 orr r3, r3, #(L1_TYPE_S)
122 str r3, [r0]
123
124 /*
125 * Step 4: Map VA 0xf0200000->0xf0300000 to PA 0xcc000000->0xcc100000.
126 */
127 adr r0, Ltable /* r0 = &l1table */
128 ldr r0, [r0]
129
130 add r0, r0, #(0xf00 * 4) /* offset to 0xf0200000 */
131 add r0, r0, #(0x002 * 4)
132 mov r3, #0xcc000000
133 add r3, r3, #0x00100000
134 orr r3, r3, #(L1_S_AP(AP_KRW))
135 orr r3, r3, #(L1_TYPE_S)
136 str r3, [r0]
137
138 /* OK! Page table is set up. Give it to the CPU. */
139 adr r0, Ltable
140 ldr r0, [r0]
141 mcr p15, 0, r0, c2, c0, 0
142
143 /* Flush the old TLBs, just in case. */
144 mcr p15, 0, r0, c8, c7, 0
145
146 /* Set the Domain Access register. Very important! */
147 mov r0, #1
148 mcr p15, 0, r0, c3, c0, 0
149
150 /* Get ready to jump to the "real" kernel entry point... */
151 ldr r1, Lstart
152 mov r1, r1 /* Make sure the load completes! */
153
154 /* OK, let's enable the MMU. */
155 mrc p15, 0, r2, c1, c0, 0
156 orr r2, r2, #CPU_CONTROL_MMU_ENABLE
157 orr r2, r2, #CPU_CONTROL_BEND_ENABLE
158 mcr p15, 0, r2, c1, c0, 0
159
160 nop
161 nop
162 nop
163
164 /* CPWAIT sequence to make sure the MMU is on... */
165 mrc p15, 0, r2, c2, c0, 0 /* arbitrary read of CP15 */
166 mov r2, r2 /* force it to complete */
167 mov pc, r1 /* leap to kernel entry point! */
168
169 Ltable:
170 .word 0x10200000 - 0x4000
171
172 Lstart:
173 .word start
174