/* $NetBSD: marvell_start.S,v 1.14 2022/05/20 15:11:07 rin Exp $ */
/*
 * Copyright (C) 2005, 2006 WIDE Project and SOUM Corporation.
 * All rights reserved.
 *
 * Written by Takashi Kiyohara and Susumu Miki for WIDE Project and SOUM
 * Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the name of SOUM Corporation
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT and SOUM CORPORATION ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT AND SOUM CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002, 2003 Genetec Corporation. All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputypes.h"
#include "opt_mvsoc.h"
#include <machine/asm.h>
#include <arm/armreg.h>
#include <evbarm/marvell/marvellreg.h>
#include "assym.h"

RCSID("$NetBSD: marvell_start.S,v 1.14 2022/05/20 15:11:07 rin Exp $")

#ifndef SDRAM_START
#define SDRAM_START     0x00000000
#endif

#define SHEEVA  1
#define PJ4B    2

/*
 * CPWAIT -- Canonical method to wait for CP15 update.
 * NOTE: Clobbers the specified temp reg.
 * copied from arm/arm/cpufunc_asm_xscale.S
 * XXX: better be in a common header file.
 */
#define CPWAIT_BRANCH                                                    \
        sub     pc, pc, #4

#define CPWAIT(tmp)                                                      \
        mrc     p15, 0, tmp, c2, c0, 0  /* arbitrary read of CP15 */    ;\
        mov     tmp, tmp                /* wait for it to complete */   ;\
        CPWAIT_BRANCH                   /* branch to next insn */
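/*
 * How this works (the standard XScale idiom): the CP15 read cannot
 * return until earlier CP15 operations have taken effect, the mov
 * consumes the result so execution stalls on it, and the branch to the
 * next instruction flushes the prefetched instructions before execution
 * continues with the new CP15 state.
 */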

/*
 * Kernel start routine for Marvell boards.
 * This code is executed first, immediately after the kernel has been
 * loaded by U-Boot.
 */
        .text

        .global _C_LABEL(marvell_start)
_C_LABEL(marvell_start):
        /* The loader for Marvell boards is U-Boot; it runs from RAM. */
        /*
         * The kernel is loaded into SDRAM (0x00200000..) and is expected
         * to run at VA 0xc0200000..
         */

#ifdef __ARMEB__
        /*
         * U-Boot runs in little-endian mode, so the first few
         * instructions must be encoded in the opposite byte order.
         */
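        /*
         * For example, "mrc p15, 0, r4, c1, c0, 0" encodes to 0xee114f10;
         * since the kernel is assembled big-endian but the CPU is still
         * fetching little-endian at this point, it is emitted below as the
         * byte-reversed word 0x104f11ee.
         */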

#ifdef CPU_SHEEVA
        /*
         * XXX
         * For now, disable the L2 cache unconditionally when CPU_SHEEVA is
         * defined.  This does not work on older CPUs.
         */
        .word   0x115f3fee      /* mrc  p15, 1, r5, c15, c1, 0 */
        .word   0x0155c5e3      /* bic  r5, r5, #0x400000 */
        .word   0x115f2fee      /* mcr  p15, 1, r5, c15, c1, 0 */

        /* Flush prefetch buffer. */
        .word   0x0000a0e1      /* nop */
        .word   0x0000a0e1      /* nop */
        .word   0x0000a0e1      /* nop */
#endif

        /* Turn on the CPU_CONTROL_BEND_ENABLE bit. */
        .word   0x104f11ee      /* mrc  p15, 0, r4, c1, c0, 0 */
        .word   0x804084e3      /* orr  r4, r4, #CPU_CONTROL_BEND_ENABLE */
        .word   0x104f01ee      /* mcr  p15, 0, r4, c1, c0, 0 */

        /* Flush prefetch buffer. */
        .word   0x0000a0e1      /* nop */
        .word   0x0000a0e1      /* nop */
        .word   0x0000a0e1      /* nop */

        CPWAIT(r4)
#endif

        /* Check cores */
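        /*
         * Walk the (CPU ID, core type) pairs between cores_start and
         * cores_end.  On a match, r8 holds SHEEVA or PJ4B; that value
         * selects the Sheeva L2 disable just below and the PJ4B-specific
         * MMU setup further on.
         */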
        mrc     p15, 0, r4, c0, c0, 0
        and     r4, r4, #CPU_ID_CPU_MASK
        adr     r5, cores_start
        adr     r6, cores_end
0:
        cmp     r5, r6
        beq     1f
        ldmia   r5!, {r7, r8}
        cmp     r4, r7
        bne     0b

        cmp     r8, #SHEEVA
        bne     1f

sheeva_l2_disable:
        /* Make sure L2 is disabled */
        mrc     p15, 1, r5, c15, c1, 0  @ Get Marvell Extra Features Register
        bic     r5, r5, #0x00400000     @ disable L2 cache
        mcr     p15, 1, r5, c15, c1, 0

#ifdef SHEEVA_L2_CACHE_WT
        /* L2 WT Mode */
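        /*
         * Note: the hard-coded address below assumes the SoC's internal
         * registers sit at their usual physical base 0xf1000000
         * (presumably MARVELL_INTERREGS_PBASE), i.e. this is offset
         * 0x20128 within that window.
         */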
        ldr     r5, =0xf1020128         /* CPU L2 Configuration Register */
        ldr     r6, [r5]
        bic     r6, r6, #0x10           /* Force Write Through */
        str     r6, [r5]
#endif

1:

        /* save u-boot's args */
        adr     r4, u_boot_args
        nop
        nop
        nop
        stmia   r4!, {r0, r1, r2, r3}
        nop
        nop
        nop

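        /*
         * Optionally fix up the SoC Device ID: read the 32-bit ID
         * register at offset 0x40000 of the internal register space,
         * clear its upper 16 bits and replace them with
         * MVSOC_FIXUP_DEVID.
         */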
#if defined(MVSOC_FIXUP_DEVID) && MVSOC_FIXUP_DEVID > 0
        adr     r6, marvell_interregs_pbase
        ldr     r7, [r6]
        add     r7, r7, #0x40000
        ldr     r6, [r7]
        bic     r6, r6, #0xff000000
        bic     r6, r6, #0x00ff0000
        /*
         * Some SoCs return a bogus Device ID.  Fix it up.
         */
        adr     r5, devid
        ldr     r5, [r5]
        orr     r6, r6, r5, lsl #16
        str     r6, [r7]
        b       1f
devid:
        .word   MVSOC_FIXUP_DEVID
marvell_interregs_pbase:
        .word   MARVELL_INTERREGS_PBASE
#endif
1:

        /* build page table from scratch */
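        /*
         * Each mmu_init_table entry (built with MMU_INIT below) is three
         * words: the number of 1MB sections, the byte offset of the first
         * L1 descriptor within the table, and the PA of the first section
         * OR'ed with its attribute bits.  The loop at 2: stores one
         * section descriptor per iteration, advancing the PA by L1_S_SIZE;
         * a section count of zero ends the table.
         */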
        ldr     r0, Lstartup_pagetable          /* pagetable */
        adr     r4, mmu_init_table
        b       3f

2:
        str     r3, [r0, r2]
        add     r2, r2, #4
        add     r3, r3, #(L1_S_SIZE)
        adds    r1, r1, #-1
        bhi     2b
3:
        ldmia   r4!, {r1, r2, r3}       /* # of sections, L1 table offset, PA|attr */
        cmp     r1, #0
        bne     2b

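        /*
         * Point the MMU at the new table.  TTBR0 is set and the TLB
         * flushed on every core; on PJ4B cores TTBR1, TTBCR and the ASID
         * are programmed as well.
         */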
        mcr     p15, 0, r0, c2, c0, 0   /* Set TTB */
        mcr     p15, 0, r0, c8, c7, 0   /* Flush TLB */
        cmp     r8, #PJ4B
        mcreq   p15, 0, r0, c2, c0, 1   /* Set TTB1 */
        moveq   r1, #TTBCR_S_N_1
        mcreq   p15, 0, r1, c2, c0, 2   /* Set TTBCR */
        mov     r0, #0
        mcreq   p15, 0, r0, c8, c7, 0   /* Flush TLB */

        mcreq   p15, 0, r0, c13, c0, 1  /* Set ASID to 0 */
        mcr     p15, 0, r0, c7, c6, 0   /* Invalidate D cache */
        mcr     p15, 0, r0, c7, c10, 4  /* Drain write-buffer */

        /* Ensure safe Translation Table. */

        /* Set the Domain Access register.  Very important! */
        mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
        mcr     p15, 0, r0, c3, c0, 0

        /* Enable MMU */
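        /*
         * On PJ4B the extended page-table format (XP) is enabled and the
         * I/D caches, write buffer and branch predictor are turned off
         * for now; on all cores the S (system protection) bit and the
         * MMU enable bit are set.
         */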
        mrc     p15, 0, r0, c1, c0, 0
        cmp     r8, #PJ4B
        orreq   r0, r0, #CPU_CONTROL_XP_ENABLE
        biceq   r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE)
        biceq   r0, r0, #(CPU_CONTROL_IC_ENABLE)
        biceq   r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
        orr     r0, r0, #CPU_CONTROL_SYST_ENABLE
        orr     r0, r0, #CPU_CONTROL_MMU_ENABLE
        mcr     p15, 0, r0, c1, c0, 0
        CPWAIT(r0)

        /* Jump to kernel code in TRUE VA */
        adr     r0, Lstart
        ldr     pc, [r0]

Lstart:
        .word   start

#ifndef STARTUP_PAGETABLE_ADDR
#define STARTUP_PAGETABLE_ADDR  0x00004000      /* aligned 16kByte */
#endif
Lstartup_pagetable:
        .word   STARTUP_PAGETABLE_ADDR

        .globl  _C_LABEL(u_boot_args)
u_boot_args:
        .space  16              /* r0, r1, r2, r3 */

cores_start:
        .word   CPU_ID_MV88SV131,       SHEEVA
        .word   CPU_ID_MV88FR571_VD,    SHEEVA  /* Is it Sheeva? */
        .word   CPU_ID_MV88SV581X_V6,   PJ4B
        .word   CPU_ID_MV88SV581X_V7,   PJ4B
        .word   CPU_ID_MV88SV584X_V7,   PJ4B
        .word   CPU_ID_ARM_88SV581X_V6, PJ4B
        .word   CPU_ID_ARM_88SV581X_V7, PJ4B
        .word   0, 0
cores_end:

#define MMU_INIT(va,pa,n_sec,attr) \
        .word   n_sec                                           ; \
        .word   4 * (((va) & 0xffffffff) >> L1_S_SHIFT)         ; \
        .word   ((pa) & 0xffffffff) | (attr)                    ;
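/*
 * Worked example, assuming the usual KERNEL_BASE of 0xc0000000 and 1MB
 * sections (L1_S_SHIFT == 20): MMU_INIT(KERNEL_BASE, SDRAM_START, 128, ...)
 * below emits { 128, 0x3000, SDRAM_START | attrs }, i.e. 128 sections
 * (128MB) starting at byte offset 0x3000 (L1 index 0xc00), mapping
 * VA 0xc0000000..0xc7ffffff onto SDRAM.
 */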

mmu_init_table:
        /* fill all table VA==PA */
        MMU_INIT(0x00000000, 0x00000000,
            1 << (32 - L1_S_SHIFT), L1_TYPE_S | L1_S_AP_KRW)

        /* map SDRAM VA==PA, WT cacheable */
        MMU_INIT(SDRAM_START, SDRAM_START,
            128, L1_TYPE_S | L1_S_C | L1_S_AP_KRW)

        /* map VA KERNEL_BASE..KERNEL_BASE+7ffffff to PA 0x00000000..0x07ffffff */
        MMU_INIT(KERNEL_BASE, SDRAM_START,
            128, L1_TYPE_S | L1_S_C | L1_S_AP_KRW)

        .word   0       /* end of table */