/*	$NetBSD: imx31lk_start.S,v 1.3 2009/11/05 16:28:09 uebayasi Exp $	*/

#include <machine/asm.h>
#include <arm/armreg.h>
#include <arm/arm32/pmap.h>
#include <arm/arm32/pte.h>


/*
 * Early bootstrap entry for the i.MX31 LiteKit: build a minimal set of
 * L1 section mappings (SDRAM + UART), open up domain access, clean the
 * D-cache, drain the write buffer, invalidate the TLBs, then jump to
 * the kernel's virtually-addressed "start" entry point.
 */

#define CPWAIT_BRANCH \
	sub	pc, pc, #4

#define CPWAIT(tmp) \
	mrc	p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
	mov	tmp, tmp		/* wait for it to complete */ ;\
	CPWAIT_BRANCH			/* branch to next insn */


#ifndef SDRAM_START
#define SDRAM_START 0x80000000
#endif

#define IMX31_DCACHE_SIZE 0x4000	/* 16KB L1 */

/*
 * L1 == "Level One" == "first-level"
 * L2 == "Level Two" == "second-level"
 */

	.text

	.global	_C_LABEL(imx31lk_start)
_C_LABEL(imx31lk_start):
	/* Figure out where we want to jump to when the time comes */
	adr	r8, .Lstart
	ldr	r8, [r8]

	/*
	 * set up virtual address space mapping
	 * for initial bootstrap.
	 */
	mov	r2, #(L1_S_SIZE)	/* 1MB chunks */

	/*
	 * Firmware already mapped SDRAM VA == PA. at 0x800..
	 * now map SDRAM also at VA 0x800...
	 */
	mrc	p15, 0, r0, c2, c0, 0	/* L1 table addr into r0 */
	add	r0, r0, #(0x800 * 4)	/* offset to 0x80000000 */

	mov	r3, #SDRAM_START	/* map to 0x800.. */
	orr	r3, r3, #(L1_S_AP(AP_KRW)) /* the usual perms & stuff */
	orr	r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #(L1_S_DOM(PMAP_DOMAIN_KERNEL))

	mov	r1, #0x80		/* 128 1MB entries */
1:
	/* and looplooploop */
	str	r3, [r0], #4		/* store PTE, advance to next slot */
	add	r3, r3, r2		/* next 1MB physical section */
	subs	r1, r1, #1
	bgt	1b

	/*
	 * Map an L1 section for each device to make this easy.
	 */
	/* UART1 */
	mrc	p15, 0, r0, c2, c0, 0	/* L1 table addr into r0 */
	add	r0, r0, #(0xfd0 * 4)	/* offset to 0xfd000000 */

	mov	r3, #0x43000000		/* PA 0x43f00000 = AIPS1/UART1 */
	orr	r3, r3, #0x00f00000
	orr	r3, r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #(L1_S_DOM(PMAP_DOMAIN_KERNEL))
	str	r3, [r0], #4		/* note autoinc */

	/* etc, TBD... */

	/*
	 * Make domain control go full art.
	 * (all 16 domains set to "manager": no permission checks)
	 */
	mov	r0, #0xffffffff
	mcr	p15, 0, r0, c3, c0, 0

	/*
	 * Now let's clean the cache again to make sure everything
	 * is in place.
	 *
	 * XXX: should this take into account the XScale cache clean bug?
	 * XXX(review): loop exits via bne when r3 hits 0, so set/index 0
	 * is never cleaned — looks like an off-by-one; confirm against
	 * the ARM1136 c7 clean-by-index recipe before changing.
	 */
	mov	r3, #(IMX31_DCACHE_SIZE)
	subs	r3, r3, #32		/* 32-byte cache lines */
1:
	mcr	p15, 0, r3, c7, c10, 2	/* clean D-cache entry (set/index) */
	subs	r3, r3, #32
	bne	1b
	CPWAIT(r3)

	/* Drain write buffer (r6 is uninitialized; the data register is
	 * presumably ignored for this operation — verify) */
	mcr	p15, 0, r6, c7, c10, 4

	/* Invalidate TLBs just to be sure */
	mcr	p15, 0, r0, c8, c7, 0

	/*
	 * You are standing at the gate to NetBSD. --More--
	 * Unspeakable cruelty and harm lurk down there. --More--
	 * Are you sure you want to enter?
	 */
	adr	r8, .Lstart
	ldr	r8, [r8]
	mov	pc, r8			/* So be it */

/* symbol to use for address calculation in the right VA */
.Lstart:
	.word	start


/*
 * Calculate size of kernel to copy. Don't bother to copy bss,
 * although I guess the CPU could use the warmup exercise ...
 */
.Lcopy_size:
	.word	_edata - _C_LABEL(imx31lk_start)