/*	$NetBSD: imx31lk_start.S,v 1.2 2008/04/27 18:58:46 matt Exp $ */

#include <machine/asm.h>
#include <arm/armreg.h>
#include <arm/arm32/pmap.h>
#include <arm/arm32/pte.h>


/*
 */

#define CPWAIT_BRANCH						 \
	sub	pc, pc, #4

#define CPWAIT(tmp)						 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */


#ifndef SDRAM_START
#define SDRAM_START 0x80000000
#endif

#define IXM31_DCACHE_SIZE 0x4000	/* 16KB L1 */



	.text

	.global	_C_LABEL(imx31lk_start)
_C_LABEL(imx31lk_start):
	/* Figure out where we want to jump to when the time comes */
	adr	r8, .Lstart
	ldr	r8, [r8]

	/*
	 * set up virtual address space mapping
	 * for initial bootstrap.
	 */
	mov	r2, #(L1_S_SIZE)	/* 1MB chunks */

	/*
	 * Firmware already mapped SDRAM VA == PA at 0x800..
	 * now map SDRAM also at VA 0x800...
	 */
	mrc	p15, 0, r0, c2, c0, 0	/* L1 addr into r0 */
	add	r0, r0, #(0x800 * 4)	/* offset to 0x80000000 */

	mov	r3, #SDRAM_START	/* map to 0x800.. */
	orr	r3, r3, #(L1_S_AP(AP_KRW))	/* the usual perms & stuff */
	orr	r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #(L1_S_DOM(PMAP_DOMAIN_KERNEL))

	mov	r1, #0x80		/* 128 1MB entries */
1:
	/* and looplooploop */
	str	r3, [r0], #4
	add	r3, r3, r2
	subs	r1, r1, #1
	bgt	1b

	/*
	 * Map an L1 section for each device to make this easy.
	 */
	/* UART1 */
	mrc	p15, 0, r0, c2, c0, 0	/* Get L1 */
	add	r0, r0, #(0xfd0 * 4)	/* offset to 0xfd000000 */

	mov	r3, #0x43000000
	orr	r3, r3, #0x00f00000
	orr	r3, r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #(L1_S_DOM(PMAP_DOMAIN_KERNEL))
	str	r3, [r0], #4		/* note autoinc */

	/* etc, TBD... */

	/*
	 * Make domain control go full art.
	 */
	mov	r0, #0xffffffff
	mcr	p15, 0, r0, c3, c0, 0

	/*
	 * Now let's clean the cache again to make sure everything
	 * is in place.
	 *
	 * XXX: should this take into account the XScale cache clean bug?
	 */
	mov	r3, #(IXM31_DCACHE_SIZE)
	subs	r3, r3, #32
1:
	mcr	p15, 0, r3, c7, c10, 2
	subs	r3, r3, #32
	bne	1b
	CPWAIT(r3)

	/* Drain write buffer */
	mcr	p15, 0, r6, c7, c10, 4

	/* Invalidate TLBs just to be sure */
	mcr	p15, 0, r0, c8, c7, 0

	/*
	 * You are standing at the gate to NetBSD. --More--
	 * Unspeakable cruelty and harm lurk down there. --More--
	 * Are you sure you want to enter?
	 */
	adr	r8, .Lstart
	ldr	r8, [r8]
	mov	pc, r8			/* So be it */

/* symbol to use for address calculation in the right VA */
.Lstart:
	.word	start


/*
 * Calculate size of kernel to copy.  Don't bother to copy bss,
 * although I guess the CPU could use the warmup exercise ...
 */
.Lcopy_size:
	.word	_edata - _C_LABEL(imx31lk_start)