/*	$NetBSD: cpufunc_asm_arm8.S,v 1.11 2022/10/20 06:58:38 skrll Exp $	*/

/*
 * Copyright (c) 1997 ARM Limited
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM8 assembly functions for CPU / MMU / TLB specific operations
 */

#include "assym.h"
#include <machine/asm.h>
#include <arm/locore.h>

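/*
 * arm8_clock_config: configure the ARM8 clock/test register.
 *
 * The calling convention below is inferred from the code, not stated
 * in the original: r0 is a mask of bits to clear, r1 a mask of bits to
 * toggle, and the previous register value is returned.  Dynamic
 * clocking (bit 0) is kept off while the register is being rewritten.
 */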
ENTRY(arm8_clock_config)
	mrc	p15, 0, r3, c15, c0, 0	/* Read the clock register */
	bic	r2, r3, #0x11		/* turn off dynamic clocking
					   and clear L bit */
	mcr	p15, 0, r2, c15, c0, 0	/* Write clock register */

	bic	r2, r3, r0		/* Clear bits */
	eor	r2, r2, r1		/* XOR bits */
	bic	r2, r2, #0x10		/* clear the L bit */

	bic	r1, r2, #0x01		/* still keep dynamic clocking off */
	mcr	p15, 0, r1, c15, c0, 0	/* Write clock register */
	mov	r0, r0			/* NOP */
	mov	r0, r0			/* NOP */
	mov	r0, r0			/* NOP */
	mov	r0, r0			/* NOP */
	mcr	p15, 0, r2, c15, c0, 0	/* Write clock register */
	mov	r0, r3			/* Return old value */
	mov	pc, lr
END(arm8_clock_config)

/*
 * Function to set the MMU Translation Table Base register.
 *
 * We need to clean and flush the cache as it uses virtual addresses
 * that are about to change.  Interrupts are masked for the duration
 * so that no cache line can be dirtied part-way through.
 */
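/*
 * Likely prototype (an assumption, inferred from the register use
 * below):
 *	void arm8_setttb(u_int ttb, bool flush);
 * r0 holds the new translation table base; r1 is non-zero if the
 * caches and TLB should be flushed after the write.
 */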
ENTRY(arm8_setttb)
	mrs	r3, cpsr
	orr	r2, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r2

	stmfd	sp!, {r0-r3, lr}
	bl	_C_LABEL(arm8_cache_cleanID)
	ldmfd	sp!, {r0-r3, lr}

	cmp	r1, #0			@ do we need to flush the caches?

	mcrne	p15, 0, r0, c7, c7, 0	/* flush I+D cache */

	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcrne	p15, 0, r0, c8, c7, 0

	/* For good measure we will flush the IDC as well */
	mcrne	p15, 0, r0, c7, c7, 0

	/* Make sure that the pipeline is emptied */
	mov	r0, r0
	mov	r0, r0
	msr	cpsr_all, r3

	mov	pc, lr
END(arm8_setttb)

/*
 * TLB functions
 */
ENTRY(arm8_tlb_flushID)
	mcr	p15, 0, r0, c8, c7, 0	/* flush I+D tlb */
	mov	pc, lr
END(arm8_tlb_flushID)

ENTRY(arm8_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c7, 1	/* flush I+D tlb single entry */
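	/*
	 * If the VM page size is twice the MMU small-page size, each
	 * VM page is backed by two MMU pages, so the TLB entry for the
	 * second half must be flushed as well.
	 */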
#if PAGE_SIZE == 2 * L2_S_SIZE
	add	r0, r0, #L2_S_SIZE
	mcr	p15, 0, r0, c8, c7, 1	/* flush I+D tlb single entry */
#endif
	mov	pc, lr
END(arm8_tlb_flushID_SE)

/*
 * Cache functions
 */
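/*
 * Note: the ARM8 cache is unified, which is why every operation below
 * acts on instructions and data together (hence the "ID" names).
 */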
ENTRY(arm8_cache_flushID)
	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */
	mov	pc, lr
END(arm8_cache_flushID)

ENTRY(arm8_cache_flushID_E)
	mcr	p15, 0, r0, c7, c7, 1	/* flush I+D single entry */
	mov	pc, lr
END(arm8_cache_flushID_E)

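/*
 * Clean the entire cache, one entry at a time.  Each block of sixteen
 * clean-entry operations below steps through lines 0x10 bytes (one
 * cache line) apart; the outer loop then advances bits [31:26] of the
 * index until it wraps back to zero, at which point every line has
 * been cleaned.  (The exact index/segment encoding is an assumption
 * based on the constants used here.)
 */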
ENTRY(arm8_cache_cleanID)
	mov	r0, #0x00000000

1:	mov	r2, r0
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1

	adds	r0, r0, #0x04000000
	bne	1b

	mov	pc, lr
END(arm8_cache_cleanID)

ENTRY(arm8_cache_cleanID_E)
	mcr	p15, 0, r0, c7, c11, 1	/* clean I+D single entry */
	mov	pc, lr
END(arm8_cache_cleanID_E)

ENTRY(arm8_cache_purgeID)
	/*
	 * ARM810 bug 3
	 *
	 * The clean and invalidate entry operation
	 * (mcr p15, 0, rd, c7, c15, 1) will not invalidate the entry
	 * if the line was already clean.
	 *
	 * Instead of using the combined clean and invalidate entry
	 * operation, use separate clean and invalidate operations,
	 * i.e.
	 * mcr p15, 0, rd, c7, c11, 1
	 * mcr p15, 0, rd, c7, c7, 1
	 */

	mov	r0, #0x00000000

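	/*
	 * Mask IRQs and FIQs so that no line can be dirtied between
	 * its clean and its invalidate, which would lose data.
	 */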
	mrs	r3, cpsr
	orr	r2, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r2

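	/*
	 * Walk the cache the same way as arm8_cache_cleanID above,
	 * but clean and then invalidate each entry.
	 */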
1:	mov	r2, r0
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1
	add	r2, r2, #0x10
	mcr	p15, 0, r2, c7, c11, 1
	mcr	p15, 0, r2, c7, c7, 1

	adds	r0, r0, #0x04000000
	bne	1b

	msr	cpsr_all, r3
	mov	pc, lr
END(arm8_cache_purgeID)

ENTRY(arm8_cache_purgeID_E)
	/*
	 * ARM810 bug 3
	 *
	 * The clean and invalidate entry operation
	 * (mcr p15, 0, rd, c7, c15, 1) will not invalidate the entry
	 * if the line was already clean.
	 *
	 * Instead of using the combined clean and invalidate entry
	 * operation, use separate clean and invalidate operations,
	 * i.e.
	 * mcr p15, 0, rd, c7, c11, 1
	 * mcr p15, 0, rd, c7, c7, 1
	 */
	mrs	r3, cpsr
	orr	r2, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r2
	mcr	p15, 0, r0, c7, c11, 1	/* clean I+D single entry */
	mcr	p15, 0, r0, c7, c7, 1	/* flush I+D single entry */
	msr	cpsr_all, r3
	mov	pc, lr
END(arm8_cache_purgeID_E)

/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch();
 * it performs the actual TTB reload, taking the new translation table
 * base in r0.
 */
ENTRY(arm8_context_switch)
	/* Flush the I+D cache before changing the translation tables */
	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */

	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* flush the I+D tlb */

#if 0
	/* For good measure we could flush the IDC again */
	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */
#endif

	/* Make sure that the pipeline is emptied */
	mov	r0, r0
	mov	r0, r0
	mov	pc, lr
END(arm8_context_switch)