/*	$NetBSD: cpufunc_asm_arm9.S,v 1.12 2022/10/20 06:58:38 skrll Exp $	*/

/*
 * Copyright (c) 2001, 2004 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM9 assembly functions for CPU / MMU / TLB specific operations
 */

#include "assym.h"
#include <machine/asm.h>
#include <arm/locore.h>

/*
 * Functions to set the MMU Translation Table Base register.
 *
 * We need to clean and flush the caches first, since they are indexed
 * and tagged by virtual addresses that are about to change.
 */
ENTRY(arm9_setttb)
	cmp	r1, #0
	beq	1f

	stmfd	sp!, {r0, lr}
	bl	_C_LABEL(arm9_idcache_wbinv_all)
	ldmfd	sp!, {r0, lr}
	cmp	r0, #1			/* make sure EQ=0 */
1:
	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcrne	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(arm9_setttb)
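
/*
 * For reference, a rough C-level sketch of the logic above (illustrative
 * only, not part of the build; r1 is treated as a "flush" flag, and
 * load_ttb()/tlb_flush_ID() are just placeholder names for the two
 * coprocessor writes issued above):
 *
 *	void
 *	arm9_setttb(u_int ttb, bool flush)
 *	{
 *		if (flush) {
 *			arm9_idcache_wbinv_all();
 *			load_ttb(ttb);		// mcr p15, 0, ttb, c2, c0, 0
 *			tlb_flush_ID();		// mcr p15, 0, rd, c8, c7, 0
 *		} else {
 *			load_ttb(ttb);		// TLBs are left untouched
 *		}
 *	}
 */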

/*
 * TLB functions
 */
ENTRY(arm9_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
#if PAGE_SIZE == 2 * L2_S_SIZE
	/* A VM page spans two small L2 pages; flush the second half too. */
	add	r0, r0, #L2_S_SIZE
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
#endif
	mov	pc, lr
END(arm9_tlb_flushID_SE)

/*
 * Cache operations.  For whole-cache operations we use the set/index
 * (rather than virtual address) forms of the cache maintenance
 * instructions; the loop parameters live in the arm9_dcache_* variables
 * declared at the end of this file.
 */
	s_max	.req r0
	i_max	.req r1
	s_inc	.req r2
	i_inc	.req r3
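
/*
 * The whole-cache loops below (.Larm9_dcache_wb and arm9_dcache_wbinv_all)
 * walk the D cache by set and index.  Roughly, as a C sketch (illustrative
 * only; the four parameters are the arm9_dcache_* words declared at the
 * end of this file, and dcache_op_set_index() stands for the
 * "mcr p15, 0, rd, c7, c10/c14, 2" operation):
 *
 *	for (set = sets_max; set >= 0; set -= sets_inc) {
 *		for (index = index_max; index != 0; index -= index_inc)
 *			dcache_op_set_index(set | index);
 *		dcache_op_set_index(set);	// index 0 is done last
 *	}
 *
 * Here set is signed and index unsigned; index_inc/index_max step the
 * index (way) number through the top bits of the operand while the set
 * number sits in the lower bits.
 */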

ENTRY_NP(arm9_icache_sync_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000		/* range >= 16KB? sync the whole cache */
	bcs	.Larm9_icache_sync_all
	ldr	ip, [ip]
	/* Round the start down to a cache line; grow the length to match. */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_sync_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_sync_next
	mov	pc, lr
END(arm9_icache_sync_range)

ENTRY_NP(arm9_icache_sync_all)
.Larm9_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larm9_dcache_wb:
	ldr	ip, .Larm9_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
	orr	ip, s_max, i_max
.Lnext_index:
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	.Lnext_index		/* Next index */
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	.Lnext_set		/* Next set */
	mov	pc, lr

.Larm9_line_size:
	.word	_C_LABEL(arm_pcache) + DCACHE_LINE_SIZE
END(arm9_icache_sync_all)

ENTRY(arm9_dcache_wb_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wb
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_wb_next:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_wb_next
	mov	pc, lr
END(arm9_dcache_wb_range)

ENTRY(arm9_dcache_wbinv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_wbinv_next:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_wbinv_next
	mov	pc, lr
END(arm9_dcache_wbinv_range)

/*
 * Note that we must not simply invalidate everything: invalidation
 * discards dirty data, so if the range is too big we fall back on a
 * write-back-and-invalidate of the entire cache instead.
 */
ENTRY(arm9_dcache_inv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_inv_next:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_inv_next
	mov	pc, lr
END(arm9_dcache_inv_range)
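
/*
 * All of the *_range functions above share the same pattern; a rough C
 * sketch for arm9_dcache_inv_range() (illustrative only; line_size stands
 * for the DCACHE_LINE_SIZE word loaded via .Larm9_line_size, and
 * dcache_inv_line() for the "mcr p15, 0, va, c7, c6, 1" operation):
 *
 *	void
 *	arm9_dcache_inv_range(vaddr_t va, vsize_t len)
 *	{
 *		ssize_t n;
 *
 *		if (len >= 0x4000) {
 *			arm9_dcache_wbinv_all();	// big range: do it all
 *			return;
 *		}
 *		n = len + (va & (line_size - 1));	// extend by misalignment
 *		va &= ~(vaddr_t)(line_size - 1);	// round down to a line
 *		do {
 *			dcache_inv_line(va);
 *			va += line_size;
 *			n -= line_size;
 *		} while (n >= 0);
 *	}
 */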

ENTRY(arm9_idcache_wbinv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_id_wbinv_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_id_wbinv_next
	mov	pc, lr
END(arm9_idcache_wbinv_range)

ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

ENTRY(arm9_dcache_wbinv_all)
.Larm9_dcache_wbinv_all:
	ldr	ip, .Larm9_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
	orr	ip, s_max, i_max
.Lnext_index_inv:
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	.Lnext_index_inv	/* Next index */
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	.Lnext_set_inv		/* Next set */
	mov	pc, lr

.Larm9_cache_data:
	.word	_C_LABEL(arm9_dcache_sets_max)
END(arm9_dcache_wbinv_all)
END(arm9_idcache_wbinv_all)

/*
 * Context switch.
 *
 * These are the CPU-specific parts of the context switcher cpu_switch().
 * They actually perform the TTB reload.
 */
ENTRY(arm9_context_switch)
	/*
	 * We can assume that the caches will only contain kernel addresses
	 * at this point.  So no need to flush them again.
	 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	mcr	p15, 0, r0, c2, c0, 0	/* set the new TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* and flush the I+D tlbs */

	/* Paranoia -- make sure the pipeline is empty. */
	nop
	nop
	nop
	mov	pc, lr
END(arm9_context_switch)

	.bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))
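
/*
 * On ELF, where _C_LABEL(x) is plain x and _ASM_TYPE_OBJECT marks an
 * object symbol, C_OBJECT(foo) expands to roughly:
 *
 *	.globl foo
 *	.type  foo, %object
 *	foo:
 */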

/*
 * Parameters for the cache cleaning code.  Note that the order of these
 * four variables is assumed by the code above, which is why they are
 * declared here in the assembler file.
 */
	.align 0
C_OBJECT(arm9_dcache_sets_max)
	.space	4
C_OBJECT(arm9_dcache_index_max)
	.space	4
C_OBJECT(arm9_dcache_sets_inc)
	.space	4
C_OBJECT(arm9_dcache_index_inc)
	.space	4
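
/*
 * These words are expected to be filled in by the ARM9 CPU setup code
 * from the probed D-cache geometry before any of the cache routines
 * above run.  A hedged sketch of one plausible initialization
 * (illustrative only; line_size, nsets and log2_nways are placeholders
 * for the probed parameters, not real variable names):
 *
 *	arm9_dcache_sets_inc  = line_size;
 *	arm9_dcache_sets_max  = (nsets - 1) * line_size;
 *	arm9_dcache_index_inc = 1u << (32 - log2_nways);
 *	arm9_dcache_index_max = 0u - arm9_dcache_index_inc;
 */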