/*	$NetBSD: cpufunc_asm_armv8.S,v 1.8 2021/02/11 08:35:11 ryo Exp $	*/

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_cputypes.h"
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

	.text
	.align	2
/*
 * Macro to perform a cache maintenance operation over a virtual address
 * range.  Takes the start address in x0 and the length in x1; clobbers
 * x2-x5.
 */
.macro cache_handle_range dcop = "", icop = ""
	mrs	x3, ctr_el0
	mov	x4, #4			/* size of word */
.ifnb \dcop
	ubfx	x2, x3, #16, #4		/* x2 = D cache shift */
	lsl	x2, x4, x2		/* x2 = D cache line size */
.endif
.ifnb \icop
	and	x3, x3, #15		/* x3 = I cache shift */
	lsl	x3, x4, x3		/* x3 = I cache line size */
.endif
.ifnb \dcop
	sub	x4, x2, #1		/* Get the address mask */
	and	x4, x0, x4		/* Get the low bits of the address */
	add	x5, x1, x4		/* Add these to the size */
	bic	x4, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x4
	add	x4, x4, x2		/* Move to the next line */
	subs	x5, x5, x2		/* Reduce the size */
	b.hi	1b			/* Loop while bytes remain */
	dsb	ish
.endif
.ifnb \icop
	sub	x4, x3, #1		/* Get the address mask */
	and	x4, x0, x4		/* Get the low bits of the address */
	add	x5, x1, x4		/* Add these to the size */
	bic	x4, x0, x4		/* Clear the low bits of the address */
1:
	ic	\icop, x4
	add	x4, x4, x3		/* Move to the next line */
	subs	x5, x5, x3		/* Reduce the size */
	b.hi	1b			/* Loop while bytes remain */
	dsb	ish
	isb
.endif
.endm
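
/*
 * Worked example (illustrative, assuming CTR_EL0.DminLine = 4, i.e. a
 * 4 << 4 = 64 byte D-cache line): with x0 = 0x1030 and x1 = 0x20 the
 * mask is 0x3f, the low bits are 0x30, the adjusted size is 0x50 and
 * the aligned base is 0x1000, so the loop issues dc ops at 0x1000 and
 * 0x1040, covering every line the 0x1030-0x104f range touches.
 */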

ENTRY(aarch64_nullop)
	ret
END(aarch64_nullop)

ENTRY(aarch64_cpuid)
	mrs	x0, midr_el1
	ret
END(aarch64_cpuid)
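
/*
 * MIDR_EL1 packs Implementer[31:24], Variant[23:20], Architecture[19:16],
 * PartNum[15:4] and Revision[3:0]; decoding the ID is left to the caller.
 */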

/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(aarch64_dcache_wb_range)

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note: this must never be "optimized" into an invalidate of the entire
 * cache, since that would discard dirty lines outside the range; if the
 * range is too large, a write-back/invalidate of the entire cache must
 * be used instead.
 */
ENTRY(aarch64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(aarch64_dcache_inv_range)

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, icop = ivau
	ret
END(aarch64_idcache_wbinv_range)

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_icache_sync_range)
	cache_handle_range	dcop = cvau, icop = ivau
	ret
END(aarch64_icache_sync_range)
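
/*
 * Typical use (illustrative sketch, not from this file): after storing
 * instructions, e.g.
 *
 *	memcpy(dst, insns, len);
 *	aarch64_icache_sync_range((vaddr_t)dst, len);
 *
 * the cvau/ivau pair above cleans the D-cache to the point of
 * unification and invalidates stale I-cache lines, so subsequent
 * fetches see the new instructions.
 */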

/*
 * void aarch64_icache_inv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_icache_inv_range)
	cache_handle_range	icop = ivau
	ret
END(aarch64_icache_inv_range)

/*
 * void aarch64_icache_barrier_range(vaddr_t, vsize_t)
 *
 * For CPUs whose I-cache needs no explicit invalidation for
 * instruction/data coherence (CTR_EL0.DIC), only barriers are required.
 */
ENTRY(aarch64_icache_barrier_range)
	dsb	ishst
	isb
	ret
END(aarch64_icache_barrier_range)

/*
 * void aarch64_icache_inv_all(void)
 */
ENTRY(aarch64_icache_inv_all)
	dsb	ish
#ifdef MULTIPROCESSOR
	ic	ialluis
#else
	ic	iallu
#endif
	dsb	ish
	isb
	ret
END(aarch64_icache_inv_all)
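
/*
 * ic ialluis broadcasts the invalidate to every core in the Inner
 * Shareable domain; the uniprocessor build can use ic iallu, which
 * affects only the local I-cache.
 */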

/*
 * void aarch64_drain_writebuf(void)
 */
ENTRY(aarch64_drain_writebuf)
	dsb	sy
	ret
END(aarch64_drain_writebuf)

/*
 * TLB ops
 */

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
ENTRY(aarch64_set_ttbr0)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(aarch64_set_ttbr0)

#ifdef CPU_THUNDERX
/*
 * Cavium erratum 27456: a broadcast TLBI can corrupt the I-cache on
 * these cores, so invalidate the local I-cache after switching TTBR0.
 *
 * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
 */
ENTRY(aarch64_set_ttbr0_thunderx)
	dsb	ish
	msr	ttbr0_el1, x0
	isb
	ic	iallu
	dsb	nsh
	isb
	ret
END(aarch64_set_ttbr0_thunderx)
#endif /* CPU_THUNDERX */

/* void aarch64_tlbi_all(void) */
ENTRY(aarch64_tlbi_all)
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_all)

/* void aarch64_tlbi_by_asid(int asid) */
ENTRY(aarch64_tlbi_by_asid)
	/* x8 = bit 63[ASID]48, 47[RES0]0 */
	lsl	x8, x0, #48
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	aside1is, x8
#else
	tlbi	aside1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid)
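
/*
 * Example encoding (illustrative): for asid = 5 the lsl above yields
 * x8 = 0x0005000000000000, i.e. the ASID in bits 63:48 with the RES0
 * bits clear, as TLBI ASIDE1 expects.
 */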

/* void aarch64_tlbi_by_va(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va)
	/* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaae1is, x8
#else
	tlbi	vaae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va)
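
/*
 * Example encoding (illustrative): for va = 0x12345000 the ubfx above
 * extracts the page number VA[55:12], leaving x8 = 0x12345 with the
 * upper RES0 bits clear, as TLBI VAAE1 expects.
 */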

/*
 * void aarch64_tlbi_by_va_ll(vaddr_t va)
 *
 * The "last level" variant invalidates only leaf (final-level) entries.
 */
ENTRY(aarch64_tlbi_by_va_ll)
	/* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaale1is, x8
#else
	tlbi	vaale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va_ll)

/* void aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va)
	/* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vae1is, x8
#else
	tlbi	vae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va)
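
/*
 * Example encoding (illustrative): for asid = 5 and va = 0x12345000 the
 * lsl/bfxil pair yields x8 = 0x0005000000012345, i.e. the ASID in bits
 * 63:48 and the page number VA[55:12] in bits 43:0.
 */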

/*
 * void aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va)
 *
 * The "last level" variant invalidates only leaf (final-level) entries.
 */
ENTRY(aarch64_tlbi_by_asid_va_ll)
	/* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vale1is, x8
#else
	tlbi	vale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va_ll)
    296