/*	$NetBSD: cpufunc_asm_armv8.S,v 1.5 2020/06/01 08:59:00 ryo Exp $	*/

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_cputypes.h"
#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

	.text
	.align	2

/*
 * Macro to apply a cache maintenance operation to a virtual address
 * range.  Takes the start address in x0 and the length in x1; corrupts
 * x2-x5.
 */
.macro cache_handle_range dcop = 0, icop = 0
	mrs	x3, ctr_el0
	mov	x4, #4			/* size of word */
.if \dcop != 0
	ubfx	x2, x3, #16, #4		/* x2 = D cache shift */
	lsl	x2, x4, x2		/* x2 = D cache line size */
.endif
.if \icop != 0
	and	x3, x3, #15		/* x3 = I cache shift */
	lsl	x3, x4, x3		/* x3 = I cache line size */
.endif
.if \dcop != 0
	sub	x4, x2, #1		/* Get the address mask */
	and	x4, x0, x4		/* Get the low bits of the address */
	add	x5, x1, x4		/* Add these to the size */
	bic	x4, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x4
	add	x4, x4, x2		/* Move to the next line */
	subs	x5, x5, x2		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
.endif
.if \icop != 0
	sub	x4, x3, #1		/* Get the address mask */
	and	x4, x0, x4		/* Get the low bits of the address */
	add	x5, x1, x4		/* Add these to the size */
	bic	x4, x0, x4		/* Clear the low bits of the address */
1:
	ic	\icop, x4
	add	x4, x4, x3		/* Move to the next line */
	subs	x5, x5, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
	isb
.endif
.endm
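
/*
 * Worked example of the decode above (field values for illustration):
 * CTR_EL0.DminLine (bits 19:16) and CTR_EL0.IminLine (bits 3:0) hold
 * log2 of the line size in 4-byte words, so a field value of 4 gives
 * a line size of 4 << 4 = 64 bytes.  The alignment step then rounds
 * the start address down to a line boundary and grows the length to
 * match: a 100-byte request starting 40 bytes into a 64-byte line
 * becomes a 140-byte walk from the line base, touching three lines.
 */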


ENTRY(aarch64_nullop)
	ret
END(aarch64_nullop)

ENTRY(aarch64_cpuid)
	mrs	x0, midr_el1
	ret
END(aarch64_cpuid)

/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(aarch64_dcache_wb_range)
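
/*
 * Background: "dc cvac" cleans (writes back) a D-cache line by virtual
 * address to the Point of Coherency without invalidating it, so the
 * data stays cached after the write-back.
 */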

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note: we must never invalidate the entire cache from here, since a
 * bare invalidate discards dirty lines.  If the range is too large to
 * iterate, use a write-back+invalidate of the whole cache instead.
 */
ENTRY(aarch64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(aarch64_dcache_inv_range)
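
/*
 * Background: "dc ivac" discards lines without writing them back, and
 * the walk above rounds out to whole cache lines.  Any dirty bytes
 * that share the first or last line with the requested range are
 * therefore discarded too, so callers should pass line-aligned
 * buffers when invalidating.
 */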

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, icop = ivau
	ret
END(aarch64_idcache_wbinv_range)
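
/*
 * Background: this pairs "dc civac" (clean+invalidate the D side to
 * the Point of Coherency) with "ic ivau" (invalidate the I side to
 * the Point of Unification), the usual combination when a mapping's
 * contents and executability change together.
 */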

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_icache_sync_range)
	cache_handle_range	dcop = cvau, icop = ivau
	ret
END(aarch64_icache_sync_range)
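
/*
 * Background: this is the standard ARMv8 sequence for making newly
 * written instructions visible to instruction fetch: clean the D side
 * to the Point of Unification (dc cvau), dsb, invalidate the I side
 * (ic ivau), dsb, then the isb in the macro forces this CPU to
 * refetch.
 */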

/*
 * void aarch64_icache_inv_all(void)
 */
ENTRY(aarch64_icache_inv_all)
	dsb	ish
#ifdef MULTIPROCESSOR
	ic	ialluis
#else
	ic	iallu
#endif
	dsb	ish
	isb
	ret
END(aarch64_icache_inv_all)
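
/*
 * Background: "ic ialluis" broadcasts the invalidate to every CPU in
 * the Inner Shareable domain, while "ic iallu" affects only the local
 * CPU; hence the MULTIPROCESSOR split above.
 */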


/* void aarch64_drain_writebuf(void) */
ENTRY(aarch64_drain_writebuf)
	dsb	sy
	ret
END(aarch64_drain_writebuf)
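
/*
 * Background: "dsb sy" is a full-system data synchronization barrier;
 * the function keeps the traditional NetBSD/arm name, under which the
 * equivalent operation drained the CPU write buffer.
 */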

/*
 * TLB ops
 */

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
ENTRY(aarch64_set_ttbr0)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(aarch64_set_ttbr0)
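
/*
 * Background on the barriers: the leading "dsb ish" makes prior
 * page-table stores visible before the base register changes, and the
 * trailing "dsb ish; isb" ensure the new translation-table base is in
 * effect before any subsequent fetch or access is translated.  No TLB
 * invalidation happens here; stale entries for a reused ASID must be
 * flushed separately.
 */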

#ifdef CPU_THUNDERX
/*
 * Cavium erratum 27456
 * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
 */
ENTRY(aarch64_set_ttbr0_thunderx)
	dsb	ish
	msr	ttbr0_el1, x0
	isb
	ic	iallu
	dsb	nsh
	isb
	ret
END(aarch64_set_ttbr0_thunderx)
#endif /* CPU_THUNDERX */
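
/*
 * Background, per the published workarounds for Cavium erratum 27456:
 * on affected ThunderX parts the I-cache may hold lines tagged for a
 * non-current ASID after translation changes, so a local "ic iallu"
 * after the TTBR0 update flushes them.
 */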

/* void aarch64_tlbi_all(void) */
ENTRY(aarch64_tlbi_all)
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_all)

/* void aarch64_tlbi_by_asid(int asid) */
ENTRY(aarch64_tlbi_by_asid)
	/* x8: bits 63:48 = ASID, bits 47:0 = RES0 */
	lsl	x8, x0, #48
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	aside1is, x8
#else
	tlbi	aside1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid)
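
/*
 * Worked example (value for illustration): for asid = 5, the lsl
 * above forms x8 = 0x0005000000000000, i.e. the ASID in bits 63:48
 * with everything below zeroed, which is the operand format
 * "tlbi aside1" expects.
 */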

/* void aarch64_tlbi_by_va(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va)
	/* x8: bits 63:44 = RES0, bits 43:0 = VA[55:12] */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaae1is, x8
#else
	tlbi	vaae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va)
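
/*
 * Worked example (address for illustration): for va = 0x12345000 the
 * ubfx above extracts VA[55:12], giving x8 = 0x12345, i.e. the page
 * number in bits 43:0 as "tlbi vaae1" expects.
 */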

/* void aarch64_tlbi_by_va_ll(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va_ll)
	/* x8: bits 63:44 = RES0, bits 43:0 = VA[55:12] */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaale1is, x8
#else
	tlbi	vaale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va_ll)
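
/*
 * Background: the "_ll" (last-level) variants use the "tlbi vaal*"
 * ops, which invalidate only entries cached from the final level of
 * the table walk; they are cheaper when just a leaf PTE changed, but
 * must not be used if intermediate levels changed as well.
 */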

/* void aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va)
	/* x8: bits 63:48 = ASID, bits 47:44 = RES0, bits 43:0 = VA[55:12] */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vae1is, x8
#else
	tlbi	vae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va)
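
/*
 * Background: "tlbi vae1" matches entries on both VA and ASID,
 * whereas the "vaae1" form used by aarch64_tlbi_by_va above matches
 * the VA under any ASID.  The bfxil merges VA[55:12] into the low 44
 * bits of the ASID operand built by the lsl.
 */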

/* void aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va_ll)
	/* x8: bits 63:48 = ASID, bits 47:44 = RES0, bits 43:0 = VA[55:12] */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vale1is, x8
#else
	tlbi	vale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va_ll)