/*	$NetBSD: cpufunc_asm_armv8.S,v 1.4 2019/09/12 06:12:56 ryo Exp $	*/

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_cputypes.h"
#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

	.text
	.align	2

/*
 * Macro to handle a cache operation over an address range.  This takes the
 * start address in x0 and the length in x1.  It will corrupt x0, x1, x2,
 * x3, and x4.
 */
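/*
 * CTR_EL0[19:16] (DminLine) and CTR_EL0[3:0] (IminLine) hold the log2 of
 * the smallest D/I cache line size in 4-byte words, so the line size is
 * computed below as 4 << shift; e.g. a typical DminLine of 4 gives
 * 4 << 4 = 64 byte lines.
 */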
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* x3 = D cache shift */
	mov	x2, #4			/* size of word */
	lsl	x3, x2, x3		/* x3 = D cache line size */
.else
	mrs	x3, ctr_el0
	ubfx	x2, x3, #16, #4		/* x2 = D cache shift */
	and	x3, x3, #15		/* x3 = I cache shift */
	cmp	x3, x2
	bcs	1f
	mov	x3, x2
1:					/* x3 = MAX(IcacheShift,DcacheShift) */
	mov	x2, #4			/* size of word */
	lsl	x3, x2, x3		/* x3 = cache line size */
.endif
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x0
	dsb	ish
.if \ic != 0
	ic	\icop, x0
	dsb	ish
.endif
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
.if \ic != 0
	isb
.endif
	ret
.endm

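/* aarch64_nullop: do-nothing stub; simply returns. */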
ENTRY(aarch64_nullop)
	ret
END(aarch64_nullop)

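/* aarch64_cpuid: return MIDR_EL1 (the Main ID Register) of this CPU. */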
ENTRY(aarch64_cpuid)
	mrs	x0, midr_el1
	ret
END(aarch64_cpuid)

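/*
 * Cache maintenance operations used by the routines below, all by
 * virtual address:
 *	dc cvac		clean to the Point of Coherency
 *	dc civac	clean and invalidate to the Point of Coherency
 *	dc ivac		invalidate to the Point of Coherency
 *	dc cvau		clean to the Point of Unification
 *	ic ivau		invalidate the I-cache to the Point of Unification
 */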
/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wb_range)
	cache_handle_range	dcop = cvac
END(aarch64_dcache_wb_range)

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note, we must not fall back to invalidating the entire cache: that would
 * discard dirty data belonging to other ranges.  If the range is too big,
 * wb-inv of the entire cache must be used instead.
 */
ENTRY(aarch64_dcache_inv_range)
	cache_handle_range	dcop = ivac
END(aarch64_dcache_inv_range)
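/*
 * Illustrative use of the D-cache routines above for a DMA transfer
 * (buffer names are placeholders; drivers normally reach these through
 * bus_dmamap_sync(9) rather than calling them directly):
 *
 *	aarch64_dcache_wb_range(txbuf, txlen);	   device may now read txbuf
 *	... device DMA writes rxbuf ...
 *	aarch64_dcache_inv_range(rxbuf, rxlen);	   CPU now sees the new data
 */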

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
END(aarch64_idcache_wbinv_range)

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_icache_sync_range)
	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
END(aarch64_icache_sync_range)
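/*
 * aarch64_icache_sync_range() is the sequence needed after new
 * instructions have been written, e.g. when a module or trampoline is
 * installed: clean the D-cache to the Point of Unification, invalidate
 * the corresponding I-cache lines, then isb before executing the code.
 */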

/*
 * void aarch64_icache_inv_all(void)
 */
ENTRY(aarch64_icache_inv_all)
	dsb	ish
#ifdef MULTIPROCESSOR
	ic	ialluis
#else
	ic	iallu
#endif
	dsb	ish
	isb
	ret
END(aarch64_icache_inv_all)

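/*
 * void aarch64_drain_writebuf(void)
 *
 * Full-system DSB: wait for all outstanding memory accesses to complete.
 * The name is a holdover from the arm32 "drain write buffer" interface.
 */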
ENTRY(aarch64_drain_writebuf)
	dsb	sy
	ret
END(aarch64_drain_writebuf)


/*
 * TLB ops
 */
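/*
 * Under MULTIPROCESSOR the "is" (inner shareable) forms of the TLB and
 * I-cache invalidates are used, so the operation is broadcast to all CPUs
 * in the inner shareable domain; the uniprocessor build uses the
 * local-only forms.
 */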

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
ENTRY(aarch64_set_ttbr0)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(aarch64_set_ttbr0)

#ifdef CPU_THUNDERX
/*
 * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
 *
 * Workaround for Cavium erratum 27456: after updating TTBR0 these cores
 * also need a local I-cache invalidate so that no stale instructions are
 * fetched for the new context.
 */
ENTRY(aarch64_set_ttbr0_thunderx)
	dsb	ish
	msr	ttbr0_el1, x0
	isb
	ic	iallu
	dsb	nsh
	isb
	ret
END(aarch64_set_ttbr0_thunderx)
#endif /* CPU_THUNDERX */

/* void aarch64_tlbi_all(void) */
ENTRY(aarch64_tlbi_all)
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_all)

/* void aarch64_tlbi_by_asid(int asid) */
ENTRY(aarch64_tlbi_by_asid)
	/* x8 = bit 63[ASID]48, 47[RES0]0 */
	lsl	x8, x0, #48
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	aside1is, x8
#else
	tlbi	aside1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid)

/* void aarch64_tlbi_by_va(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va)
	/* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaae1is, x8
#else
	tlbi	vaae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va)

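/*
 * The "_ll" variants use the "last level" TLBI operations (vale1/vaale1),
 * which only invalidate entries caching the final level of the table walk.
 * They are sufficient, and cheaper, when only a leaf PTE was changed and
 * the intermediate table entries are unmodified.
 */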
/* void aarch64_tlbi_by_va_ll(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va_ll)
	/* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaale1is, x8
#else
	tlbi	vaale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va_ll)

/* void aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va)
	/* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vae1is, x8
#else
	tlbi	vae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va)
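/*
 * Illustrative call sequence (a sketch, not a specific caller): after a
 * PTE for (asid, va) has been modified or removed and the store to the
 * page table is complete, invoking
 *
 *	aarch64_tlbi_by_asid_va(asid, va);
 *
 * removes the stale entry.  The leading "dsb ishst" above orders the page
 * table store before the invalidate, and under MULTIPROCESSOR the "is"
 * form broadcasts it to every CPU in the inner shareable domain.
 */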

/* void aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va_ll)
	/* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vale1is, x8
#else
	tlbi	vale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va_ll)