/* $NetBSD: cpufunc_asm_armv8.S,v 1.4 2019/09/12 06:12:56 ryo Exp $ */

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_cputypes.h"
#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

        .text
        .align  2

/*
 * Macro to handle the cache. This takes the start address in x0 and the
 * length in x1. It will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
        mrs     x3, ctr_el0
        ubfx    x3, x3, #16, #4         /* x3 = D cache shift */
        mov     x2, #4                  /* size of word */
        lsl     x3, x2, x3              /* x3 = D cache line size */
.else
        mrs     x3, ctr_el0
        ubfx    x2, x3, #16, #4         /* x2 = D cache shift */
        and     x3, x3, #15             /* x3 = I cache shift */
        cmp     x3, x2
        bcs     1f
        mov     x3, x2
1:      /* x3 = MAX(IcacheShift,DcacheShift) */
        mov     x2, #4                  /* size of word */
        lsl     x3, x2, x3              /* x3 = cache line size */
.endif
        sub     x4, x3, #1              /* Get the address mask */
        and     x2, x0, x4              /* Get the low bits of the address */
        add     x1, x1, x2              /* Add these to the size */
        bic     x0, x0, x4              /* Clear the low bits of the address */
1:
        dc      \dcop, x0
        dsb     ish
.if \ic != 0
        ic      \icop, x0
        dsb     ish
.endif
        add     x0, x0, x3              /* Move to the next line */
        subs    x1, x1, x3              /* Reduce the size */
        b.hi    1b                      /* Check if we are done */
.if \ic != 0
        isb
.endif
        ret
.endm
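
/*
 * Worked example of the range handling above, using hypothetical values
 * and assuming CTR_EL0.DminLine = 4 (i.e. a 64-byte D-cache line):
 *
 *      line size = 4 << 4 = 64, address mask = 63
 *      call with x0 = 0x1234, x1 = 0x100
 *      alignment: x2 = 0x1234 & 63 = 0x34, x1 = 0x100 + 0x34 = 0x134,
 *                 x0 = 0x1200
 *
 * The loop then issues the cache op on lines 0x1200, 0x1240, 0x1280,
 * 0x12c0 and 0x1300, covering the requested [0x1234, 0x1333] range.
 */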


ENTRY(aarch64_nullop)
        ret
END(aarch64_nullop)

ENTRY(aarch64_cpuid)
        mrs     x0, midr_el1
        ret
END(aarch64_cpuid)

/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wb_range)
        cache_handle_range      dcop = cvac
END(aarch64_dcache_wb_range)

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wbinv_range)
        cache_handle_range      dcop = civac
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note: this must not be turned into a whole-cache invalidate, since that
 * would discard dirty lines outside the range. If the range is too big to
 * iterate over, the entire cache must be written back and invalidated
 * instead.
 */
ENTRY(aarch64_dcache_inv_range)
        cache_handle_range      dcop = ivac
END(aarch64_dcache_inv_range)
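
/*
 * Caller-side sketch of the note above (illustrative only; the
 * aarch64_dcache_wbinv_all() helper and the size threshold are assumptions,
 * not part of this file):
 *
 *      if (len >= whole_dcache_size)
 *              aarch64_dcache_wbinv_all();
 *      else
 *              aarch64_dcache_inv_range(va, len);
 */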

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_idcache_wbinv_range)
        cache_handle_range      dcop = civac, ic = 1, icop = ivau
END(aarch64_idcache_wbinv_range)

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_icache_sync_range)
        cache_handle_range      dcop = cvau, ic = 1, icop = ivau
END(aarch64_icache_sync_range)

/*
 * void aarch64_icache_inv_all(void)
 */
ENTRY(aarch64_icache_inv_all)
        dsb     ish
#ifdef MULTIPROCESSOR
        ic      ialluis         /* all I-caches in the Inner Shareable domain */
#else
        ic      iallu           /* this PE's I-caches only */
#endif
        dsb     ish
        isb
        ret
END(aarch64_icache_inv_all)



ENTRY(aarch64_drain_writebuf)
        dsb     sy
        ret
END(aarch64_drain_writebuf)


/*
 * TLB ops
 */
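
/*
 * The tlbi routines below follow a common pattern: dsb ishst so that
 * prior page-table stores are observable before the invalidate, the tlbi
 * itself (the broadcasting "is" variant under MULTIPROCESSOR, the local
 * one otherwise), then dsb ish to wait for completion and isb to
 * synchronize the instruction stream.
 */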

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
ENTRY(aarch64_set_ttbr0)
        dsb     ish
        msr     ttbr0_el1, x0
        dsb     ish
        isb
        ret
END(aarch64_set_ttbr0)

#ifdef CPU_THUNDERX
/*
 * Cavium erratum 27456
 * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
 */
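/*
 * (The local "ic iallu" below is the erratum workaround: on affected
 * ThunderX parts, broadcast TLB invalidation is reported to be able to
 * corrupt the instruction cache, so the I-cache is invalidated locally
 * after the TTBR0 update.)
 */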
ENTRY(aarch64_set_ttbr0_thunderx)
        dsb     ish
        msr     ttbr0_el1, x0
        isb
        ic      iallu
        dsb     nsh
        isb
        ret
END(aarch64_set_ttbr0_thunderx)
#endif /* CPU_THUNDERX */

/* void aarch64_tlbi_all(void) */
ENTRY(aarch64_tlbi_all)
        dsb     ishst
#ifdef MULTIPROCESSOR
        tlbi    vmalle1is
#else
        tlbi    vmalle1
#endif
        dsb     ish
        isb
        ret
END(aarch64_tlbi_all)

/* void aarch64_tlbi_by_asid(int asid) */
ENTRY(aarch64_tlbi_by_asid)
        /* x8 = bit 63[ASID]48, 47[RES0]0 */
        lsl     x8, x0, #48
        dsb     ishst
#ifdef MULTIPROCESSOR
        tlbi    aside1is, x8
#else
        tlbi    aside1, x8
#endif
        dsb     ish
        isb
        ret
END(aarch64_tlbi_by_asid)

/* aarch64_tlbi_by_va(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va)
        /* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
        ubfx    x8, x0, #12, #44
        dsb     ishst
#ifdef MULTIPROCESSOR
        tlbi    vaae1is, x8
#else
        tlbi    vaae1, x8
#endif
        dsb     ish
        isb
        ret
END(aarch64_tlbi_by_va)

/* aarch64_tlbi_by_va_ll(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va_ll)
        /* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
        ubfx    x8, x0, #12, #44
        dsb     ishst
#ifdef MULTIPROCESSOR
        tlbi    vaale1is, x8
#else
        tlbi    vaale1, x8
#endif
        dsb     ish
        isb
        ret
END(aarch64_tlbi_by_va_ll)

/* aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va)
        /* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
        lsl     x8, x0, #48
        bfxil   x8, x1, #12, #44
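        /*
         * e.g. with hypothetical arguments asid = 0x42 and
         * va = 0x0000aaaabbbbc123:
         *      ASID << 48           = 0x0042000000000000
         *      VA[55:12] (va >> 12) = 0x0000000aaaabbbbc
         *      x8                   = 0x0042000aaaabbbbc
         */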
        dsb     ishst
#ifdef MULTIPROCESSOR
        tlbi    vae1is, x8
#else
        tlbi    vae1, x8
#endif
        dsb     ish
        isb
        ret
END(aarch64_tlbi_by_asid_va)

/* aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va_ll)
        /* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
        lsl     x8, x0, #48
        bfxil   x8, x1, #12, #44
        dsb     ishst
#ifdef MULTIPROCESSOR
        tlbi    vale1is, x8
#else
        tlbi    vale1, x8
#endif
        dsb     ish
        isb
        ret
END(aarch64_tlbi_by_asid_va_ll)