/* $NetBSD: cpufunc_asm_armv8.S,v 1.5 2020/06/01 08:59:00 ryo Exp $ */

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_cputypes.h"
#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

	.text
	.align	2

/*
 * Macro to handle the cache.  This takes the start address in x0 and
 * the length in x1.  It will corrupt x2-x5.
 */
.macro cache_handle_range	dcop = 0, icop = 0
	mrs	x3, ctr_el0
	mov	x4, #4			/* size of word */
.if \dcop != 0
	ubfx	x2, x3, #16, #4		/* x2 = D cache shift */
	lsl	x2, x4, x2		/* x2 = D cache line size */
.endif
.if \icop != 0
	and	x3, x3, #15		/* x3 = I cache shift */
	lsl	x3, x4, x3		/* x3 = I cache line size */
.endif
.if \dcop != 0
	sub	x4, x2, #1		/* Get the address mask */
	and	x4, x0, x4		/* Get the low bits of the address */
	add	x5, x1, x4		/* Add these to the size */
	bic	x4, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x4
	add	x4, x4, x2		/* Move to the next line */
	subs	x5, x5, x2		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
.endif
.if \icop != 0
	sub	x4, x3, #1		/* Get the address mask */
	and	x4, x0, x4		/* Get the low bits of the address */
	add	x5, x1, x4		/* Add these to the size */
	bic	x4, x0, x4		/* Clear the low bits of the address */
1:
	ic	\icop, x4
	add	x4, x4, x3		/* Move to the next line */
	subs	x5, x5, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
	isb
.endif
.endm
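
/*
 * Worked example (illustrative): CTR_EL0.DminLine (bits 19:16) and
 * CTR_EL0.IminLine (bits 3:0) give log2 of the smallest cache line
 * size in 4-byte words.  With DminLine = 4 the macro computes a D
 * cache line size of 4 << 4 = 64 bytes; for a request of 0x81 bytes
 * starting at an address whose low bits are 0x10, it rounds the start
 * down to the line boundary and grows the length to 0x91, so three
 * 64-byte lines are operated on.
 */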


ENTRY(aarch64_nullop)
	ret
END(aarch64_nullop)

ENTRY(aarch64_cpuid)
	mrs	x0, midr_el1
	ret
END(aarch64_cpuid)
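
/*
 * For reference (descriptive): MIDR_EL1 packs Implementer[31:24],
 * Variant[23:20], Architecture[19:16], PartNum[15:4] and Revision[3:0];
 * callers decode these fields to identify the CPU model.
 */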

/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(aarch64_dcache_wb_range)

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note: we must not invalidate everything.  If the range is too big, we
 * must use a wb-inv of the entire cache instead.
 */
ENTRY(aarch64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(aarch64_dcache_inv_range)

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, icop = ivau
	ret
END(aarch64_idcache_wbinv_range)

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_icache_sync_range)
	cache_handle_range	dcop = cvau, icop = ivau
	ret
END(aarch64_icache_sync_range)
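
/*
 * Note (descriptive): the sequence above is the usual AArch64 recipe
 * for making newly written instructions visible: clean the D cache to
 * the point of unification (dc cvau), then invalidate the I cache
 * (ic ivau), with barriers in between, before executing the modified
 * code.
 */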

/*
 * void aarch64_icache_inv_all(void)
 */
ENTRY(aarch64_icache_inv_all)
	dsb	ish
#ifdef MULTIPROCESSOR
	ic	ialluis
#else
	ic	iallu
#endif
	dsb	ish
	isb
	ret
END(aarch64_icache_inv_all)
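
/*
 * Note (descriptive): "ic ialluis" broadcasts the invalidate to all
 * CPUs in the Inner Shareable domain, while "ic iallu" only affects
 * the local CPU, hence the MULTIPROCESSOR distinction above.
 */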


/* void aarch64_drain_writebuf(void) */
ENTRY(aarch64_drain_writebuf)
	dsb	sy
	ret
END(aarch64_drain_writebuf)


/*
 * TLB ops
 */

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
ENTRY(aarch64_set_ttbr0)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(aarch64_set_ttbr0)
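
/*
 * Note (descriptive): the dsb before the msr waits for earlier memory
 * accesses (e.g. page table updates) to complete before the new
 * TTBR0_EL1 takes effect, and the trailing dsb/isb ensure subsequent
 * instructions are fetched and translated using the new table base.
 */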

#ifdef CPU_THUNDERX
/*
 * Cavium erratum 27456
 * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
 */
ENTRY(aarch64_set_ttbr0_thunderx)
	dsb	ish
	msr	ttbr0_el1, x0
	isb
	ic	iallu
	dsb	nsh
	isb
	ret
END(aarch64_set_ttbr0_thunderx)
#endif /* CPU_THUNDERX */

/* void aarch64_tlbi_all(void) */
ENTRY(aarch64_tlbi_all)
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_all)
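
/*
 * Note (descriptive): "dsb ishst" orders any prior page table stores
 * before the TLB invalidation, and the trailing "dsb ish; isb" waits
 * for the invalidation to complete before returning.  As with the
 * cache ops, the *is forms broadcast within the Inner Shareable
 * domain and are used in the MULTIPROCESSOR case.
 */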

/* void aarch64_tlbi_by_asid(int asid) */
ENTRY(aarch64_tlbi_by_asid)
	/* x8 = ASID in bits 63:48, bits 47:0 RES0 */
	lsl	x8, x0, #48
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	aside1is, x8
#else
	tlbi	aside1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid)
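
/*
 * Worked example (illustrative): for asid = 0x1234 the lsl above
 * yields x8 = 0x1234000000000000, i.e. the ASID in bits 63:48 with
 * the remaining bits zero, which is the operand format expected by
 * "tlbi aside1(is)".
 */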

/* aarch64_tlbi_by_va(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va)
	/* x8 = bits 63:44 RES0, VA[55:12] in bits 43:0 */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaae1is, x8
#else
	tlbi	vaae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va)
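
/*
 * Worked example (illustrative): for va = 0x0000000000401000 the ubfx
 * above extracts VA[55:12] = 0x401, so x8 = 0x0000000000000401 and
 * the TLBI invalidates entries for that page in every ASID (the
 * "vaa*" forms match all ASIDs).
 */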

/* aarch64_tlbi_by_va_ll(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va_ll)
	/* x8 = bits 63:44 RES0, VA[55:12] in bits 43:0 */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaale1is, x8
#else
	tlbi	vaale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va_ll)

/* aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va)
	/* x8 = ASID in bits 63:48, bits 47:44 RES0, VA[55:12] in bits 43:0 */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vae1is, x8
#else
	tlbi	vae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va)
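
/*
 * Worked example (illustrative): for asid = 0x42 and
 * va = 0x0000000000401000, the lsl gives x8 = 0x0042000000000000 and
 * the bfxil inserts VA[55:12] = 0x401 into bits 43:0, giving
 * x8 = 0x0042000000000401 as the operand for "tlbi vae1(is)".
 */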

/* aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va_ll)
	/* x8 = ASID in bits 63:48, bits 47:44 RES0, VA[55:12] in bits 43:0 */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vale1is, x8
#else
	tlbi	vale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid_va_ll)
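
/*
 * Note (descriptive): the *_ll ("last level") variants map to the
 * vale1(is)/vaale1(is) forms above, which only invalidate TLB entries
 * cached from the final level of the translation table walk; they are
 * sufficient when only a leaf (page or block) entry was changed.
 */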