/* $NetBSD: cpufunc_asm_armv8.S,v 1.2 2018/07/23 22:51:39 ryo Exp $ */

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

	.text
	.align	2

/*
 * Macro to handle the cache.  This takes the start address in x0 and the
 * length in x1.  It will corrupt x0, x1, x2, x3, and x4.
 */
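/*
 * The line sizes come from CTR_EL0: DminLine (bits [19:16]) and IminLine
 * (bits [3:0]) hold log2 of the number of 4-byte words in the smallest
 * D-cache and I-cache line, so the line size in bytes is (4 << value),
 * which is what the mov/lsl pair below computes.
 */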
.macro cache_handle_range	dcop = 0, ic = 0, icop = 0
.if \ic == 0
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* x3 = D cache shift */
	mov	x2, #4			/* size of word */
	lsl	x3, x2, x3		/* x3 = D cache line size */
.else
	mrs	x3, ctr_el0
	ubfx	x2, x3, #16, #4		/* x2 = D cache shift */
	and	x3, x3, #15		/* x3 = I cache shift */
	cmp	x3, x2
	bcc	1f			/* use the smaller shift so both */
	mov	x3, x2			/* caches are stepped line by line */
1:	/* x3 = MIN(IcacheShift,DcacheShift) */
	mov	x2, #4			/* size of word */
	lsl	x3, x2, x3		/* x3 = cache line size */
.endif
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x0
	dsb	ish
.if \ic != 0
	ic	\icop, x0
	dsb	ish
.endif
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
.if \ic != 0
	isb
.endif
	ret
.endm
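
/*
 * The dc/ic operations passed to this macro by the callers below are:
 *	dc cvac		clean D-cache line by VA to Point of Coherency
 *	dc civac	clean and invalidate D-cache line by VA to PoC
 *	dc ivac		invalidate D-cache line by VA to PoC
 *	dc cvau		clean D-cache line by VA to Point of Unification
 *	ic ivau		invalidate I-cache line by VA to PoU
 */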


ENTRY(aarch64_nullop)
	ret
END(aarch64_nullop)

ENTRY(aarch64_cpuid)
	mrs	x0, midr_el1
	ret
END(aarch64_cpuid)

/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wb_range)
	cache_handle_range	dcop = cvac
END(aarch64_dcache_wb_range)

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note: dc ivac discards the contents of the affected lines without
 * writing them back, so we must never widen this into a whole-cache
 * invalidate.  If the range is too big to loop over, use a wb-inv
 * (clean and invalidate) of the entire cache instead.
 */
ENTRY(aarch64_dcache_inv_range)
	cache_handle_range	dcop = ivac
END(aarch64_dcache_inv_range)

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
 */
ENTRY(aarch64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
END(aarch64_idcache_wbinv_range)

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
 */
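/*
 * This is the usual sequence for making newly written instructions
 * visible to instruction fetch: clean the D-cache to the PoU, invalidate
 * the I-cache to the PoU, and synchronize with dsb/isb (the macro above
 * does all of this).
 */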
ENTRY(aarch64_icache_sync_range)
	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
END(aarch64_icache_sync_range)

/*
 * void aarch64_icache_inv_all(void)
 */
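/*
 * ic ialluis broadcasts the invalidate to all CPUs in the Inner
 * Shareable domain; ic iallu affects only the local CPU, which is
 * sufficient on a uniprocessor kernel.
 */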
ENTRY(aarch64_icache_inv_all)
	dsb	ish
#ifdef MULTIPROCESSOR
	ic	ialluis
#else
	ic	iallu
#endif
	dsb	ish
	isb
	ret
END(aarch64_icache_inv_all)

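
/*
 * void aarch64_drain_writebuf(void)
 *
 * dsb sy waits for completion of all outstanding memory accesses,
 * including accesses to device memory.
 */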
ENTRY(aarch64_drain_writebuf)
	dsb	sy
	ret
END(aarch64_drain_writebuf)


/*
 * TLB ops
 */
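/*
 * The *is forms of tlbi broadcast the invalidate to every CPU in the
 * Inner Shareable domain, while the plain forms affect only the local
 * CPU; hence the MULTIPROCESSOR conditionals below.  The dsb ishst
 * beforehand orders earlier page-table stores before the invalidate,
 * and the dsb ish / isb afterwards wait for it to complete.
 */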

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
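/*
 * The leading dsb orders earlier page-table updates before the base
 * switch; the trailing dsb and isb ensure that subsequent instructions
 * are translated using the new ttbr0 value.
 */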
ENTRY(aarch64_set_ttbr0)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(aarch64_set_ttbr0)

/* void aarch64_tlbi_all(void) */
ENTRY(aarch64_tlbi_all)
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_all)

/* void aarch64_tlbi_by_asid(int asid) */
ENTRY(aarch64_tlbi_by_asid)
	/* x8 = ASID[63:48], RES0[47:0] */
	lsl	x8, x0, #48
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	aside1is, x8
#else
	tlbi	aside1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid)

/* aarch64_tlbi_by_va(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va)
	/* x8 = RES0[63:44], VA(55:12)[43:0] */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaae1is, x8
#else
	tlbi	vaae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va)

/* aarch64_tlbi_by_va_ll(vaddr_t va) */
ENTRY(aarch64_tlbi_by_va_ll)
	/* x8 = RES0[63:44], VA(55:12)[43:0] */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaale1is, x8
#else
	tlbi	vaale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va_ll)

/* aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va)
	/* x8 = ASID[63:48], RES0[47:44], VA(55:12)[43:0] */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
#ifdef MULTIPROCESSOR
	/* do we need dsb and isb for the inner shareable broadcast? */
	dsb	ishst
	tlbi	vae1is, x8
	dsb	ish
	isb
#else
	/* no dsb/isb needed when invalidating a single local entry */
	tlbi	vae1, x8
#endif
	ret
END(aarch64_tlbi_by_asid_va)

/* aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
ENTRY(aarch64_tlbi_by_asid_va_ll)
	/* x8 = ASID[63:48], RES0[47:44], VA(55:12)[43:0] */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
#ifdef MULTIPROCESSOR
	/* do we need dsb and isb for the inner shareable broadcast? */
	dsb	ishst
	tlbi	vale1is, x8
	dsb	ish
	isb
#else
	/* no dsb/isb needed when invalidating a single local entry */
	tlbi	vale1, x8
#endif
	ret
END(aarch64_tlbi_by_asid_va_ll)