/*	$NetBSD: cpufunc_asm_armv8.S,v 1.2 2018/07/23 22:51:39 ryo Exp $	*/

/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
 */

#include "opt_multiprocessor.h"
#include <aarch64/asm.h>

	.text
	.align	2

/*
 * Macro to handle the cache.  This takes the start address in x0, length
 * in x1.  It will corrupt x0, x1, x2, x3, and x4.
 */
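/*
 * The macro arguments select the cache maintenance instructions issued:
 * "dcop" is the DC (data cache by VA) operation, "ic" selects whether the
 * instruction cache is handled as well, and "icop" is the IC (instruction
 * cache by VA) operation.  The line size is derived from CTR_EL0:
 * DminLine (bits [19:16]) and IminLine (bits [3:0]) hold log2 of the
 * smallest line size in 4-byte words, so the size in bytes is 4 << field;
 * e.g. DminLine = 4 means 4 << 4 = 64 byte data cache lines.
 */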
.macro cache_handle_range	dcop = 0, ic = 0, icop = 0
.if \ic == 0
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* x3 = D cache shift */
	mov	x2, #4			/* size of word */
	lsl	x3, x2, x3		/* x3 = D cache line size */
.else
	mrs	x3, ctr_el0
	ubfx	x2, x3, #16, #4		/* x2 = D cache shift */
	and	x3, x3, #15		/* x3 = I cache shift */
	/* Use the smaller shift so that no I or D cache line is skipped. */
	cmp	x3, x2
	bcc	1f
	mov	x3, x2
1:	/* x3 = MIN(IcacheShift,DcacheShift) */
	mov	x2, #4			/* size of word */
	lsl	x3, x2, x3		/* x3 = cache line size */
.endif
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x0
	dsb	ish
.if \ic != 0
	ic	\icop, x0
	dsb	ish
.endif
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
.if \ic != 0
	isb
.endif
	ret
.endm


ENTRY(aarch64_nullop)
	ret
END(aarch64_nullop)

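/*
 * Return the contents of the Main ID Register (midr_el1), which
 * identifies the implementer, part number and revision of the CPU.
 */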
ENTRY(aarch64_cpuid)
	mrs	x0, midr_el1
	ret
END(aarch64_cpuid)

/*
 * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
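 *
 * Write back (clean) the data cache lines covering [va, va + len) to the
 * Point of Coherency (dc cvac).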
 */
ENTRY(aarch64_dcache_wb_range)
	cache_handle_range	dcop = cvac
END(aarch64_dcache_wb_range)

/*
 * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
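 *
 * Write back and invalidate the data cache lines covering [va, va + len)
 * to the Point of Coherency (dc civac).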
 */
ENTRY(aarch64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
END(aarch64_dcache_wbinv_range)

/*
 * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
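 *
 * Invalidate, without writing back, the data cache lines covering
 * [va, va + len) to the Point of Coherency (dc ivac).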
 *
 * Note: we must not blindly invalidate the whole cache.  If the range is
 * too big to invalidate line by line, a write-back and invalidate of the
 * entire cache must be used instead.
 */
ENTRY(aarch64_dcache_inv_range)
	cache_handle_range	dcop = ivac
END(aarch64_dcache_inv_range)

/*
 * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
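 *
 * Write back and invalidate the data cache lines (dc civac) and
 * invalidate the instruction cache lines (ic ivau) covering
 * [va, va + len).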
 */
ENTRY(aarch64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
END(aarch64_idcache_wbinv_range)

/*
 * void aarch64_icache_sync_range(vaddr_t, vsize_t)
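 *
 * Clean the data cache to the Point of Unification (dc cvau) and
 * invalidate the instruction cache (ic ivau) for [va, va + len), so that
 * newly written instructions become visible to instruction fetch.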
 */
ENTRY(aarch64_icache_sync_range)
	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
END(aarch64_icache_sync_range)

/*
 * void aarch64_icache_inv_all(void)
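 *
 * Invalidate the entire instruction cache.  On MULTIPROCESSOR kernels the
 * invalidate (ic ialluis) is broadcast to the Inner Shareable domain.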
 */
ENTRY(aarch64_icache_inv_all)
	dsb	ish
#ifdef MULTIPROCESSOR
	ic	ialluis
#else
	ic	iallu
#endif
	dsb	ish
	isb
	ret
END(aarch64_icache_inv_all)

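/*
 * aarch64_drain_writebuf()
 *
 * Drain the write buffer: wait for all outstanding memory accesses,
 * including writes, to complete (dsb sy).
 */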
ENTRY(aarch64_drain_writebuf)
	dsb	sy
	ret
END(aarch64_drain_writebuf)

/*
 * TLB ops
 */

/* void aarch64_set_ttbr0(uint64_t ttbr0) */
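/*
 * Install a new translation table base for the lower (user) VA range.
 * The barriers ensure that prior page table updates are visible before
 * the switch and that the new ttbr0_el1 value is in effect on return.
 */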
ENTRY(aarch64_set_ttbr0)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(aarch64_set_ttbr0)

/* void aarch64_tlbi_all(void) */
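/*
 * Invalidate all EL1 TLB entries for all ASIDs.  On MULTIPROCESSOR
 * kernels the invalidate (tlbi vmalle1is) is broadcast to the Inner
 * Shareable domain.
 */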
ENTRY(aarch64_tlbi_all)
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_all)

/* void aarch64_tlbi_by_asid(int asid) */
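/*
 * Invalidate all TLB entries tagged with the given ASID.  The ASID is
 * passed in bits [63:48] of the tlbi operand register.
 */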
ENTRY(aarch64_tlbi_by_asid)
	/* x8 = ASID[63:48], RES0[47:0] */
	lsl	x8, x0, #48
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	aside1is, x8
#else
	tlbi	aside1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_asid)

/* void aarch64_tlbi_by_va(vaddr_t va) */
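/*
 * Invalidate the TLB entries for the page containing "va", for all ASIDs
 * (tlbi vaae1).
 */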
ENTRY(aarch64_tlbi_by_va)
	/* x8 = RES0[63:44], VA[55:12] in bits [43:0] */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaae1is, x8
#else
	tlbi	vaae1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va)

/* void aarch64_tlbi_by_va_ll(vaddr_t va) */
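/*
 * Same as aarch64_tlbi_by_va, but "last level" only (tlbi vaale1): only
 * the final-level (page or block) entry is invalidated, not any cached
 * intermediate table walk entries.
 */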
ENTRY(aarch64_tlbi_by_va_ll)
	/* x8 = RES0[63:44], VA[55:12] in bits [43:0] */
	ubfx	x8, x0, #12, #44
	dsb	ishst
#ifdef MULTIPROCESSOR
	tlbi	vaale1is, x8
#else
	tlbi	vaale1, x8
#endif
	dsb	ish
	isb
	ret
END(aarch64_tlbi_by_va_ll)

/* void aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
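/*
 * Invalidate the TLB entry for the page containing "va" within the given
 * ASID (tlbi vae1).
 */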
ENTRY(aarch64_tlbi_by_asid_va)
	/* x8 = ASID[63:48], RES0[47:44], VA[55:12] in bits [43:0] */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
#ifdef MULTIPROCESSOR
	/* dsb/isb order and complete the broadcast (inner shareable) invalidate */
	dsb	ishst
	tlbi	vae1is, x8
	dsb	ish
	isb
#else
	/* no dsb and isb needed for a single (local) entry */
	tlbi	vae1, x8
#endif
	ret
END(aarch64_tlbi_by_asid_va)

/* void aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
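/*
 * Same as aarch64_tlbi_by_asid_va, but "last level" only (tlbi vale1).
 */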
ENTRY(aarch64_tlbi_by_asid_va_ll)
	/* x8 = ASID[63:48], RES0[47:44], VA[55:12] in bits [43:0] */
	lsl	x8, x0, #48
	bfxil	x8, x1, #12, #44
#ifdef MULTIPROCESSOR
	/* dsb/isb order and complete the broadcast (inner shareable) invalidate */
	dsb	ishst
	tlbi	vale1is, x8
	dsb	ish
	isb
#else
	/* no dsb and isb needed for a single (local) entry */
	tlbi	vale1, x8
#endif
	ret
END(aarch64_tlbi_by_asid_va_ll)