/*	$NetBSD: pl310.c,v 1.13 2014/02/23 21:19:06 matt Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pl310.c,v 1.13 2014/02/23 21:19:06 matt Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/atomic.h>

#include <arm/locore.h>

#include <arm/cortex/mpcore_var.h>
#include <arm/cortex/pl310_reg.h>
#include <arm/cortex/pl310_var.h>

static int arml2cc_match(device_t, cfdata_t, void *);
static void arml2cc_attach(device_t, device_t, void *);

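/*
 * Offset and size of the PL310 register block within the memory
 * region mapped by the parent mpcore bus.
 */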
#define	L2CC_BASE	0x2000
#define	L2CC_SIZE	0x1000

struct arml2cc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_memh;
	kmutex_t sc_lock;
	uint32_t sc_waymask;
	struct evcnt sc_ev_inv __aligned(8);
	struct evcnt sc_ev_wb;
	struct evcnt sc_ev_wbinv;
	bool sc_enabled;
};

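/*
 * The event counters are bumped with atomic_inc_64(), so their 64-bit
 * counts must be 8-byte aligned; verify that at compile time.
 */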
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_inv.ev_count) % 8 == 0);
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_wb.ev_count) % 8 == 0);
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_wbinv.ev_count) % 8 == 0);

CFATTACH_DECL_NEW(arml2cc, sizeof(struct arml2cc_softc),
    arml2cc_match, arml2cc_attach, NULL, NULL);

static inline void arml2cc_disable(struct arml2cc_softc *);
static inline void arml2cc_enable(struct arml2cc_softc *);
static void arml2cc_sdcache_wb_range(vaddr_t, paddr_t, psize_t);
static void arml2cc_sdcache_inv_range(vaddr_t, paddr_t, psize_t);
static void arml2cc_sdcache_wbinv_range(vaddr_t, paddr_t, psize_t);

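/*
 * There is at most one PL310 in the system and the sdcache hooks take
 * no softc argument, so keep a pointer to the attached instance.
 */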
static struct arml2cc_softc *arml2cc_sc;

static inline uint32_t
arml2cc_read_4(struct arml2cc_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_memh, o);
}

static inline void
arml2cc_write_4(struct arml2cc_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_memh, o, v);
}


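/*
 * Match only the first instance, and only on a multiprocessor
 * Cortex-A9; uniprocessor A9s lack CBAR and therefore have no PL310
 * in their private memory region.
 */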
/* ARGSUSED */
static int
arml2cc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (arml2cc_sc)
		return 0;

	if (!CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid))
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	/*
	 * This isn't present on UP A9s (since CBAR isn't present).
	 */
	uint32_t mpidr = armreg_mpidr_read();
	if (mpidr == 0 || (mpidr & MPIDR_U))
		return 0;

	return 1;
}

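/*
 * Map the REV field of the Cache ID register to the matching PL310
 * release name.
 */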
static const struct {
	uint8_t rev;
	uint8_t str[7];
} pl310_revs[] = {
	{ 0, " r0p0" },
	{ 2, " r1p0" },
	{ 4, " r2p0" },
	{ 5, " r3p0" },
	{ 6, " r3p1" },
	{ 8, " r3p2" },
	{ 9, " r3p3" },
};

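/*
 * Attach: record the softc, derive the way mask from the L2 geometry
 * already recorded in arm_scache (see arml2cc_init() below), attach
 * event counters, map the register block, and report the controller
 * revision.  Depending on cf_flags bit 0 the cache is then either
 * cleaned and disabled, or the sdcache cpufuncs are hooked up
 * (enabling the controller first if firmware left it off).
 */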
static void
arml2cc_attach(device_t parent, device_t self, void *aux)
{
	struct arml2cc_softc * const sc = device_private(self);
	struct mpcore_attach_args * const mpcaa = aux;
	const char * const xname = device_xname(self);

	arml2cc_sc = sc;
	sc->sc_dev = self;
	sc->sc_memt = mpcaa->mpcaa_memt;
	sc->sc_waymask = __BIT(arm_scache.dcache_ways) - 1;

	evcnt_attach_dynamic(&sc->sc_ev_inv, EVCNT_TYPE_MISC, NULL,
	    xname, "L2 inv requests");
	evcnt_attach_dynamic(&sc->sc_ev_wb, EVCNT_TYPE_MISC, NULL,
	    xname, "L2 wb requests");
	evcnt_attach_dynamic(&sc->sc_ev_wbinv, EVCNT_TYPE_MISC, NULL,
	    xname, "L2 wbinv requests");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh,
	    L2CC_BASE, L2CC_SIZE, &sc->sc_memh);

	uint32_t id = arml2cc_read_4(sc, L2C_CACHE_ID);
	u_int rev = __SHIFTOUT(id, CACHE_ID_REV);

	const char *revstr = "";
	for (size_t i = 0; i < __arraycount(pl310_revs); i++) {
		if (rev == pl310_revs[i].rev) {
			revstr = pl310_revs[i].str;
			break;
		}
	}

	const bool enabled_p = arml2cc_read_4(sc, L2C_CTL) != 0;

	aprint_naive("\n");
	aprint_normal(": ARM PL310%s L2 Cache Controller%s\n",
	    revstr, enabled_p ? "" : " (disabled)");

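	/*
	 * cf_flags bit 0 requests that the L2 cache be left disabled.
	 */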
	if (enabled_p) {
		if (device_cfdata(self)->cf_flags & 1) {
			arml2cc_disable(sc);
			aprint_normal_dev(self, "cache %s\n",
			    arml2cc_read_4(sc, L2C_CTL) ? "enabled" : "disabled");
			sc->sc_enabled = false;
		} else {
			cpufuncs.cf_sdcache_wb_range = arml2cc_sdcache_wb_range;
			cpufuncs.cf_sdcache_inv_range = arml2cc_sdcache_inv_range;
			cpufuncs.cf_sdcache_wbinv_range = arml2cc_sdcache_wbinv_range;
			sc->sc_enabled = true;
		}
	} else if ((device_cfdata(self)->cf_flags & 1) == 0) {
		if (!enabled_p) {
			arml2cc_enable(sc);
			aprint_normal_dev(self, "cache %s\n",
			    arml2cc_read_4(sc, L2C_CTL) ? "enabled" : "disabled");
		}
		cpufuncs.cf_sdcache_wb_range = arml2cc_sdcache_wb_range;
		cpufuncs.cf_sdcache_inv_range = arml2cc_sdcache_inv_range;
		cpufuncs.cf_sdcache_wbinv_range = arml2cc_sdcache_wbinv_range;
		sc->sc_enabled = true;
	}

	KASSERTMSG(arm_pcache.dcache_line_size == arm_scache.dcache_line_size,
	    "pcache %u scache %u",
	    arm_pcache.dcache_line_size, arm_scache.dcache_line_size);
}

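/*
 * Write a maintenance operation register; if wait is true, poll the
 * register until the controller clears the busy bit (bit 0).
 */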
static inline void
arml2cc_cache_op(struct arml2cc_softc *sc, bus_size_t off, uint32_t val,
    bool wait)
{
	arml2cc_write_4(sc, off, val);
	if (wait) {
		while (arml2cc_read_4(sc, off) & 1) {
			/* spin */
		}
	}
}

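/*
 * Way-based maintenance: write the way mask and spin until the
 * controller has finished with all of the requested ways.
 */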
static inline void
arml2cc_cache_way_op(struct arml2cc_softc *sc, bus_size_t off, uint32_t way_mask)
{
	arml2cc_write_4(sc, off, way_mask);
	while (arml2cc_read_4(sc, off) & way_mask) {
		/* spin */
	}
}

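/*
 * Issue a Cache Sync and wait for it to complete, so that previously
 * issued maintenance operations have drained.
 */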
static inline void
arml2cc_cache_sync(struct arml2cc_softc *sc)
{
	arml2cc_cache_op(sc, L2C_CACHE_SYNC, 0, true);
}

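/*
 * Clean and invalidate all ways, sync, and then turn the controller off.
 */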
static inline void
arml2cc_disable(struct arml2cc_softc *sc)
{
	mutex_spin_enter(&sc->sc_lock);

	arml2cc_cache_way_op(sc, L2C_CLEAN_INV_WAY, sc->sc_waymask);
	arml2cc_cache_sync(sc);

	arml2cc_write_4(sc, L2C_CTL, 0);	// turn it off
	mutex_spin_exit(&sc->sc_lock);
}

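/*
 * Turn the controller on and invalidate all ways so that no stale
 * contents are visible.
 */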
static inline void
arml2cc_enable(struct arml2cc_softc *sc)
{
	mutex_spin_enter(&sc->sc_lock);

	arml2cc_write_4(sc, L2C_CTL, 1);	// turn it on

	arml2cc_cache_way_op(sc, L2C_INV_WAY, sc->sc_waymask);
	arml2cc_cache_sync(sc);

	mutex_spin_exit(&sc->sc_lock);
}

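/*
 * Decode the PL310 Cache Type register at the given offset and record
 * the L2 geometry (ways, way size, line size) in arm_scache.
 */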
void
arml2cc_init(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t o)
{
	struct arm_cache_info * const info = &arm_scache;

	uint32_t cfg = bus_space_read_4(bst, bsh, o + L2C_CACHE_TYPE);

	info->cache_type = __SHIFTOUT(cfg, CACHE_TYPE_CTYPE);
	info->cache_unified = __SHIFTOUT(cfg, CACHE_TYPE_HARVARD) == 0;
	u_int cfg_dsize = __SHIFTOUT(cfg, CACHE_TYPE_DSIZE);

	u_int d_waysize = 8192 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xWAYSIZE);
	info->dcache_ways = 8 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xASSOC);
	info->dcache_line_size = 32 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xLINESIZE);
	info->dcache_size = info->dcache_ways * d_waysize;

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	} else {
		u_int cfg_isize = __SHIFTOUT(cfg, CACHE_TYPE_ISIZE);
		u_int i_waysize = 8192 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xWAYSIZE);
		info->icache_ways = 8 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xASSOC);
		info->icache_line_size = 32 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xLINESIZE);
		info->icache_size = i_waysize * info->icache_ways;
	}
}

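/*
 * Round the physical range out to whole L2 lines, then perform the
 * PA-based operation a line at a time under the lock (a no-op if the
 * cache has been disabled) and finish with a cache sync.
 */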
static void
arml2cc_cache_range_op(paddr_t pa, psize_t len, bus_size_t cache_op)
{
	struct arml2cc_softc * const sc = arml2cc_sc;
	const size_t line_size = arm_scache.dcache_line_size;
	const size_t line_mask = line_size - 1;
	size_t off = pa & line_mask;
	if (off) {
		len += off;
		pa -= off;
	}
	len = roundup2(len, line_size);
	mutex_spin_enter(&sc->sc_lock);
	if (__predict_false(!sc->sc_enabled)) {
		mutex_spin_exit(&sc->sc_lock);
		return;
	}
	for (const paddr_t endpa = pa + len; pa < endpa; pa += line_size) {
		arml2cc_cache_op(sc, cache_op, pa, false);
	}
	arml2cc_cache_sync(sc);
	mutex_spin_exit(&sc->sc_lock);
}

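/*
 * cpufuncs back-ends: count the request and run the matching PA-based
 * range operation (invalidate, clean, or clean+invalidate).
 */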
static void
arml2cc_sdcache_inv_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_inv.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_INV_PA);
}

static void
arml2cc_sdcache_wb_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_wb.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_CLEAN_PA);
}

static void
arml2cc_sdcache_wbinv_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_wbinv.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_CLEAN_INV_PA);
}