/* $NetBSD: pl310.c,v 1.23 2025/12/16 12:20:22 skrll Exp $ */

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pl310.c,v 1.23 2025/12/16 12:20:22 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/atomic.h>

#include <arm/locore.h>

#include <arm/cortex/mpcore_var.h>
#include <arm/cortex/pl310_reg.h>
#include <arm/cortex/pl310_var.h>

static int arml2cc_match(device_t, cfdata_t, void *);
static void arml2cc_attach(device_t, device_t, void *);

#define	L2CC_SIZE	0x1000

struct arml2cc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_memh;
	kmutex_t sc_lock;
	uint32_t sc_waymask;
	struct evcnt sc_ev_inv __aligned(8);
	struct evcnt sc_ev_wb;
	struct evcnt sc_ev_wbinv;
	bool sc_enabled;
};

/*
 * Optional platform hook: when set, it is called to enable or disable
 * the cache instead of writing L2C_CTL directly.
 */
void (*arml2cc_enable_func)(bool);

__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_inv.ev_count) % 8 == 0);
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_wb.ev_count) % 8 == 0);
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_wbinv.ev_count) % 8 == 0);

CFATTACH_DECL_NEW(arml2cc, sizeof(struct arml2cc_softc),
    arml2cc_match, arml2cc_attach, NULL, NULL);

static inline void arml2cc_disable(struct arml2cc_softc *);
static inline void arml2cc_enable(struct arml2cc_softc *);
static void arml2cc_sdcache_wb_range(vaddr_t, paddr_t, psize_t);
static void arml2cc_sdcache_inv_range(vaddr_t, paddr_t, psize_t);
static void arml2cc_sdcache_wbinv_range(vaddr_t, paddr_t, psize_t);

static struct arml2cc_softc *arml2cc_sc;

static inline uint32_t
arml2cc_read_4(struct arml2cc_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_memh, o);
}

static inline void
arml2cc_write_4(struct arml2cc_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_memh, o, v);
}


/* ARGSUSED */
static int
arml2cc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (arml2cc_sc)
		return 0;

	if (!CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid) &&
	    !CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	/*
	 * This isn't present on UP A9s (since CBAR isn't present).
	 */
	uint32_t mpidr = armreg_mpidr_read();
	if (mpidr == 0 || (mpidr & MPIDR_U))
		return 0;

	return 1;
}

static const struct {
	uint8_t rev;
	uint8_t str[7];
} pl310_revs[] = {
	{ 0, " r0p0" },
	{ 2, " r1p0" },
	{ 4, " r2p0" },
	{ 5, " r3p0" },
	{ 6, " r3p1" },
	{ 7, " r3p1a" },
	{ 8, " r3p2" },
	{ 9, " r3p3" },
};

static void
arml2cc_attach(device_t parent, device_t self, void *aux)
{
	struct arml2cc_softc * const sc = device_private(self);
	struct mpcore_attach_args * const mpcaa = aux;
	const char * const xname = device_xname(self);
	prop_dictionary_t dict = device_properties(self);
	uint32_t off;

	aprint_naive("\n");

	if (!prop_dictionary_get_uint32(dict, "offset", &off))
		off = mpcaa->mpcaa_off1;

	arml2cc_sc = sc;
	sc->sc_dev = self;
	sc->sc_memt = mpcaa->mpcaa_memt;
	sc->sc_waymask = __BIT(arm_scache.dcache_ways) - 1;

	evcnt_attach_dynamic(&sc->sc_ev_inv, EVCNT_TYPE_MISC, NULL,
	    xname, "L2 inv requests");
	evcnt_attach_dynamic(&sc->sc_ev_wb, EVCNT_TYPE_MISC, NULL,
	    xname, "L2 wb requests");
	evcnt_attach_dynamic(&sc->sc_ev_wbinv, EVCNT_TYPE_MISC, NULL,
	    xname, "L2 wbinv requests");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh,
	    off, L2CC_SIZE, &sc->sc_memh);

	uint32_t id = arml2cc_read_4(sc, L2C_CACHE_ID);
	u_int rev = __SHIFTOUT(id, CACHE_ID_REV);

	const char *revstr = "";
	for (size_t i = 0; i < __arraycount(pl310_revs); i++) {
		if (rev == pl310_revs[i].rev) {
			revstr = pl310_revs[i].str;
			break;
		}
	}

	const bool enabled_p = arml2cc_read_4(sc, L2C_CTL) != 0;

	aprint_normal(": ARM PL310%s L2 Cache Controller%s\n",
	    revstr, enabled_p ? "" : " (disabled)");

	if (enabled_p) {
		if (device_cfdata(self)->cf_flags & 1) {
			arml2cc_disable(sc);
			aprint_normal_dev(self, "cache %s\n",
			    arml2cc_read_4(sc, L2C_CTL) ? "enabled" : "disabled");
			sc->sc_enabled = false;
		} else {
			cpufuncs.cf_sdcache_wb_range = arml2cc_sdcache_wb_range;
			cpufuncs.cf_sdcache_inv_range = arml2cc_sdcache_inv_range;
			cpufuncs.cf_sdcache_wbinv_range = arml2cc_sdcache_wbinv_range;
			sc->sc_enabled = true;
		}
	} else if ((device_cfdata(self)->cf_flags & 1) == 0) {
		if (!enabled_p) {
			arml2cc_enable(sc);
			aprint_normal_dev(self, "cache %s\n",
			    arml2cc_read_4(sc, L2C_CTL) ? "enabled" : "disabled");
		}
		cpufuncs.cf_sdcache_wb_range = arml2cc_sdcache_wb_range;
		cpufuncs.cf_sdcache_inv_range = arml2cc_sdcache_inv_range;
		cpufuncs.cf_sdcache_wbinv_range = arml2cc_sdcache_wbinv_range;
		sc->sc_enabled = true;
	}

	KASSERTMSG(arm_pcache.dcache_line_size == arm_scache.dcache_line_size,
	    "pcache %u scache %u",
	    arm_pcache.dcache_line_size, arm_scache.dcache_line_size);
}

/*
 * Issue a single maintenance operation and optionally poll until the
 * controller reports it complete.
 */
static inline void
arml2cc_cache_op(struct arml2cc_softc *sc, bus_size_t off, uint32_t val,
    bool wait)
{
	arml2cc_write_4(sc, off, val);
	if (wait) {
		while (arml2cc_read_4(sc, off) & 1) {
			/* spin */
		}
	}
}

/*
 * Issue a way-based (background) maintenance operation and wait until
 * it has completed on all requested ways.
 */
static inline void
arml2cc_cache_way_op(struct arml2cc_softc *sc, bus_size_t off, uint32_t way_mask)
{
	arml2cc_write_4(sc, off, way_mask);
	while (arml2cc_read_4(sc, off) & way_mask) {
		/* spin */
	}
}

static inline void
arml2cc_cache_sync(struct arml2cc_softc *sc)
{
	arml2cc_cache_op(sc, L2C_CACHE_SYNC, 0, true);
}

/* Clean and invalidate all ways, then turn the controller off. */
static inline void
arml2cc_disable(struct arml2cc_softc *sc)
{
	mutex_spin_enter(&sc->sc_lock);

	arml2cc_cache_way_op(sc, L2C_CLEAN_INV_WAY, sc->sc_waymask);
	arml2cc_cache_sync(sc);

	if (arml2cc_enable_func)
		arml2cc_enable_func(false);
	else
		arml2cc_write_4(sc, L2C_CTL, 0);	// turn it off

	mutex_spin_exit(&sc->sc_lock);
}

/* Invalidate all ways, then turn the controller on. */
static inline void
arml2cc_enable(struct arml2cc_softc *sc)
{
	mutex_spin_enter(&sc->sc_lock);

	arml2cc_cache_way_op(sc, L2C_INV_WAY, sc->sc_waymask);
	arml2cc_cache_sync(sc);

	if (arml2cc_enable_func)
		arml2cc_enable_func(true);
	else
		arml2cc_write_4(sc, L2C_CTL, 1);	// turn it on

	mutex_spin_exit(&sc->sc_lock);
}

void
arml2cc_set_enable_func(void (*func)(bool))
{
	arml2cc_enable_func = func;
}

/* Fill in arm_scache from the controller's Cache Type register. */
void
arml2cc_get_cacheinfo(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t o)
{
	struct arm_cache_info * const info = &arm_scache;

	uint32_t cfg = bus_space_read_4(bst, bsh, o + L2C_CACHE_TYPE);

	info->cache_type = __SHIFTOUT(cfg, CACHE_TYPE_CTYPE);
	info->cache_unified = __SHIFTOUT(cfg, CACHE_TYPE_HARVARD) == 0;
	u_int cfg_dsize = __SHIFTOUT(cfg, CACHE_TYPE_DSIZE);

	u_int d_waysize = 8192 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xWAYSIZE);
	info->dcache_ways = 8 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xASSOC);
	info->dcache_line_size = 32 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xLINESIZE);
	info->dcache_size = info->dcache_ways * d_waysize;
	info->dcache_type = CACHE_TYPE_PIPT;
	info->icache_type = CACHE_TYPE_PIPT;

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	} else {
		u_int cfg_isize = __SHIFTOUT(cfg, CACHE_TYPE_ISIZE);
		u_int i_waysize = 8192 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xWAYSIZE);
		info->icache_ways = 8 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xASSOC);
		info->icache_line_size = 32 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xLINESIZE);
		info->icache_size = i_waysize * info->icache_ways;
	}
}

/*
 * Apply a PA-based maintenance operation to every cache line in
 * [pa, pa + len), rounding the range out to whole cache lines.
 */
static void
arml2cc_cache_range_op(paddr_t pa, psize_t len, bus_size_t cache_op)
{
	struct arml2cc_softc * const sc = arml2cc_sc;
	const size_t line_size = arm_scache.dcache_line_size;
	const size_t line_mask = line_size - 1;
	size_t off = pa & line_mask;
	if (off) {
		len += off;
		pa -= off;
	}
	len = roundup2(len, line_size);
	mutex_spin_enter(&sc->sc_lock);
	if (__predict_false(!sc->sc_enabled)) {
		mutex_spin_exit(&sc->sc_lock);
		return;
	}
	for (const paddr_t endpa = pa + len; pa < endpa; pa += line_size) {
		arml2cc_cache_op(sc, cache_op, pa, false);
	}
	arml2cc_cache_sync(sc);
	mutex_spin_exit(&sc->sc_lock);
}

static void
arml2cc_sdcache_inv_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_inv.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_INV_PA);
}

static void
arml2cc_sdcache_wb_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_wb.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_CLEAN_PA);
}

static void
arml2cc_sdcache_wbinv_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_wbinv.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_CLEAN_INV_PA);
}