Home | History | Annotate | Line # | Download | only in cortex
pl310.c revision 1.17.16.1
      1  1.17.16.1  pgoyette /*	$NetBSD: pl310.c,v 1.17.16.1 2018/06/25 07:25:39 pgoyette Exp $	*/
      2        1.1      matt 
      3        1.1      matt /*-
      4        1.1      matt  * Copyright (c) 2012 The NetBSD Foundation, Inc.
      5        1.1      matt  * All rights reserved.
      6        1.1      matt  *
      7        1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
      8        1.1      matt  * by Matt Thomas
      9        1.1      matt  *
     10        1.1      matt  * Redistribution and use in source and binary forms, with or without
     11        1.1      matt  * modification, are permitted provided that the following conditions
     12        1.1      matt  * are met:
     13        1.1      matt  * 1. Redistributions of source code must retain the above copyright
     14        1.1      matt  *    notice, this list of conditions and the following disclaimer.
     15        1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     16        1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     17        1.1      matt  *    documentation and/or other materials provided with the distribution.
     18        1.1      matt  *
     19        1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20        1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21        1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22        1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23        1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24        1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25        1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26        1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27        1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28        1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29        1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     30        1.1      matt  */
     31        1.1      matt 
     32        1.1      matt #include <sys/cdefs.h>
     33  1.17.16.1  pgoyette __KERNEL_RCSID(0, "$NetBSD: pl310.c,v 1.17.16.1 2018/06/25 07:25:39 pgoyette Exp $");
     34        1.1      matt 
     35        1.1      matt #include <sys/param.h>
     36        1.1      matt #include <sys/bus.h>
     37        1.1      matt #include <sys/cpu.h>
     38        1.1      matt #include <sys/device.h>
     39        1.5      matt #include <sys/atomic.h>
     40        1.1      matt 
     41       1.13      matt #include <arm/locore.h>
     42       1.13      matt 
     43        1.1      matt #include <arm/cortex/mpcore_var.h>
     44        1.1      matt #include <arm/cortex/pl310_reg.h>
     45        1.3      matt #include <arm/cortex/pl310_var.h>
     46        1.1      matt 
static int arml2cc_match(device_t, cfdata_t, void *);
static void arml2cc_attach(device_t, device_t, void *);

/* Size of the PL310 register window mapped via bus_space_subregion(). */
#define	L2CC_SIZE	0x1000

struct arml2cc_softc {
	device_t sc_dev;		/* autoconf device handle */
	bus_space_tag_t sc_memt;	/* bus space tag for register access */
	bus_space_handle_t sc_memh;	/* mapped PL310 register window */
	kmutex_t sc_lock;		/* IPL_HIGH spin mutex around cache ops */
	uint32_t sc_waymask;		/* one bit per D-cache way */
	/*
	 * Event counters.  sc_ev_inv carries __aligned(8) so that the
	 * 64-bit ev_count fields of all three counters are naturally
	 * aligned for atomic_inc_64() (verified by the __CTASSERTs below).
	 */
	struct evcnt sc_ev_inv __aligned(8);	/* L2 invalidate requests */
	struct evcnt sc_ev_wb;			/* L2 write-back requests */
	struct evcnt sc_ev_wbinv;		/* L2 wb+inv requests */
	bool sc_enabled;	/* true when cache is on and hooks installed */
};

__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_inv.ev_count) % 8 == 0);
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_wb.ev_count) % 8 == 0);
__CTASSERT(offsetof(struct arml2cc_softc, sc_ev_wbinv.ev_count) % 8 == 0);

CFATTACH_DECL_NEW(arml2cc, sizeof(struct arml2cc_softc),
    arml2cc_match, arml2cc_attach, NULL, NULL);

static inline void arml2cc_disable(struct arml2cc_softc *);
static inline void arml2cc_enable(struct arml2cc_softc *);
static void arml2cc_sdcache_wb_range(vaddr_t, paddr_t, psize_t);
static void arml2cc_sdcache_inv_range(vaddr_t, paddr_t, psize_t);
static void arml2cc_sdcache_wbinv_range(vaddr_t, paddr_t, psize_t);

/* The single attached instance; also serves as an "already attached" flag. */
static struct arml2cc_softc *arml2cc_sc;
     78        1.1      matt 
     79        1.1      matt static inline uint32_t
     80        1.1      matt arml2cc_read_4(struct arml2cc_softc *sc, bus_size_t o)
     81        1.1      matt {
     82        1.1      matt 	return bus_space_read_4(sc->sc_memt, sc->sc_memh, o);
     83        1.1      matt }
     84        1.1      matt 
     85        1.1      matt static inline void
     86        1.1      matt arml2cc_write_4(struct arml2cc_softc *sc, bus_size_t o, uint32_t v)
     87        1.1      matt {
     88        1.1      matt 	bus_space_write_4(sc->sc_memt, sc->sc_memh, o, v);
     89        1.1      matt }
     90        1.1      matt 
     91        1.1      matt 
     92        1.1      matt /* ARGSUSED */
     93        1.1      matt static int
     94        1.1      matt arml2cc_match(device_t parent, cfdata_t cf, void *aux)
     95        1.1      matt {
     96        1.1      matt 	struct mpcore_attach_args * const mpcaa = aux;
     97        1.1      matt 
     98        1.5      matt 	if (arml2cc_sc)
     99        1.1      matt 		return 0;
    100        1.1      matt 
    101       1.17  jmcneill 	if (!CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid) &&
    102       1.17  jmcneill 	    !CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
    103        1.1      matt 		return 0;
    104        1.1      matt 
    105        1.1      matt 	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
    106        1.1      matt 		return 0;
    107        1.1      matt 
    108        1.1      matt 	/*
    109        1.1      matt 	 * This isn't present on UP A9s (since CBAR isn't present).
    110        1.1      matt 	 */
    111        1.1      matt 	uint32_t mpidr = armreg_mpidr_read();
    112        1.1      matt 	if (mpidr == 0 || (mpidr & MPIDR_U))
    113        1.1      matt 		return 0;
    114        1.1      matt 
    115        1.1      matt 	return 1;
    116        1.1      matt }
    117        1.1      matt 
/*
 * Map the REV field of the L2C cache ID register to a printable
 * revision suffix for the attach banner; unmatched values print "".
 */
static const struct {
	uint8_t rev;
	uint8_t str[7];
} pl310_revs[] = {
	{ 0, " r0p0" },
	{ 2, " r1p0" },
	{ 4, " r2p0" },
	{ 5, " r3p0" },
	{ 6, " r3p1" },
	{ 7, " r3p1a" },
	{ 8, " r3p2" },
	{ 9, " r3p3" },
};
    131        1.1      matt 
    132        1.1      matt static void
    133        1.1      matt arml2cc_attach(device_t parent, device_t self, void *aux)
    134        1.1      matt {
    135        1.1      matt         struct arml2cc_softc * const sc = device_private(self);
    136        1.1      matt 	struct mpcore_attach_args * const mpcaa = aux;
    137        1.5      matt 	const char * const xname = device_xname(self);
    138       1.15      matt 	prop_dictionary_t dict = device_properties(self);
    139       1.15      matt 	uint32_t off;
    140       1.15      matt 
    141       1.17  jmcneill 	aprint_naive("\n");
    142       1.17  jmcneill 
    143       1.15      matt 	if (!prop_dictionary_get_uint32(dict, "offset", &off)) {
    144       1.17  jmcneill 		if (CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid)) {
    145       1.17  jmcneill 			/*
    146       1.17  jmcneill 			 * PL310 on Cortex-A5 is external to PERIPHBASE, so
    147       1.17  jmcneill 			 * "offset" property is required.
    148       1.17  jmcneill 			 */
    149       1.17  jmcneill 			aprint_normal(": not configured\n");
    150       1.17  jmcneill 			return;
    151       1.17  jmcneill 		}
    152  1.17.16.1  pgoyette 		off = mpcaa->mpcaa_off1;
    153       1.15      matt 	}
    154        1.1      matt 
    155        1.5      matt 	arml2cc_sc = sc;
    156        1.1      matt 	sc->sc_dev = self;
    157        1.1      matt 	sc->sc_memt = mpcaa->mpcaa_memt;
    158        1.5      matt 	sc->sc_waymask = __BIT(arm_scache.dcache_ways) - 1;
    159        1.5      matt 
    160        1.5      matt 	evcnt_attach_dynamic(&sc->sc_ev_inv, EVCNT_TYPE_MISC, NULL,
    161        1.5      matt 	    xname, "L2 inv requests");
    162        1.5      matt 	evcnt_attach_dynamic(&sc->sc_ev_wb, EVCNT_TYPE_MISC, NULL,
    163        1.5      matt 	    xname, "L2 wb requests");
    164        1.5      matt 	evcnt_attach_dynamic(&sc->sc_ev_wbinv, EVCNT_TYPE_MISC, NULL,
    165        1.5      matt 	    xname, "L2 wbinv requests");
    166        1.5      matt 
    167        1.5      matt 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
    168        1.1      matt 
    169        1.1      matt 	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh,
    170       1.15      matt 	    off, L2CC_SIZE, &sc->sc_memh);
    171        1.1      matt 
    172        1.1      matt 	uint32_t id = arml2cc_read_4(sc, L2C_CACHE_ID);
    173        1.1      matt 	u_int rev = __SHIFTOUT(id, CACHE_ID_REV);
    174        1.1      matt 
    175        1.1      matt 	const char *revstr = "";
    176        1.1      matt 	for (size_t i = 0; i < __arraycount(pl310_revs); i++) {
    177        1.1      matt 		if (rev == pl310_revs[i].rev) {
    178        1.1      matt 			revstr = pl310_revs[i].str;
    179        1.1      matt 			break;
    180        1.1      matt 		}
    181        1.1      matt 	}
    182        1.1      matt 
    183        1.4      matt 	const bool enabled_p = arml2cc_read_4(sc, L2C_CTL) != 0;
    184        1.4      matt 
    185        1.3      matt 	aprint_normal(": ARM PL310%s L2 Cache Controller%s\n",
    186        1.4      matt 	    revstr, enabled_p ? "" : " (disabled)");
    187        1.4      matt 
    188        1.4      matt 	if (enabled_p) {
    189        1.6      matt 		if (device_cfdata(self)->cf_flags & 1) {
    190        1.5      matt 			arml2cc_disable(sc);
    191        1.5      matt 			aprint_normal_dev(self, "cache %s\n",
    192        1.5      matt 			    arml2cc_read_4(sc, L2C_CTL) ? "enabled" : "disabled");
    193        1.5      matt 			sc->sc_enabled = false;
    194        1.5      matt 		} else {
    195        1.5      matt 			cpufuncs.cf_sdcache_wb_range = arml2cc_sdcache_wb_range;
    196        1.5      matt 			cpufuncs.cf_sdcache_inv_range = arml2cc_sdcache_inv_range;
    197        1.5      matt 			cpufuncs.cf_sdcache_wbinv_range = arml2cc_sdcache_wbinv_range;
    198        1.5      matt 			sc->sc_enabled = true;
    199        1.5      matt 		}
    200        1.6      matt 	} else if ((device_cfdata(self)->cf_flags & 1) == 0) {
    201        1.6      matt 		if (!enabled_p) {
    202        1.6      matt 			arml2cc_enable(sc);
    203        1.6      matt 			aprint_normal_dev(self, "cache %s\n",
    204        1.6      matt 			    arml2cc_read_4(sc, L2C_CTL) ? "enabled" : "disabled");
    205        1.6      matt 		}
    206        1.6      matt 		cpufuncs.cf_sdcache_wb_range = arml2cc_sdcache_wb_range;
    207        1.6      matt 		cpufuncs.cf_sdcache_inv_range = arml2cc_sdcache_inv_range;
    208        1.6      matt 		cpufuncs.cf_sdcache_wbinv_range = arml2cc_sdcache_wbinv_range;
    209        1.6      matt 		sc->sc_enabled = true;
    210        1.5      matt 	}
    211        1.3      matt 
    212       1.12      matt 	KASSERTMSG(arm_pcache.dcache_line_size == arm_scache.dcache_line_size,
    213       1.12      matt 	    "pcache %u scache %u",
    214       1.12      matt 	    arm_pcache.dcache_line_size, arm_scache.dcache_line_size);
    215        1.3      matt }
    216        1.3      matt 
    217        1.5      matt static inline void
    218       1.10      matt arml2cc_cache_op(struct arml2cc_softc *sc, bus_size_t off, uint32_t val,
    219       1.10      matt     bool wait)
    220        1.3      matt {
    221        1.5      matt 	arml2cc_write_4(sc, off, val);
    222       1.10      matt 	if (wait) {
    223       1.10      matt 		while (arml2cc_read_4(sc, off) & 1) {
    224       1.10      matt 			/* spin */
    225       1.10      matt 		}
    226        1.3      matt 	}
    227        1.5      matt }
    228        1.3      matt 
    229        1.5      matt static inline void
    230        1.5      matt arml2cc_cache_way_op(struct arml2cc_softc *sc, bus_size_t off, uint32_t way_mask)
    231        1.5      matt {
    232        1.5      matt 	arml2cc_write_4(sc, off, way_mask);
    233        1.5      matt 	while (arml2cc_read_4(sc, off) & way_mask) {
    234        1.3      matt 		/* spin */
    235        1.3      matt 	}
    236        1.5      matt }
    237        1.1      matt 
/*
 * Issue a Cache Sync and wait for it to complete; per the PL310 TRM
 * this drains all previously issued maintenance operations.
 */
static inline void
arml2cc_cache_sync(struct arml2cc_softc *sc)
{
	arml2cc_cache_op(sc, L2C_CACHE_SYNC, 0, true);
}
    243        1.5      matt 
/*
 * Turn the L2 cache off.  Order matters: every way is cleaned and
 * invalidated, and the write-back is drained with a cache sync,
 * BEFORE the control register is cleared -- otherwise dirty lines
 * would be lost.
 */
static inline void
arml2cc_disable(struct arml2cc_softc *sc)
{
	mutex_spin_enter(&sc->sc_lock);

	arml2cc_cache_way_op(sc, L2C_CLEAN_INV_WAY, sc->sc_waymask);
	arml2cc_cache_sync(sc);

	arml2cc_write_4(sc, L2C_CTL, 0);	// turn it off
	mutex_spin_exit(&sc->sc_lock);
}
    255        1.5      matt 
/*
 * Turn the L2 cache on.  All ways are invalidated (discarding whatever
 * the RAMs held) while the cache is still off, then the control
 * register is set to enable it.
 */
static inline void
arml2cc_enable(struct arml2cc_softc *sc)
{
	mutex_spin_enter(&sc->sc_lock);

	arml2cc_cache_way_op(sc, L2C_INV_WAY, sc->sc_waymask);
	arml2cc_cache_sync(sc);

	arml2cc_write_4(sc, L2C_CTL, 1);	// turn it on

	mutex_spin_exit(&sc->sc_lock);
}
    268        1.3      matt 
    269        1.3      matt void
    270        1.3      matt arml2cc_init(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t o)
    271        1.3      matt {
    272        1.3      matt 	struct arm_cache_info * const info = &arm_scache;
    273        1.3      matt 
    274        1.3      matt 	uint32_t cfg = bus_space_read_4(bst, bsh, o + L2C_CACHE_TYPE);
    275        1.3      matt 
    276        1.3      matt 	info->cache_type = __SHIFTOUT(cfg, CACHE_TYPE_CTYPE);
    277        1.3      matt 	info->cache_unified = __SHIFTOUT(cfg, CACHE_TYPE_HARVARD) == 0;
    278        1.1      matt 	u_int cfg_dsize = __SHIFTOUT(cfg, CACHE_TYPE_DSIZE);
    279        1.3      matt 
    280        1.1      matt 	u_int d_waysize = 8192 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xWAYSIZE);
    281        1.3      matt 	info->dcache_ways = 8 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xASSOC);
    282        1.3      matt 	info->dcache_line_size = 32 << __SHIFTOUT(cfg_dsize, CACHE_TYPE_xLINESIZE);
    283        1.3      matt 	info->dcache_size = info->dcache_ways * d_waysize;
    284       1.14      matt 	info->dcache_type = CACHE_TYPE_PIPT;
    285       1.14      matt 	info->icache_type = CACHE_TYPE_PIPT;
    286        1.3      matt 
    287        1.3      matt 	if (info->cache_unified) {
    288        1.3      matt 		info->icache_ways = info->dcache_ways;
    289        1.3      matt 		info->icache_line_size = info->dcache_line_size;
    290        1.3      matt 		info->icache_size = info->dcache_size;
    291        1.3      matt 	} else {
    292        1.1      matt 		u_int cfg_isize = __SHIFTOUT(cfg, CACHE_TYPE_ISIZE);
    293        1.1      matt 		u_int i_waysize = 8192 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xWAYSIZE);
    294        1.3      matt 		info->icache_ways = 8 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xASSOC);
    295        1.3      matt 		info->icache_line_size = 32 << __SHIFTOUT(cfg_isize, CACHE_TYPE_xLINESIZE);
    296        1.3      matt 		info->icache_size = i_waysize * info->icache_ways;
    297        1.1      matt 	}
    298        1.1      matt }
    299        1.4      matt 
    300        1.4      matt static void
    301        1.5      matt arml2cc_cache_range_op(paddr_t pa, psize_t len, bus_size_t cache_op)
    302        1.4      matt {
    303        1.5      matt 	struct arml2cc_softc * const sc = arml2cc_sc;
    304        1.5      matt 	const size_t line_size = arm_scache.dcache_line_size;
    305        1.4      matt 	const size_t line_mask = line_size - 1;
    306        1.5      matt 	size_t off = pa & line_mask;
    307        1.4      matt 	if (off) {
    308        1.4      matt 		len += off;
    309        1.5      matt 		pa -= off;
    310        1.4      matt 	}
    311        1.5      matt 	len = roundup2(len, line_size);
    312       1.11      matt 	mutex_spin_enter(&sc->sc_lock);
    313       1.11      matt 	if (__predict_false(!sc->sc_enabled)) {
    314        1.5      matt 		mutex_spin_exit(&sc->sc_lock);
    315       1.11      matt 		return;
    316        1.4      matt 	}
    317       1.11      matt 	for (const paddr_t endpa = pa + len; pa < endpa; pa += line_size) {
    318       1.11      matt 		arml2cc_cache_op(sc, cache_op, pa, false);
    319       1.11      matt 	}
    320       1.11      matt 	arml2cc_cache_sync(sc);
    321       1.11      matt 	mutex_spin_exit(&sc->sc_lock);
    322        1.4      matt }
    323        1.5      matt 
/*
 * cpufuncs hook: invalidate the L2 lines covering pa .. pa + len.
 * va is unused -- the PL310 is operated by physical address.
 */
static void
arml2cc_sdcache_inv_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_inv.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_INV_PA);
}
    330        1.5      matt 
/*
 * cpufuncs hook: write back (clean) the L2 lines covering
 * pa .. pa + len.  va is unused -- the PL310 operates by PA.
 */
static void
arml2cc_sdcache_wb_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_wb.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_CLEAN_PA);
}
    337        1.5      matt 
/*
 * cpufuncs hook: write back and invalidate the L2 lines covering
 * pa .. pa + len.  va is unused -- the PL310 operates by PA.
 */
static void
arml2cc_sdcache_wbinv_range(vaddr_t va, paddr_t pa, psize_t len)
{
	atomic_inc_64(&arml2cc_sc->sc_ev_wbinv.ev_count);
	arml2cc_cache_range_op(pa, len, L2C_CLEAN_INV_PA);
}
    344