/* $NetBSD: tprof_armv7.c,v 1.4 2020/10/30 18:54:37 skrll Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof_armv7.c,v 1.4 2020/10/30 18:54:37 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <dev/tprof/tprof.h>

#include <arm/armreg.h>
#include <arm/locore.h>

#include <dev/tprof/tprof_armv7.h>

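/*
 * PMCR.E enables the counters and PMCR.D selects the cycle counter's
 * divide-by-64 clock.  PMEVTYPER.P and PMEVTYPER.U suppress event counting
 * at PL1 (kernel) and PL0 (user) respectively; EVTCOUNT selects the event.
 */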
#define	PMCR_D			__BIT(3)
#define	PMCR_E			__BIT(0)

#define	PMEVTYPER_P		__BIT(31)
#define	PMEVTYPER_U		__BIT(30)
#define	PMEVTYPER_EVTCOUNT	__BITS(7,0)

static tprof_param_t armv7_pmu_param;		/* active profiling parameters */
static const u_int armv7_pmu_counter = 1;	/* event counter used for sampling */
static uint32_t counter_val;			/* number of events between samples */
static uint32_t counter_reset_val;		/* counter reload value after overflow */

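/*
 * Check whether the given event number is advertised in the common event
 * identification registers (PMCEID0/PMCEID1).  Only events 0-63 can be
 * queried this way.
 */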
static bool
armv7_pmu_event_implemented(uint16_t event)
{
	uint32_t eid[2];

	if (event >= 64)
		return false;

	eid[0] = armreg_pmceid0_read();
	eid[1] = armreg_pmceid1_read();

	const u_int idx = event / 32;
	const u_int bit = event % 32;

	if (eid[idx] & __BIT(bit))
		return true;

	return false;
}

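/*
 * Program the event type register of the given counter: select the counter
 * through PMSELR, then write PMXEVTYPER.  The ISB makes the counter
 * selection visible before the indirect register access.
 */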
static void
armv7_pmu_set_pmevtyper(u_int counter, uint64_t val)
{
	armreg_pmselr_write(counter);
	isb();
	armreg_pmxevtyper_write(val);
}

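/*
 * Set the current value of the given counter: select it through PMSELR,
 * then write PMXEVCNTR.
 */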
static void
armv7_pmu_set_pmevcntr(u_int counter, uint32_t val)
{
	armreg_pmselr_write(counter);
	isb();
	armreg_pmxevcntr_write(val);
}

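/*
 * Per-CPU start routine, run on every CPU via xcall: enable the PMU,
 * program the event and privilege filters, prime the counter so that it
 * overflows after roughly counter_val events, and enable the overflow
 * interrupt.
 */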
static void
armv7_pmu_start_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv7_pmu_counter);
	uint64_t pmcr, pmevtyper;

	/* Enable performance monitor */
	pmcr = armreg_pmcr_read();
	pmcr |= PMCR_E;
	armreg_pmcr_write(pmcr);

	/* Disable event counter */
	armreg_pmcntenclr_write(counter_mask);

	/* Configure event counter */
	pmevtyper = __SHIFTIN(armv7_pmu_param.p_event, PMEVTYPER_EVTCOUNT);
	if (!ISSET(armv7_pmu_param.p_flags, TPROF_PARAM_USER))
		pmevtyper |= PMEVTYPER_U;
	if (!ISSET(armv7_pmu_param.p_flags, TPROF_PARAM_KERN))
		pmevtyper |= PMEVTYPER_P;

	armv7_pmu_set_pmevtyper(armv7_pmu_counter, pmevtyper);

	/* Enable overflow interrupts */
	armreg_pmintenset_write(counter_mask);

	/* Clear overflow flag */
	armreg_pmovsr_write(counter_mask);

	/* Initialize event counter value */
	armv7_pmu_set_pmevcntr(armv7_pmu_counter, counter_reset_val);

	/* Enable event counter */
	armreg_pmcntenset_write(counter_mask);
}

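/*
 * Per-CPU stop routine, run on every CPU via xcall: disable the overflow
 * interrupt, the event counter, and the PMU.
 */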
static void
armv7_pmu_stop_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv7_pmu_counter);
	uint32_t pmcr;

	/* Disable overflow interrupts */
	armreg_pmintenclr_write(counter_mask);

	/* Disable event counter */
	armreg_pmcntenclr_write(counter_mask);

	/* Disable performance monitor */
	pmcr = armreg_pmcr_read();
	pmcr &= ~PMCR_E;
	armreg_pmcr_write(pmcr);
}

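/*
 * Estimate the sampling frequency.  Aim for roughly 10000 samples per
 * second based on the CPU clock frequency, assuming a 4 GHz clock if it
 * is unknown, and scale down by 64 when the PMCR.D clock divider is set.
 */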
static uint64_t
armv7_pmu_estimate_freq(void)
{
	uint64_t cpufreq = curcpu()->ci_data.cpu_cc_freq;
	uint64_t freq = 10000;
	uint32_t pmcr;

	counter_val = cpufreq / freq;
	if (counter_val == 0)
		counter_val = 4000000000ULL / freq;

	pmcr = armreg_pmcr_read();
	if (pmcr & PMCR_D)
		counter_val /= 64;

	return freq;
}

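/*
 * Report the backend identity to the tprof framework.
 */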
static uint32_t
armv7_pmu_ident(void)
{
	return TPROF_IDENT_ARMV7_GENERIC;
}

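/*
 * Start profiling: verify that the requested event is implemented on this
 * CPU, compute the counter reload value, and start counting on all CPUs
 * with a broadcast xcall.
 */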
static int
armv7_pmu_start(const tprof_param_t *param)
{
	uint64_t xc;

	if (!armv7_pmu_event_implemented(param->p_event)) {
		printf("%s: event %#llx not implemented on this CPU\n",
		    __func__, param->p_event);
		return EINVAL;
	}

	counter_reset_val = -counter_val + 1;

	armv7_pmu_param = *param;
	xc = xc_broadcast(0, armv7_pmu_start_cpu, NULL, NULL);
	xc_wait(xc);

	return 0;
}

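/*
 * Stop profiling: stop counting on all CPUs with a broadcast xcall.
 */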
static void
armv7_pmu_stop(const tprof_param_t *param)
{
	uint64_t xc;

	xc = xc_broadcast(0, armv7_pmu_stop_cpu, NULL, NULL);
	xc_wait(xc);
}

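/*
 * Backend operations vector registered with the tprof framework.
 */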
static const tprof_backend_ops_t tprof_armv7_pmu_ops = {
	.tbo_estimate_freq = armv7_pmu_estimate_freq,
	.tbo_ident = armv7_pmu_ident,
	.tbo_start = armv7_pmu_start,
	.tbo_stop = armv7_pmu_stop,
};

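/*
 * Overflow interrupt handler.  If the sampling counter overflowed, record
 * a sample at the interrupted PC, classify it as kernel or user by address,
 * and reload the counter.  The overflow flags that were read are then
 * cleared.
 */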
int
armv7_pmu_intr(void *priv)
{
	const struct trapframe * const tf = priv;
	const uint32_t counter_mask = __BIT(armv7_pmu_counter);
	tprof_frame_info_t tfi;

	const uint32_t pmovsr = armreg_pmovsr_read();
	if ((pmovsr & counter_mask) != 0) {
		tfi.tfi_pc = tf->tf_pc;
		tfi.tfi_inkernel = tfi.tfi_pc >= VM_MIN_KERNEL_ADDRESS &&
		    tfi.tfi_pc < VM_MAX_KERNEL_ADDRESS;
		tprof_sample(NULL, &tfi);

		armv7_pmu_set_pmevcntr(armv7_pmu_counter, counter_reset_val);
	}
	armreg_pmovsr_write(pmovsr);

	return 1;
}

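/*
 * Put the PMU into a known quiescent state (user access, interrupts and
 * counters disabled), then register this backend with the tprof framework.
 */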
int
armv7_pmu_init(void)
{
	/* Disable user mode access to performance monitors */
	armreg_pmuserenr_write(0);

	/* Disable interrupts */
	armreg_pmintenclr_write(~0U);

	/* Disable counters */
	armreg_pmcntenclr_write(~0U);

	/* Disable performance monitor */
	armreg_pmcr_write(0);

	return tprof_backend_register("tprof_armv7", &tprof_armv7_pmu_ops,
	    TPROF_BACKEND_VERSION);
}