Home | History | Annotate | Line # | Download | only in include
cpu.h revision 1.48.2.1
      1  1.48.2.1    martin /* $NetBSD: cpu.h,v 1.48.2.1 2024/10/13 10:43:11 martin Exp $ */
      2       1.1      matt 
      3       1.1      matt /*-
      4      1.26     skrll  * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
      5       1.1      matt  * All rights reserved.
      6       1.1      matt  *
      7       1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
      8       1.1      matt  * by Matt Thomas of 3am Software Foundry.
      9       1.1      matt  *
     10       1.1      matt  * Redistribution and use in source and binary forms, with or without
     11       1.1      matt  * modification, are permitted provided that the following conditions
     12       1.1      matt  * are met:
     13       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     14       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     15       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     17       1.1      matt  *    documentation and/or other materials provided with the distribution.
     18       1.1      matt  *
     19       1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20       1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21       1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22       1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23       1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24       1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25       1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26       1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27       1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28       1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29       1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     30       1.1      matt  */
     31       1.1      matt 
     32       1.1      matt #ifndef _AARCH64_CPU_H_
     33       1.1      matt #define _AARCH64_CPU_H_
     34       1.1      matt 
     35      1.21     skrll #include <arm/cpu.h>
     36      1.21     skrll 
     37       1.1      matt #ifdef __aarch64__
     38       1.1      matt 
     39       1.2       ryo #ifdef _KERNEL_OPT
     40      1.38       ryo #include "opt_gprof.h"
     41       1.2       ryo #include "opt_multiprocessor.h"
     42      1.48     skrll #include "opt_pmap.h"
     43       1.2       ryo #endif
     44       1.2       ryo 
     45       1.7       ryo #include <sys/param.h>
     46       1.7       ryo 
     47       1.1      matt #if defined(_KERNEL) || defined(_KMEMUSER)
     48       1.2       ryo #include <sys/evcnt.h>
     49      1.11       mrg 
     50      1.35     skrll #include <aarch64/armreg.h>
     51       1.2       ryo #include <aarch64/frame.h>
     52       1.2       ryo 
                             /*
                              * Machine-dependent clock frame: the trap frame captured when the
                              * clock interrupt was taken, handed to the MI clock handlers.
                              */
      53       1.1      matt struct clockframe {
      54       1.2       ryo 	struct trapframe cf_tf;
      55       1.1      matt };
      56       1.1      matt 
      57       1.2       ryo /* user mode iff low 4 SPSR mode bits are 0: (spsr & 15) == SPSR_M_EL0T (64bit) or USER (32bit) */
      58       1.2       ryo #define CLKF_USERMODE(cf)	((((cf)->cf_tf.tf_spsr) & 0x0f) == 0)
      59       1.2       ryo #define CLKF_PC(cf)		((cf)->cf_tf.tf_pc)
                             /* the clock interrupt itself counts one level, so depth > 1 means it interrupted another interrupt */
      60       1.2       ryo #define CLKF_INTR(cf)		((void)(cf), curcpu()->ci_intr_depth > 1)
     61       1.1      matt 
     62      1.12     skrll /*
     63      1.12     skrll  * LWP_PC: Find out the program counter for the given lwp.
     64      1.12     skrll  */
     65      1.12     skrll #define LWP_PC(l)		((l)->l_md.md_utf->tf_pc)
     66      1.12     skrll 
     67       1.1      matt #include <sys/cpu_data.h>
     68       1.1      matt #include <sys/device_if.h>
     69       1.1      matt #include <sys/intr.h>
     70       1.1      matt 
                             /*
                              * Per-CPU function pointers (CPU-model specific implementations,
                              * NOTE(review): presumably selected at attach time -- confirm).
                              */
      71      1.13       ryo struct aarch64_cpufuncs {
      72      1.13       ryo 	void (*cf_set_ttbr0)(uint64_t);	/* install a new TTBR0 translation table base */
      73      1.24       ryo 	void (*cf_icache_sync_range)(vaddr_t, vsize_t);	/* sync icache for [va, va+size) */
      74      1.13       ryo };
     75      1.13       ryo 
      76      1.43     skrll #define MAX_CACHE_LEVEL	8		/* ARMv8 has maximum 8 level cache */
      77      1.43     skrll 
                             /* geometry of one cache (instruction or data) at a single level */
      78      1.43     skrll struct aarch64_cache_unit {
      79      1.43     skrll 	u_int cache_type;
      80      1.43     skrll #define CACHE_TYPE_VPIPT	0	/* VMID-aware PIPT */
      81      1.43     skrll #define CACHE_TYPE_VIVT		1	/* ASID-tagged VIVT */
      82      1.43     skrll #define CACHE_TYPE_VIPT		2	/* virtually indexed, physically tagged */
      83      1.43     skrll #define CACHE_TYPE_PIPT		3	/* physically indexed, physically tagged */
                             /* NOTE(review): sizes presumably in bytes, ways/sets are counts -- confirm against the probe code */
      84      1.43     skrll 	u_int cache_line_size;
      85      1.43     skrll 	u_int cache_ways;
      86      1.43     skrll 	u_int cache_sets;
      87      1.43     skrll 	u_int cache_way_size;
      88      1.43     skrll 	u_int cache_size;
      89      1.43     skrll };
     90      1.43     skrll 
                             /* describes one cache level: what is cacheable there plus I/D geometry */
      91      1.43     skrll struct aarch64_cache_info {
      92      1.43     skrll 	u_int cacheable;
      93      1.43     skrll #define CACHE_CACHEABLE_NONE	0
      94      1.43     skrll #define CACHE_CACHEABLE_ICACHE	1	/* instruction cache only */
      95      1.43     skrll #define CACHE_CACHEABLE_DCACHE	2	/* data cache only */
      96      1.43     skrll #define CACHE_CACHEABLE_IDCACHE	3	/* instruction and data caches */
      97      1.43     skrll #define CACHE_CACHEABLE_UNIFIED	4	/* unified cache */
                             /* NOTE(review): for a unified level, presumably both members describe the same cache -- confirm */
      98      1.43     skrll 	struct aarch64_cache_unit icache;
      99      1.43     skrll 	struct aarch64_cache_unit dcache;
     100      1.43     skrll };
    101      1.43     skrll 
                             /*
                              * Per-CPU state; one instance per CPU (see cpu_info_store[] /
                              * cpu_info[] below).  Fields stored to by remote CPUs are segregated
                              * into their own COHERENCY_UNIT-aligned cache lines -- see the inline
                              * comments.
                              */
     102       1.1      matt struct cpu_info {
     103       1.1      matt 	struct cpu_data ci_data;	/* MI per-cpu data */
     104       1.1      matt 	device_t ci_dev;	/* autoconf(9) device handle */
     105       1.1      matt 	cpuid_t ci_cpuid;	/* CPU id; NOTE(review): presumably MPIDR-derived -- confirm */
     106      1.26     skrll 
     107      1.26     skrll 	/*
     108      1.26     skrll 	 * the following are in their own cache line, as they are stored to
     109      1.26     skrll 	 * regularly by remote CPUs; when they were mixed with other fields
     110      1.26     skrll 	 * we observed frequent cache misses.
     111      1.26     skrll 	 */
     112      1.26     skrll 	int ci_want_resched __aligned(COHERENCY_UNIT);
     113      1.26     skrll 	/* XXX pending IPIs? */
     114      1.26     skrll 
     115      1.26     skrll 	/*
     116      1.26     skrll 	 * this is stored frequently, and is fetched by remote CPUs.
     117      1.26     skrll 	 */
     118      1.26     skrll 	struct lwp *ci_curlwp __aligned(COHERENCY_UNIT);
     119      1.16        ad 	struct lwp *ci_onproc;
     120      1.26     skrll 
     121      1.26     skrll 	/*
     122      1.26     skrll 	 * largely CPU-private.
     123      1.26     skrll 	 */
     124      1.26     skrll 	struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
     125       1.1      matt 
     126       1.1      matt 	uint64_t ci_lastintr;	/* last interrupt timestamp; NOTE(review): confirm units */
     127       1.1      matt 
     128       1.1      matt 	int ci_mtx_oldspl;	/* spl to restore when mutex count drains (see kern_mutex.c) */
     129       1.1      matt 	int ci_mtx_count;	/* spin mutex nesting bookkeeping (see kern_mutex.c) */
     130       1.1      matt 
     131      1.32  jmcneill 	int ci_cpl;		/* current processor level (spl) */
     132      1.46  jmcneill 	volatile int ci_hwpl;	/* current hardware priority */
     133       1.2       ryo 	volatile u_int ci_softints;	/* pending softints by level; tested against ci_cpl */
     134       1.1      matt 	volatile u_int ci_intr_depth;	/* hw interrupt nesting depth (0 = not in interrupt) */
                             	/* deferred-interrupt bookkeeping; NOTE(review): presumably for pic(9) backends -- confirm */
     135      1.37     skrll 	volatile uint32_t ci_blocked_pics;
     136      1.37     skrll 	volatile uint32_t ci_pending_pics;
     137      1.37     skrll 	volatile uint32_t ci_pending_ipls;
     138       1.2       ryo 
     139      1.23  riastrad 	int ci_kfpu_spl;	/* spl saved across in-kernel FPU use -- NOTE(review): confirm against fpu code */
     140      1.23  riastrad 
     141      1.48     skrll #if defined(PMAP_MI)
     142      1.48     skrll         struct pmap_tlb_info *ci_tlb_info;
     143      1.48     skrll         struct pmap *ci_pmap_lastuser;
     144      1.48     skrll         struct pmap *ci_pmap_cur;
     145      1.48     skrll #endif
     146      1.48     skrll 
     147      1.41     skrll 	/* ASID of current pmap */
     148      1.41     skrll 	tlb_asid_t ci_pmap_asid_cur;
     149      1.40     skrll 
     150       1.2       ryo 	/* event counters */
     151       1.2       ryo 	struct evcnt ci_vfp_use;
     152       1.2       ryo 	struct evcnt ci_vfp_reuse;
     153       1.2       ryo 	struct evcnt ci_vfp_save;
     154       1.2       ryo 	struct evcnt ci_vfp_release;
     155      1.25       ryo 	struct evcnt ci_uct_trap;
     156      1.29  jmcneill 	struct evcnt ci_intr_preempt;
     157  1.48.2.1    martin 	struct evcnt ci_rndrrs_fail;	/* failed RNDRRS reads; NOTE(review): presumably the ARMv8.5 RNG insn */
     158       1.6  jmcneill 
     159      1.18       mrg 	/* FDT or similar supplied "cpu capacity" */
     160      1.18       mrg 	uint32_t ci_capacity_dmips_mhz;
     161      1.18       mrg 
     162       1.6  jmcneill 	/* interrupt controller */
     163       1.6  jmcneill 	u_int ci_gic_redist;	/* GICv3 redistributor index */
     164       1.6  jmcneill 	uint64_t ci_gic_sgir;	/* GICv3 SGIR target */
     165       1.7       ryo 
     166       1.9  jmcneill 	/* ACPI */
     167      1.30  jmcneill 	uint32_t ci_acpiid;	/* ACPI Processor Unique ID */
     168       1.9  jmcneill 
     169      1.43     skrll 	/* cached system registers */
     170      1.43     skrll 	uint64_t ci_sctlr_el1;
     171      1.43     skrll 	uint64_t ci_sctlr_el2;
     172      1.43     skrll 
     173      1.42     skrll 	/* sysctl(9) exposed system registers */
     174      1.11       mrg 	struct aarch64_sysctl_cpu_id ci_id;
     175       1.7       ryo 
     176      1.42     skrll 	/* cache information and function pointers */
     177      1.44     skrll 	struct aarch64_cache_info ci_cacheinfo[MAX_CACHE_LEVEL];
     178      1.13       ryo 	struct aarch64_cpufuncs ci_cpufuncs;
     179       1.7       ryo 
     180      1.38       ryo #if defined(GPROF) && defined(MULTIPROCESSOR)
     181      1.38       ryo 	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
     182      1.38       ryo #endif
     183       1.7       ryo } __aligned(COHERENCY_UNIT);
    184       1.1      matt 
    185      1.22  christos #ifdef _KERNEL
                             /*
                              * curlwp lives in the TPIDR_EL1 system register; reading it has no
                              * side effects and always yields the same value within an lwp, hence
                              * __attribute__((const)).
                              */
     186      1.45       ryo static inline __always_inline struct lwp * __attribute__ ((const))
     187      1.26     skrll aarch64_curlwp(void)
     188       1.1      matt {
     189      1.26     skrll 	struct lwp *l;
     190      1.26     skrll 	__asm("mrs %0, tpidr_el1" : "=r"(l));
     191      1.26     skrll 	return l;
     192       1.1      matt }
     193       1.1      matt 
     194      1.26     skrll /* forward declaration; defined in sys/lwp.h. */
     195      1.26     skrll static __inline struct cpu_info *lwp_getcpu(struct lwp *);
     196      1.26     skrll 
                             /* curcpu() is derived from curlwp rather than kept in its own register */
     197      1.26     skrll #define	curcpu()		(lwp_getcpu(aarch64_curlwp()))
     198      1.26     skrll #define	setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))
     199      1.26     skrll #undef curlwp
     200      1.26     skrll #define	curlwp			(aarch64_curlwp())
     201      1.48     skrll #define	curpcb			((struct pcb *)lwp_getpcb(curlwp))
    202      1.10     skrll 
     203      1.26     skrll void	cpu_signotify(struct lwp *l);
     204      1.21     skrll void	cpu_need_proftick(struct lwp *l);
     205      1.21     skrll 
     206      1.21     skrll void	cpu_hatch(struct cpu_info *);
     207       1.2       ryo 
     208       1.2       ryo extern struct cpu_info *cpu_info[];
     209      1.21     skrll extern struct cpu_info cpu_info_store[];
     210       1.1      matt 
     211      1.28       ryo #define CPU_INFO_ITERATOR	int
     212      1.20  riastrad #if defined(MULTIPROCESSOR) || defined(_MODULE)
     213       1.2       ryo #define cpu_number()		(curcpu()->ci_index)
     214       1.2       ryo #define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
                             /* iterate attached CPUs; before ncpu is set, visit cpu_info[0] only */
     215       1.3       ryo #define CPU_INFO_FOREACH(cii, ci)					\
     216       1.3       ryo 	cii = 0, ci = cpu_info[0];					\
     217       1.3       ryo 	cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL;	\
     218       1.2       ryo 	cii++
     219       1.2       ryo #else /* MULTIPROCESSOR */
     220       1.2       ryo #define cpu_number()		0
     221       1.2       ryo #define CPU_IS_PRIMARY(ci)	true
                             /* uniprocessor: the only CPU is curcpu() */
     222       1.3       ryo #define CPU_INFO_FOREACH(cii, ci)					\
     223       1.2       ryo 	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
     224       1.2       ryo #endif /* MULTIPROCESSOR */
     225       1.1      matt 
     226      1.26     skrll #define	LWP0_CPU_INFO	(&cpu_info_store[0])
    227       1.1      matt 
     228      1.33  jmcneill #define	__HAVE_CPU_DOSOFTINTS_CI
     229      1.33  jmcneill 
                             /*
                              * Run pending fast soft interrupts on `ci': only when not inside a
                              * hardware interrupt and a softint at a level above the current spl
                              * (ci_cpl) is pending.
                              */
     230       1.1      matt static inline void
     231      1.33  jmcneill cpu_dosoftints_ci(struct cpu_info *ci)
     232       1.1      matt {
     233       1.2       ryo #if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
     234       1.2       ryo 	void dosoftints(void);
     235       1.2       ryo 
     236      1.33  jmcneill 	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0) {
     237       1.2       ryo 		dosoftints();
     238      1.33  jmcneill 	}
     239      1.33  jmcneill #endif
     240      1.33  jmcneill }
    241      1.33  jmcneill 
                             /* process pending fast soft interrupts for the current CPU */
     242      1.33  jmcneill static inline void
     243      1.33  jmcneill cpu_dosoftints(void)
     244      1.33  jmcneill {
     245      1.33  jmcneill #if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
     246      1.33  jmcneill 	cpu_dosoftints_ci(curcpu());
     247       1.2       ryo #endif
     248       1.1      matt }
    249       1.1      matt 
    250      1.33  jmcneill 
    251      1.22  christos #endif /* _KERNEL */
    252      1.22  christos 
    253       1.1      matt #endif /* _KERNEL || _KMEMUSER */
    254       1.1      matt 
    255       1.1      matt #endif
    256       1.1      matt 
    257       1.1      matt #endif /* _AARCH64_CPU_H_ */
    258