/*	$NetBSD: cpu.h,v 1.125 2023/07/11 11:01:18 riastradh Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

#ifdef _KERNEL
#ifndef _LOCORE

/* CPU identifier; named after the ARM MPIDR (Multiprocessor Affinity Register). */
typedef unsigned long mpidr_t;

#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;	/* upper bound on CPUs the kernel will use */
extern mpidr_t cpu_mpidr[];	/* per-CPU MPIDR values, indexed by cpu number */

/* Secondary-processor bring-up hooks (implemented in MD startup code). */
void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
void cpu_mpstart(void);
bool cpu_hatched_p(u_int);	/* true once the given CPU has come up */

void cpu_clr_mbox(int);
void cpu_set_hatched(int);

#endif

struct proc;

/* MD hook invoked when a process forks. */
void	cpu_proc_fork(struct proc *, struct proc *);

#endif	/* !_LOCORE */
#endif	/* _KERNEL */

#ifdef __arm__

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions (machdep sysctl node identifiers). */
#define	CPU_DEBUG		1	/* int: misc kernel debug control */
#define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define	CPU_CONSDEV		4	/* struct: dev_t of our console */
#define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

/* Kernel-option headers are only available when building the kernel proper. */
#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
/* armreg.h provides the TPIDRPRW accessors used by curcpu()/curlwp below. */
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif /* TPIDRPRW_IS_CURLWP || TPIDRPRW_IS_CURCPU */

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler (i.e. the clock interrupt nested inside one).
 */
#if !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define CLKF_INTR(cf)						\
	((curcpu()->ci_intr_depth > 1) ||			\
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe.
 *
 * Fix: parenthesize the macro argument, consistent with CLKF_USERMODE
 * and CLKF_INTR above.  Without the parentheses an argument such as
 * "&frame" or "p + 1" would mis-parse when substituted before "->".
 */
#define CLKF_PC(frame)		((frame)->cf_tf.tf_pc)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		(lwp_trapframe(l)->tf_pc)

/*
 * Per-CPU information.  For now we assume one CPU.
 */
/* Forward declarations; definitions appear after the headers below. */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#include <sys/param.h>

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

/*
 * Cache info variables.
 *
 * Cache type encoding (from the names below): bit 0 set => physically
 * tagged (PT), bit 1 set => physically indexed (PI); so VIVT=0 and
 * PIPT=3.
 */
#define	CACHE_TYPE_VIVT		0
#define	CACHE_TYPE_xxPT		1
#define	CACHE_TYPE_VIPT		1
#define	CACHE_TYPE_PIxx		2
#define	CACHE_TYPE_PIPT		3

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	/* Instruction cache geometry. */
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	/* Data cache geometry. */
	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;	/* CACHE_TYPE_* for the cache as a whole */
	bool cache_unified;	/* true if I- and D-cache are unified */
	uint8_t icache_type;	/* CACHE_TYPE_* of the instruction cache */
	uint8_t dcache_type;	/* CACHE_TYPE_* of the data cache */
};

/*
 * Per-CPU state; one instance per processor (statically allocated in
 * cpu_info_store[]).
 */
struct cpu_info {
	struct cpu_data	ci_data;	/* MI per-cpu data */
	device_t	ci_dev;		/* Device corresponding to this CPU */
	cpuid_t		ci_cpuid;	/* MI CPU identifier */
	uint32_t	ci_arm_cpuid;	/* aggregate CPU id */
	uint32_t	ci_arm_cputype;	/* CPU type */
	uint32_t	ci_arm_cpurev;	/* CPU revision */
	uint32_t	ci_ctrl;	/* The CPU control register */

	/*
	 * the following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int		ci_want_resched __aligned(COHERENCY_UNIT);
					/* resched() was called */
	lwp_t *		ci_curlwp __aligned(COHERENCY_UNIT);
					/* current lwp */
	lwp_t *		ci_onproc;	/* current user LWP / kthread */

	/*
	 * largely CPU-private.
	 */
	lwp_t *		ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	struct cpu_softc *
			ci_softc;	/* platform softc */

	int		ci_cpl;		/* current processor level (spl) */
	volatile int	ci_hwpl;	/* current hardware priority */
	int		ci_kfpu_spl;	/* NOTE(review): presumably the spl
					 * held across in-kernel FPU use;
					 * confirm against kfpu code */

	volatile u_int	ci_intr_depth;	/* interrupt nesting depth; 0 when
					 * not in an interrupt handler (see
					 * CLKF_INTR, cpu_dosoftints) */
	volatile u_int	ci_softints;	/* pending soft ints (bits by ipl) */
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	lwp_t *		ci_lastlwp;	/* last lwp */

	struct evcnt	ci_arm700bugcount;	/* ARM700 bug event count */
	int32_t		ci_mtx_count;
	int		ci_mtx_oldspl;
	register_t	ci_undefsave[3];
	uint32_t	ci_vfp_id;
	uint64_t	ci_lastintr;

	struct pmap *	ci_pmap_lastuser;
	struct pmap *	ci_pmap_cur;
	tlb_asid_t	ci_pmap_asid_cur;

	struct trapframe *
			ci_ddb_regs;	/* registers saved for the debugger */

	/* Event counters for abort/undefined-instruction/VFP handling. */
	struct evcnt	ci_abt_evs[16];
	struct evcnt	ci_und_ev;
	struct evcnt	ci_und_cp15_ev;
	struct evcnt	ci_vfp_evs[3];

	/* Cached copies of CPU identification/control registers. */
	uint32_t	ci_midr;
	uint32_t	ci_actlr;
	uint32_t	ci_revidr;
	uint32_t	ci_mpidr;
	uint32_t	ci_mvfr[2];

	uint32_t	ci_capacity_dmips_mhz;

	struct arm_cache_info
			ci_cacheinfo;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

extern struct cpu_info cpu_info_store[];

/* Out-of-line curlwp/curcpu accessors; modules use these (see _MODULE below). */
struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

#ifdef _KERNEL
#if defined(_MODULE)

/* Modules cannot assume a particular implementation; call out of line. */
#define	curlwp		arm_curlwp()
#define curcpu()	arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
/* The TPIDRPRW register holds the current lwp pointer. */
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also in <sys/lwp.h> but also here if this was included before <sys/lwp.h>
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curlwp		_curlwp()
// curcpu() expands into two instructions: a mrc and a ldr
#define	curcpu()	lwp_getcpu(_curlwp())
#elif defined(TPIDRPRW_IS_CURCPU)
#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif
/* The TPIDRPRW register holds the current cpu_info pointer. */
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
/* Uniprocessor: the sole CPU's cpu_info is statically allocated. */
#define	curcpu()	(&cpu_info_store[0])
#elif !defined(__HAVE_PREEMPTION)
#error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#else
#error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */

#ifndef curlwp
#define	curlwp		(curcpu()->ci_curlwp)
#endif
/* The current lwp's pcb. */
#define curpcb		((struct pcb *)lwp_getpcb(curlwp))

#define CPU_INFO_ITERATOR	int
#if defined(_MODULE) || defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
#else
#define cpu_number()            0

#define CPU_IS_PRIMARY(ci)	true
/* Single-CPU iteration: visit curcpu() exactly once. */
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif

#define	LWP0_CPU_INFO	(&cpu_info_store[0])

    336 static inline int
    337 curcpl(void)
    338 {
    339 	return curcpu()->ci_cpl;
    340 }
    341 
    342 static inline void
    343 set_curcpl(int pri)
    344 {
    345 	curcpu()->ci_cpl = pri;
    346 }
    347 
/*
 * cpu_dosoftints: Dispatch pending fast soft interrupts, but only when
 * we are not nested inside an interrupt handler and some soft interrupt
 * above the current spl is pending.  With __HAVE_PIC_FAST_SOFTINTS the
 * dispatch presumably happens in the PIC code instead, so this compiles
 * to nothing; likewise without __HAVE_FAST_SOFTINTS.
 */
static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void	dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}

/*
 * Scheduling glue
 */
void cpu_signotify(struct lwp *);
/* Request an AST on the lwp currently running on the given CPU. */
#define	setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the i386, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
				 setsoftast(lwp_getcpu(l)))

/*
 * We've already preallocated the stack for the idlelwps for additional CPUs.
 * This hook returns them.
 */
vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);

#ifdef _ARM_ARCH_6
int	cpu_maxproc_hook(int);
#endif

#endif /* _KERNEL */

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

    390 
#elif defined(__aarch64__)

/* On AArch64 kernels this header simply defers to the aarch64 version. */
#include <aarch64/cpu.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_CPU_H_ */
    398