/*	$NetBSD: cpu.h,v 1.105 2020/01/18 14:40:04 skrll Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

#ifdef __arm__

/*
 * User-visible definitions
 */

/*  CTL_MACHDEP definitions. */
#define	CPU_DEBUG		1	/* int: misc kernel debug control */
#define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define	CPU_CONSDEV		4	/* struct: dev_t of our console */
#define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */

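/*
 * Illustrative sketch (not part of this header): the CTL_MACHDEP nodes
 * above are read from userland with sysctl(3).  The program below is
 * hypothetical; buffer size and error handling are kept minimal.
 */
#if 0	/* example only, never compiled */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

static void
print_booted_kernel(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
	char buf[MAXPATHLEN];
	size_t len = sizeof(buf);

	/* machdep.booted_kernel: the path of the kernel we booted */
	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
		printf("booted kernel: %s\n", buf);
}
#endif
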
#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#if !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define CLKF_INTR(cf)						\
	((curcpu()->ci_intr_depth > 1) ||			\
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#define CLKF_PC(frame)		(frame->cf_tf.tf_pc)

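/*
 * Illustrative sketch (not part of this header): how an MI clock
 * handler such as statclock() typically consumes the CLKF_* macros to
 * decide where a tick is charged.  The handler below is hypothetical
 * and only shows the control flow.
 */
#if 0	/* example only, never compiled */
static void
example_statclock(struct clockframe *frame)
{
	if (CLKF_USERMODE(frame)) {
		/* tick arrived from USR mode: charge user time */
	} else if (CLKF_INTR(frame)) {
		/* nested interrupt: charge interrupt time */
	} else {
		/*
		 * Otherwise charge system time; CLKF_PC(frame) yields
		 * the interrupted kernel PC for kernel profiling.
		 */
	}
}
#endif
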
/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		(lwp_trapframe(l)->tf_pc)

/*
 * Per-CPU information.
 */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

struct cpu_info {
	struct cpu_data	ci_data;	/* MI per-cpu data */
	device_t	ci_dev;		/* Device corresponding to this CPU */
	cpuid_t		ci_cpuid;
	uint32_t	ci_arm_cpuid;	/* aggregate CPU id */
	uint32_t	ci_arm_cputype;	/* CPU type */
	uint32_t	ci_arm_cpurev;	/* CPU revision */
	uint32_t	ci_ctrl;	/* The CPU control register */
	int		ci_cpl;		/* current processor level (spl) */
	volatile int	ci_astpending;	/* AST pending for this CPU */
	int		ci_want_resched;/* resched() was called */
	int		ci_intr_depth;	/* interrupt nesting depth */

	struct cpu_softc *
			ci_softc;	/* platform softc */

	lwp_t *		ci_softlwps[SOFTINT_COUNT];
	volatile uint32_t
			ci_softints;

	lwp_t *		ci_curlwp;	/* current lwp */
	lwp_t *		ci_onproc;	/* current user LWP / kthread */
	lwp_t *		ci_lastlwp;	/* last lwp */

	struct evcnt	ci_arm700bugcount;
	int32_t		ci_mtx_count;
	int		ci_mtx_oldspl;
	register_t	ci_undefsave[3];
	uint32_t	ci_vfp_id;
	uint64_t	ci_lastintr;

	struct pmap_tlb_info *
			ci_tlb_info;
	struct pmap *	ci_pmap_lastuser;
	struct pmap *	ci_pmap_cur;
	tlb_asid_t	ci_pmap_asid_cur;

	struct trapframe *
			ci_ddb_regs;

	struct evcnt	ci_abt_evs[16];
	struct evcnt	ci_und_ev;
	struct evcnt	ci_und_cp15_ev;
	struct evcnt	ci_vfp_evs[3];

	uint32_t	ci_midr;
	uint32_t	ci_mpidr;
#define arm_cpu_mpidr(ci)	((ci)->ci_mpidr)
	uint32_t	ci_capacity_dmips_mhz;

	struct arm_cache_info *
			ci_cacheinfo;

#if defined(MP_CPU_INFO_MEMBERS)
	MP_CPU_INFO_MEMBERS
#endif
};

extern struct cpu_info cpu_info_store;

struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

#if defined(_MODULE)

#define	curlwp		arm_curlwp()
#define curcpu()	arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also declared in <sys/lwp.h>; repeated here in case this header was
// included before <sys/lwp.h>.
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curlwp		_curlwp()
// curcpu() expands into two instructions: an mrc and an ldr.
#define	curcpu()	lwp_getcpu(_curlwp())
#elif defined(TPIDRPRW_IS_CURCPU)
#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
#define	curcpu()	(&cpu_info_store)
#elif !defined(__HAVE_PREEMPTION)
#error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#else
#error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */

#ifndef curlwp
#define	curlwp		(curcpu()->ci_curlwp)
#endif

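/*
 * Illustrative sketch (not part of this header): however curcpu() and
 * curlwp are obtained above (module stubs, TPIDRPRW, or the single
 * cpu_info_store), callers use them the same way to reach the per-CPU
 * fields of struct cpu_info.  The function below is hypothetical.
 */
#if 0	/* example only, never compiled */
static void
example_show_current(void)
{
	struct cpu_info * const ci = curcpu();
	lwp_t * const l = curlwp;

	printf("%s: cpl %d, intr depth %d, curlwp %p\n",
	    device_xname(ci->ci_dev), ci->ci_cpl, ci->ci_intr_depth, l);
}
#endif
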
#define CPU_INFO_ITERATOR	int
#if defined(_MODULE) || defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
#else
#define cpu_number()            0

#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif

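/*
 * Illustrative sketch (not part of this header): the standard
 * CPU_INFO_FOREACH idiom for walking every attached CPU, which works
 * with either definition above.  The per-CPU work is hypothetical.
 */
#if 0	/* example only, never compiled */
static void
example_walk_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			printf("cpu%u is the boot processor\n",
			    ci->ci_index);
	}
}
#endif
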
#if defined(MULTIPROCESSOR)

extern uint32_t cpu_mpidr[];
bool cpu_hatched_p(u_int);

void cpu_mpstart(void);
void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
#endif

#define	LWP0_CPU_INFO	(&cpu_info_store)

static inline int
curcpl(void)
{
	return curcpu()->ci_cpl;
}

static inline void
set_curcpl(int pri)
{
	curcpu()->ci_cpl = pri;
}

static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void	dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}

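/*
 * Illustrative sketch (not part of this header, and not the real ARM
 * splx() implementation): roughly how a priority-lowering path is
 * expected to use set_curcpl() together with cpu_dosoftints(), i.e.
 * publish the lower level first and then let any soft interrupts that
 * the new level unmasks run.
 */
#if 0	/* example only, never compiled */
static void
example_lower_spl(int newspl)
{
	if (newspl < curcpl()) {
		set_curcpl(newspl);	/* publish the new priority */
		cpu_dosoftints();	/* run newly unmasked softints */
	}
}
#endif
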
void	cpu_proc_fork(struct proc *, struct proc *);

/*
 * Scheduling glue
 */

#ifdef __HAVE_PREEMPTION
#define setsoftast(ci)		atomic_or_uint(&(ci)->ci_astpending, __BIT(0))
#else
#define setsoftast(ci)		((ci)->ci_astpending = __BIT(0))
#endif

/*
 * Notify the given lwp (l) that it has a signal pending, to be
 * processed as soon as possible.
 */

#define cpu_signotify(l)		setsoftast((l)->l_cpu)

/*
 * Give a profiling tick to the current lwp when the user profiling
 * buffer pages are invalid: request an AST to send us through trap(),
 * marking the lwp as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
				 setsoftast((l)->l_cpu))

/*
 * We have already preallocated the stacks for the idle lwps of any
 * additional CPUs.  This hook returns them to the caller.
 */
vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);

/*
 * cpu device glue (belongs in cpuvar.h)
 */
void	cpu_attach(device_t, cpuid_t);

#ifdef _ARM_ARCH_6
int	cpu_maxproc_hook(int);
#endif

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpu.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_CPU_H_ */