/* $NetBSD: cpu.h,v 1.13 2023/07/29 06:59:47 skrll Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_CPU_H_
#define _RISCV_CPU_H_

#if defined(_KERNEL) || defined(_KMEMUSER)

struct clockframe {
	vaddr_t cf_epc;
	register_t cf_status;
	int cf_intr_depth;
};

#define CLKF_USERMODE(cf)	(((cf)->cf_status & SR_SPP) == 0)
#define CLKF_PC(cf)		((cf)->cf_epc)
#define CLKF_INTR(cf)		((cf)->cf_intr_depth > 1)
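
/*
 * Illustrative sketch (not part of the original header): MI clock code is
 * expected to consume a struct clockframe roughly like this, with "cf"
 * filled in by the trap entry path before hardclock()/statclock() run.
 * The example_* helper names are made up for the example.
 *
 *	void
 *	example_clock_tick(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf))		// SR_SPP clear: from user mode
 *			example_charge_user();
 *		else if (CLKF_INTR(cf))		// nested inside another interrupt
 *			example_count_nested_tick();
 *		example_profile_pc(CLKF_PC(cf));	// interrupted pc (saved sepc)
 *	}
 */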

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>
#include <sys/intr.h>

struct cpu_info {
	struct cpu_data ci_data;
	device_t ci_dev;
	cpuid_t ci_cpuid;
	struct lwp *ci_curlwp;
	struct lwp *ci_onproc;		/* current user LWP / kthread */
	struct lwp *ci_softlwps[SOFTINT_COUNT];
	struct trapframe *ci_ddb_regs;

	uint64_t ci_lastintr;
	uint64_t ci_lastintr_scheduled;
	struct evcnt ci_ev_timer;
	struct evcnt ci_ev_timer_missed;

	u_long ci_cpu_freq;		/* CPU frequency */
	int ci_mtx_oldspl;
	int ci_mtx_count;
	int ci_cpl;
	volatile u_int ci_intr_depth;

	int ci_want_resched __aligned(COHERENCY_UNIT);
	u_int ci_softints;

	tlb_asid_t ci_pmap_asid_cur;

	union pmap_segtab *ci_pmap_user_segtab;
#ifdef _LP64
	union pmap_segtab *ci_pmap_user_seg0tab;
#endif

	struct evcnt ci_ev_fpu_saves;
	struct evcnt ci_ev_fpu_loads;
	struct evcnt ci_ev_fpu_reenables;

	struct pmap_tlb_info *ci_tlb_info;

#ifdef MULTIPROCESSOR
	volatile u_long ci_flags;
#define	CPUF_PRIMARY	__BIT(0)		/* CPU is primary CPU */
#define	CPUF_PRESENT	__BIT(1)		/* CPU is present */
#define	CPUF_RUNNING	__BIT(2)		/* CPU is running */
#define	CPUF_PAUSED	__BIT(3)		/* CPU is paused */
#define	CPUF_USERPMAP	__BIT(4)		/* CPU has a user pmap activated */

	volatile u_long ci_request_ipis;
						/* bitmask of IPIs requested */
	u_long ci_active_ipis;			/* bitmask of IPIs being serviced */

	struct evcnt ci_evcnt_all_ipis;		/* aggregated IPI counter */
	struct evcnt ci_evcnt_per_ipi[NIPIS];	/* individual IPI counters */
	struct evcnt ci_evcnt_synci_onproc_rqst;
	struct evcnt ci_evcnt_synci_deferred_rqst;
	struct evcnt ci_evcnt_synci_ipi_rqst;

	kcpuset_t *ci_shootdowncpus;
	kcpuset_t *ci_multicastcpus;
	kcpuset_t *ci_watchcpus;
	kcpuset_t *ci_ddbcpus;
#endif

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

extern struct cpu_info *cpu_info[];
extern struct cpu_info cpu_info_store[];


#ifdef MULTIPROCESSOR
extern u_int riscv_cpu_max;
extern cpuid_t cpu_hartid[];

void cpu_hatch(struct cpu_info *);

void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
void cpu_mpstart(void);
bool cpu_hatched_p(u_int);

void cpu_clr_mbox(int);
void cpu_set_hatched(int);


void	cpu_halt(void);
void	cpu_halt_others(void);
bool	cpu_is_paused(cpuid_t);
void	cpu_pause(void);
void	cpu_pause_others(void);
void	cpu_resume(cpuid_t);
void	cpu_resume_others(void);
void	cpu_debug_dump(void);

extern kcpuset_t *cpus_running;
extern kcpuset_t *cpus_hatched;
extern kcpuset_t *cpus_paused;
extern kcpuset_t *cpus_resumed;
extern kcpuset_t *cpus_halted;
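
/*
 * Illustrative note (assumption, not taken from this file): the halt/pause
 * primitives above are typically driven from the debugger and shutdown
 * paths, with the kcpuset_t sets recording which harts have acknowledged:
 *
 *	cpu_pause_others();		// park every other hart
 *	// ... inspect or modify global state while they spin ...
 *	cpu_resume_others();		// let the paused harts continue
 *
 * cpu_is_paused() reports on a single hart (by cpuid), so callers can poll
 * for acknowledgement.
 */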

/*
 * Definitions of CPU-dependent requirements referenced in
 * machine-independent code.
 */

/*
 * Send an inter-processor interrupt to each other CPU (excludes curcpu()).
 */
void cpu_broadcast_ipi(int);

/*
 * Send an inter-processor interrupt to the CPUs in kcpuset (excludes curcpu()).
 */
void cpu_multicast_ipi(const kcpuset_t *, int);

/*
 * Send an inter-processor interrupt to another CPU.
 */
int cpu_send_ipi(struct cpu_info *, int);
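
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * int argument selects the IPI type from the MI IPI numbering (the NIPIS
 * values counted in ci_evcnt_per_ipi[] above).  For example:
 *
 *	cpu_send_ipi(ci, IPI_NOP);			// single target
 *	cpu_multicast_ipi(ci->ci_multicastcpus, IPI_NOP);
 *	cpu_broadcast_ipi(IPI_NOP);			// everyone but curcpu()
 *
 * IPI_NOP is used here only as a stand-in for whichever IPI type the
 * caller actually needs.
 */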

#endif

struct lwp;
static inline struct cpu_info *lwp_getcpu(struct lwp *);

register struct lwp *riscv_curlwp __asm("tp");
#define	curlwp		riscv_curlwp
#define	curcpu()	lwp_getcpu(curlwp)
#define	curpcb		((struct pcb *)lwp_getpcb(curlwp))

static inline cpuid_t
cpu_number(void)
{
#ifdef MULTIPROCESSOR
	return curcpu()->ci_cpuid;
#else
	return 0;
#endif
}
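
/*
 * Illustrative note (not from the original header): while in the kernel,
 * curlwp lives in the RISC-V "tp" (thread pointer) register, so curcpu()
 * is simply a load of the current LWP's CPU back-pointer.  Typical per-CPU
 * access then looks like:
 *
 *	struct cpu_info * const ci = curcpu();
 *	ci->ci_ev_timer.ev_count++;		// any per-CPU field
 *
 * cpu_number() returns ci_cpuid on MULTIPROCESSOR kernels and the constant
 * 0 otherwise, as the definition above shows.
 */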

void	cpu_proc_fork(struct proc *, struct proc *);
void	cpu_signotify(struct lwp *);
void	cpu_need_proftick(struct lwp *l);
void	cpu_boot_secondary_processors(void);

#define CPU_INFO_ITERATOR	cpuid_t
#ifdef MULTIPROCESSOR
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#define	CPU_INFO_FOREACH(cii, ci)		\
    cii = 0, ci = &cpu_info_store[0];		\
    ci != NULL;					\
    cii++, ncpu ? (ci = cpu_infos[cii])		\
		: (ci = NULL)
#else
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci) \
	(cii) = 0, (ci) = curcpu(); (cii) == 0; (cii)++
#endif
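
/*
 * Illustrative sketch (grounded in the macros above): MI code walks the
 * CPUs with the usual NetBSD idiom
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (CPU_IS_PRIMARY(ci))
 *			continue;
 *		// ... per-secondary-CPU work ...
 *	}
 *
 * On MULTIPROCESSOR kernels the walk starts at cpu_info_store[0] (the boot
 * CPU) and then follows the MI cpu_infos[] array; on uniprocessor kernels
 * it visits curcpu() exactly once.
 */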

#define CPU_INFO_CURPMAP(ci)	(curlwp->l_proc->p_vmspace->vm_map.pmap)

static inline void
cpu_dosoftints(void)
{
	extern void dosoftints(void);
	struct cpu_info * const ci = curcpu();

	/*
	 * Run pending soft interrupts, but only from thread context
	 * (ci_intr_depth == 0) and only when something not masked by
	 * the current IPL is still pending.
	 */
	if (ci->ci_intr_depth == 0
	    && (ci->ci_data.cpu_softints >> ci->ci_cpl) > 0)
		dosoftints();
}

static inline bool
cpu_intr_p(void)
{
	return curcpu()->ci_intr_depth > 0;
}

#define LWP_PC(l)	cpu_lwp_pc(l)

vaddr_t	cpu_lwp_pc(struct lwp *);

static inline void
cpu_idle(void)
{
	/* WFI: let the hart stall until an interrupt needs servicing. */
	asm volatile("wfi" ::: "memory");
}

#endif /* _KERNEL */

#endif /* _RISCV_CPU_H_ */