/* $NetBSD: cpu.h,v 1.2 2015/03/28 16:13:56 matt Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_CPU_H_
#define _RISCV_CPU_H_

#if defined(_KERNEL) || defined(_KMEMUSER)
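/*
 * Editor's note (added): state captured at clock-interrupt time -- the
 * interrupted PC, the saved status register and the interrupt nesting
 * depth -- as consumed by the CLKF_* macros below.
 */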
struct clockframe {
	uintptr_t cf_pc;
	uint32_t cf_sr;
	int cf_intr_depth;
};

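/*
 * Editor's note (added, interpretation): the low bit of the saved status
 * register appears to be the supervisor-mode bit in the early privileged-spec
 * layout this port was written against, so a clear bit means the tick
 * interrupted user code.  CLKF_INTR is true when the tick arrived while
 * another interrupt was already being serviced.
 */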
#define CLKF_USERMODE(cf)	(((cf)->cf_sr & 1) == 0)
#define CLKF_PC(cf)		((cf)->cf_pc)
#define CLKF_INTR(cf)		((cf)->cf_intr_depth > 0)

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>
#include <sys/intr.h>

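/*
 * Editor's note (added): per-CPU state -- MI cpu_data, the current and
 * soft-interrupt LWPs, spl/interrupt-depth bookkeeping, the ASID of the
 * current pmap and FPU event counters.
 */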
struct cpu_info {
	struct cpu_data ci_data;
	device_t ci_dev;
	cpuid_t ci_cpuid;
	struct lwp *ci_curlwp;
	struct lwp *ci_softlwps[SOFTINT_COUNT];
	struct trapframe *ci_ddb_regs;

	uint64_t ci_lastintr;

	int ci_mtx_oldspl;
	int ci_mtx_count;

	int ci_want_resched;
	int ci_cpl;
	u_int ci_softints;
	volatile u_int ci_intr_depth;

	tlb_asid_t ci_pmap_asid_cur;
#if 0
	union pmap_pdetab *ci_pmap_user_pdetab;
#ifdef _LP64
	union pmap_pdetab *ci_pmap_user_pde0tab;
#endif
#endif

	struct evcnt ci_ev_fpu_saves;
	struct evcnt ci_ev_fpu_loads;
	struct evcnt ci_ev_fpu_reenables;
};

extern struct cpu_info cpu_info_store;

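/*
 * Editor's note (added): the current LWP is kept in the tp (thread pointer)
 * register, so curlwp is just a register read.
 */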
register struct lwp *riscv_curlwp __asm("tp");
#define	curlwp		riscv_curlwp

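/*
 * Editor's note (added, assumption): the sscratch CSR is expected to hold
 * this CPU's cpu_info pointer while in the kernel; curcpu() simply reads
 * it back.
 */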
static inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;
	__asm("csrr\t%0, sscratch" : "=r"(ci));
	return ci;
}

static inline cpuid_t
cpu_number(void)
{
#ifdef MULTIPROCESSOR
	return curcpu()->ci_cpuid;
#else
	return 0;
#endif
}

void	cpu_set_curpri(int);
void	cpu_proc_fork(struct proc *, struct proc *);
void	cpu_signotify(struct lwp *);
void	cpu_need_proftick(struct lwp *l);
void	cpu_boot_secondary_processors(void);

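/*
 * Editor's note (added): CPU iteration -- on MULTIPROCESSOR kernels walk the
 * cpu_infos[] array until the first NULL entry, otherwise visit the single
 * curcpu() exactly once.
 */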
#define CPU_INFO_ITERATOR	cpuid_t
#ifdef MULTIPROCESSOR
#define CPU_INFO_FOREACH(cii, ci) \
	(cii) = 0; ((ci) = cpu_infos[cii]) != NULL; (cii)++
#else
#define CPU_INFO_FOREACH(cii, ci) \
	(cii) = 0, (ci) = curcpu(); (cii) == 0; (cii)++
#endif

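/*
 * Editor's note (added): the ci argument is ignored here; the current pmap
 * is always taken from curlwp's vmspace.
 */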
#define CPU_INFO_CURPMAP(ci)	(curlwp->l_proc->p_vmspace->vm_map.pmap)

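/*
 * Editor's note (added): run pending soft interrupts, but only from the
 * outermost (non-interrupt) context and only when softints above the current
 * priority level are pending.
 */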
static inline void
cpu_dosoftints(void)
{
	extern void dosoftints(void);
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0
	    && (ci->ci_data.cpu_softints >> ci->ci_cpl) > 0)
		dosoftints();
}

static inline bool
cpu_intr_p(void)
{
	return curcpu()->ci_intr_depth > 0;
}

#define LWP_PC(l)	cpu_lwp_pc(l)

vaddr_t	cpu_lwp_pc(struct lwp *);

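/*
 * Editor's note (added, speculation): nothing to do in the idle loop yet;
 * a wait-for-interrupt instruction would presumably go here once the port
 * wires up interrupts.
 */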
static inline void
cpu_idle(void)
{
}

#endif /* _KERNEL || _KMEMUSER */

#endif /* _RISCV_CPU_H_ */