/*	$NetBSD: cpu.h,v 1.98.2.1 2013/01/16 05:33:05 yamt Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and by a single locked TTE at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors can reach it through
 * the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 * cpu_info
 * interrupt stack (all remaining space)
 * idle PCB
 * idle stack (STACKSPACE - sizeof(PCB))
 * 32KB TSB
 */
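
/*
 * Example (a sketch, not part of the original header): code running on a
 * CPU reaches its own cpu_info through the CPUINFO_VA alias (this is what
 * the curcpu() macro below does), while other CPUs' structures are reached
 * through their global kernel-pmap addresses, e.g. by walking ci_next:
 *
 *	struct cpu_info *self = curcpu();
 *	struct cpu_info *ci;
 *
 *	for (ci = cpus; ci != NULL; ci = ci->ci_next)
 *		if (ci != self)
 *			; -- access ci through its global VA here
 */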

struct cpu_info {
	struct cpu_data		ci_data;	/* MI per-cpu data */


	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];	/* %tick */
	uint64_t		ci_system_clockrate[2];	/* %stick */

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;

	/* This could be under MULTIPROCESSOR, but there's no good reason */
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

/*
 * A context is simply a small number that differentiates multiple mappings
 * of the same address.  Contexts on the Spitfire are 13 bits, but could
 * be as large as 17 bits.
 *
 * Each context is either free or attached to a pmap.
 *
 * The context table is an array of pointers to psegs.  Just dereference
 * the right pointer and you get to the pmap segment tables.  These are
 * physical addresses, of course.
 *
 * ci_ctx_lock protects this CPU's context allocation and freeing.
 * These fields are all allocated almost within the same cache line.
 */
	kmutex_t		ci_ctx_lock;
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;
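
	/*
	 * Example (hedged sketch): given a context number "ctx" that is
	 * live on this CPU, the physical address of that pmap's segment
	 * table can be read straight out of the context table described
	 * above:
	 *
	 *	struct cpu_info *ci = curcpu();
	 *	paddr_t pseg_pa = ci->ci_ctxbusy[ctx];
	 *
	 * Only the "array of pseg pointers indexed by context, protected
	 * by ci_ctx_lock" shape comes from this header; how free slots
	 * are encoded is not specified here.
	 */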

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus). These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	volatile void		*ci_ddb_regs;	/* DDB regs */
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define CPUF_PRIMARY	1

/*
 * CPU boot arguments. Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t cb_ktext;
	paddr_t cb_ktextp;
	vaddr_t cb_ektext;

	vaddr_t cb_kdata;
	paddr_t cb_kdatap;
	vaddr_t cb_ekdata;

	paddr_t	cb_cpuinfo;
};

extern struct cpu_bootargs *cpu_args;

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
					ci = ci->ci_next
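
/*
 * Example (sketch): the iterator pair above is used in the usual NetBSD
 * way, e.g. to visit every attached CPU and skip the boot CPU:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (CPU_IS_PRIMARY(ci))
 *			continue;
 *		-- per-CPU work goes here
 *	}
 */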

#define curlwp		curcpu()->ci_curlwp
#define fplwp		curcpu()->ci_fplwp
#define curpcb		curcpu()->ci_cpcb

#define want_ast	curcpu()->ci_want_ast
#define want_resched	curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void	cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void	cpu_pmap_prepare(struct cpu_info *, bool);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern  char   *mp_tramp_code;
extern  u_long  mp_tramp_code_len;
extern  u_long  mp_tramp_tlb_slots;
extern  u_long  mp_tramp_func;
extern  u_long  mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just one cpu
 * The called functions do not follow the C ABI, so they need to be coded
 * in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
void	sparc64_send_ipi(int, ipifunc_t, uint64_t, uint64_t);
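
/*
 * Example (hedged sketch): a broadcast IPI hands two 64-bit arguments to
 * an assembler routine on every other cpu.  The handler name below is
 * illustrative only; it is not declared in this header:
 *
 *	extern void sparc64_ipi_example(void *, void *);	-- asm code
 *
 *	sparc64_broadcast_ipi(sparc64_ipi_example, 0, 0);
 */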

/*
 * Call an arbitrary C function on another cpu (or on all others but ourselves).
 */
typedef void (*ipi_c_call_func_t)(void*);
void	sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
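
/*
 * Example (hedged sketch): unlike the raw IPIs above, the generic cross
 * call runs an ordinary C function on the target cpu.  The callee and
 * argument below are hypothetical:
 *
 *	static void example_drain(void *arg);	-- plain C, follows the C ABI
 *
 *	sparc64_generic_xcall(othercpu, example_drain, NULL);
 */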

#endif

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep))&&					\
		(((framep)->t.tf_out[6] & 1 ) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK-0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK-0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
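
/*
 * Example (sketch): MI clock code typically consumes a clockframe along
 * these lines when deciding whom to charge a tick to:
 *
 *	if (CLKF_USERMODE(framep))
 *		; -- tick taken while running user code
 *	else if (CLKF_INTR(framep))
 *		; -- tick taken while running another interrupt handler
 *	else
 *		; -- tick taken in kernel mode, outside interrupt context
 */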

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending; process it as soon as possible.
 */
void cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0
 * (``not me'') or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int			(*ih_fun)(void *);
	void			*ih_arg;
	/* if we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg */
	int			(*ih_realfun)(void *);
	void			*ih_realarg;
	short			ih_number;	/* interrupt number */
						/* the H/W provides */
	char			ih_pil;		/* interrupt priority */
	struct intrhand		*ih_next;	/* global list */
	struct intrhand		*ih_pending;	/* interrupt queued */
	volatile uint64_t	*ih_map;	/* Interrupt map reg */
	volatile uint64_t	*ih_clr;	/* clear interrupt reg */
	struct evcnt		ih_cnt;		/* counter for vmstat */
	uint32_t		ih_ivec;
	char			ih_name[32];	/* name for the above */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
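
/*
 * Example (hedged sketch): a driver hooks a hardware interrupt by filling
 * in a struct intrhand and handing it to intr_establish().  The handler,
 * softc and interrupt number below are illustrative only:
 *
 *	struct intrhand *ih = ...allocate and zero one...;
 *
 *	ih->ih_fun = example_intr;	-- returns 1 if it handled the irq
 *	ih->ih_arg = sc;
 *	ih->ih_number = ino;		-- H/W interrupt number
 *	ih->ih_pil = IPL_BIO;		-- interrupt priority level
 *	intr_establish(ih->ih_pil, true, ih);	-- true: handler is MP-safe
 */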

/* disksubr.c */
struct dkbad;
int isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void *	reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	stickintr(void *);	/* system tick interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
void	stickintr_establish(int, int (*)(void *));
/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t	probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);

#define	 write_all_windows() __asm volatile("flushw" : : )
#define	 write_user_windows() __asm volatile("flushw" : : )

struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx_us(int);
void	switchtoctx_usiii(int);
void	next_tick(long);
void	next_stick(long);
/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#else /* _KERNEL */

/*
 * XXX: provide some definitions for crash(8), probably can share
 */
#if defined(_KMEMUSER)
#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define curlwp		curcpu()->ci_curlwp
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */