/*	$NetBSD: cpu.h,v 1.96 2016/12/10 09:51:43 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

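/*
 * Example (illustrative only; not compiled as part of this header):
 * the CTL_MACHDEP nodes above are read from userland with sysctl(3).
 * A minimal sketch that prints the booted kernel name, assuming the
 * standard CTL_MACHDEP top-level node from <sys/sysctl.h>:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */
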
#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_sparc_arch.h"
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

#include <machine/intr.h>
#include <machine/psl.h>

#if defined(_KERNEL)
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/intreg.h>
#else
#include <arch/sparc/sparc/vaddrs.h>
#include <arch/sparc/sparc/cache.h>
#endif

/*
 * Message structure for inter-processor communication in MP systems
 */
struct xpmsg {
	volatile int tag;
#define	XPMSG15_PAUSECPU	1
#define	XPMSG_FUNC		4
#define	XPMSG_FTRP		5

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	volatile int	received;
	volatile int	complete;
};
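
/*
 * Illustrative sketch only (the real cross-call dispatch lives in the
 * MD cross-call code, not in this header): a sender fills in the
 * xpmsg_func member of the target CPU's message area, sets the tag,
 * raises an IPI on that CPU (see raise_ipi() below) and then spins
 * until the target marks the message complete.  `target' and
 * `do_remote_work' are hypothetical names.
 *
 *	struct xpmsg *m = &target->msg;
 *
 *	m->received = m->complete = 0;
 *	m->u.xpmsg_func.func = do_remote_work;
 *	m->u.xpmsg_func.trap = NULL;
 *	m->u.xpmsg_func.arg0 = arg0;
 *	m->u.xpmsg_func.arg1 = 0;
 *	m->u.xpmsg_func.arg2 = 0;
 *	m->tag = XPMSG_FUNC;
 *
 *	while (!m->complete)
 *		;
 */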

/*
 * The cpuinfo structure. This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures). The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */

	/*
	 * Primary Inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself is one (normal 32 byte) cache-line.
	 */
	struct xpmsg	msg __aligned(32);

	/* Scheduler flags */
	int	ci_want_ast;
	int	ci_want_resched;

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int		ci_cpuid;	/* CPU index (see cpus[] array) */

	/* Context administration */
	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see cache.h */

	/* various flags to work around anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send an IPI to (cpi).  For Ross CPUs we need to read
	 * the pending register to avoid a hardware bug.
	 */
#define raise_ipi(cpi,lvl)	do {			\
	int x;						\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);	\
} while (0)
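
	/*
	 * Illustrative use only (the level shown is a placeholder; the
	 * real IPI levels are chosen by the MD cross-call and pause
	 * code): post a soft interrupt on another module with e.g.
	 *
	 *	raise_ipi(cpi, 13);
	 */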

	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int		ci_idepth;		/* Interrupt depth */

	/*
	 * The following pointers point to LWPs that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct	lwp	*ci_curlwp;		/* CPU owner */
	struct	lwp 	*fplwp;			/* FPU owner */

	int		ci_mtx_count;
	int		ci_mtx_oldspl;

	/*
	 * Idle PCB and interrupt stack.
	 */
	void		*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)		/* 128 128-byte stack frames */
	void		*redzone;		/* DEBUG: stack red zone */
#define REDSIZE		(8*96)			/* some room for bouncing */

	struct	pcb	*curpcb;		/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);
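
	/*
	 * Illustrative use only: pmap and trap code flush through these
	 * pointers so the flush automatically matches the CPU model and,
	 * on MP, reaches all modules, e.g.
	 *
	 *	cpuinfo.cache_flush(ptr, len);
	 *	cpuinfo.vcache_flush_page(va, ctx);
	 *
	 * (argument names are illustrative; the sp_ variants act on the
	 * executing CPU only, as described above).
	 */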

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void *	vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);

	int	cpu_type;	/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int		node;		/* PROM node for this CPU */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is bootup CPU */

	vaddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion; 	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */

/* XXX - we currently don't actually use the following */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */

	int		hz;		/* Clock speed */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;		/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);


#if 0
	/* hardware-assisted block operation routines */
	void		(*hwbcopy)(const void *from, void *to, size_t len);
	void		(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void		(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void		(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void		(*idlespin)(struct cpu_info *);
	/* Module Control Registers */
	/*bus_space_handle_t*/ long ci_mbusport;
	/*bus_space_handle_t*/ long ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/end VAs of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to UVM.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	struct evcnt ci_savefpstate;
	struct evcnt ci_savefpstate_null;
	struct evcnt ci_xpmsg_mutex_fail;
	struct evcnt ci_xpmsg_mutex_fail_call;
	struct evcnt ci_intrcnt[16];
	struct evcnt ci_sintrcnt[16];
};

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	curcpu()		(cpuinfo.ci_self)
#define	curlwp			(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()		(cpuinfo.ci_cpuid)
void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors(void);
#endif
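
/*
 * Illustrative use only: code accesses the per-CPU data of the executing
 * CPU through the macros above, e.g.
 *
 *	struct cpu_info *ci = curcpu();
 *
 *	if (CPU_IS_PRIMARY(ci))
 *		printf("cpu%d is the boot CPU\n", cpu_number());
 *
 * Both macros go through `cpuinfo', i.e. the CPUINFO_VA alias mapping
 * described in the cpu_info comment above, so they always refer to the
 * CPU that is currently running.
 */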

/*
 * Arguments to hardclock, softclock and statclock encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;

extern int eintstack[];

#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)						\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	 (framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif
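
/*
 * Illustrative sketch only (the real consumers are the MI hardclock and
 * statclock paths): a clock interrupt consumer typically classifies the
 * interrupted context like this.  `clock_tick' and the `account_*'
 * helpers are hypothetical names.
 *
 *	void
 *	clock_tick(struct clockframe *cf)
 *	{
 *
 *		if (CLKF_USERMODE(cf))
 *			account_user_tick_at(CLKF_PC(cf));
 *		else if (CLKF_INTR(cf))
 *			account_interrupt_tick();
 *		else
 *			account_system_tick();
 *	}
 */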

void	sparc_softintr_init(void);

/*
 * Preempt the current process on the target CPU: if it was interrupted
 * in user mode, preemption happens on return from the interrupt; if it
 * is in kernel mode, it happens after the current trap/syscall.
 */
#define cpu_need_resched(ci, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_resched = 1;					\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if (((flags) & RESCHED_IMMED) || (ci)->ci_cpuid != cpu_number()) \
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)

/*
 * Notify the LWP (l) that it has a signal pending; it should be
 * processed as soon as possible.
 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/* CPU architecture version */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp */
#define LWP_PC(l)       ((l)->l_md.md_tf->tf_pc)

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	struct	intrhand *ih_next;
	int	ih_classipl;
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
} *intrhand[15];

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);
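
/*
 * Illustrative sketch only, under assumptions not spelled out in this
 * header: the two int arguments are taken to be the hardware interrupt
 * level and a class IPL, the function pointer an optional custom trap
 * vector (NULL for the ordinary handler chain), and the bool an
 * MP-safety hint.  `mydev_intr', `sc', `level' and `classipl' are
 * hypothetical names; most drivers attach handlers through the bus
 * layer rather than calling this directly.
 *
 *	static struct intrhand mydev_ih;
 *
 *	mydev_ih.ih_fun = mydev_intr;	(returns 0 or 1, see above)
 *	mydev_ih.ih_arg = sc;
 *	intr_establish(level, classipl, &mydev_ih, NULL, false);
 */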

void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);

/* disksubr.c */
struct dkbad;
int isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void	dumpconf(void);
void *	reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void 	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int fixalign(struct lwp *, struct trapframe *, void **);
int emulinstr(int, struct trapframe *);

/* cpu.c */
void mp_pause_cpus(void);
void mp_resume_cpus(void);
void mp_halt_cpus(void);
#ifdef DDB
void mp_pause_cpus_ddb(void);
void mp_resume_cpus_ddb(void);
#endif

/* intr.c */
u_int setitr(u_int);
u_int getitr(void);


/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * Half of the 256 trap vectors are reserved for hardware traps and half
 * for software traps.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */
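
/*
 * Worked example, following the TBR layout described above: for trap
 * type `tt' the CPU starts executing at
 *
 *	TBR<31:12> | (tt << 4)
 *
 * which, with trapbase pointing at the 4096-byte-aligned table, is
 * simply &trapbase[tt] (each struct trapvec is 16 bytes, i.e. the four
 * instructions of that vector).
 */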

#endif /* _KERNEL */
#endif /* _CPU_H_ */
