/*	$NetBSD: cpu.h,v 1.52.2.1 2007/01/28 08:59:45 ad Exp $	*/

/*
 * Copyright (C) 1999 Wolfgang Solfrank.
 * Copyright (C) 1999 TooLs GmbH.
 * Copyright (C) 1995-1997 Wolfgang Solfrank.
 * Copyright (C) 1995-1997 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_POWERPC_CPU_H_
#define	_POWERPC_CPU_H_

/*
 * Primary cache geometry for this CPU, filled in by cpu_probe_cache()
 * and exported to userland via the machdep.cacheinfo sysctl node
 * (CPU_CACHEINFO, defined below).
 */
struct cache_info {
	int dcache_size;
	int dcache_line_size;
	int icache_size;
	int icache_line_size;
};

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"
#endif

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/intr.h>
#include <sys/device.h>

#include <sys/cpu_data.h>

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	struct device *ci_dev;		/* device of corresponding cpu */
	struct lwp *ci_curlwp;		/* current owner of the processor */

	struct pcb *ci_curpcb;		/* current PCB */
	struct pmap *ci_curpm;		/* current pmap */
	struct lwp *ci_fpulwp;		/* LWP that owns the FPU */
	struct lwp *ci_veclwp;		/* LWP that owns the AltiVec unit */
	struct pcb *ci_idle_pcb;	/* PA of our idle pcb */
	int ci_cpuid;

	volatile int ci_astpending;	/* AST requested */
	int ci_want_resched;		/* reschedule requested */
	volatile u_long ci_lasttb;	/* timebase at last clock interrupt */
	volatile int ci_tickspending;	/* clock ticks not yet processed */
	volatile int ci_cpl;		/* current interrupt priority level */
	volatile int ci_iactive;	/* interrupt dispatch in progress */
	volatile int ci_ipending;	/* pending interrupt bits */
	int ci_intrdepth;		/* interrupt nesting depth */
	int ci_mtx_oldspl;		/* saved spl for spin mutexes */
	int ci_mtx_count;		/* spin mutexes held */
	char *ci_intstk;		/* interrupt stack */
#define	CPUSAVE_LEN	8
	register_t ci_tempsave[CPUSAVE_LEN];
	register_t ci_ddbsave[CPUSAVE_LEN];
	register_t ci_ipkdbsave[CPUSAVE_LEN];
#define	CPUSAVE_R28	0		/* where r28 gets saved */
#define	CPUSAVE_R29	1		/* where r29 gets saved */
#define	CPUSAVE_R30	2		/* where r30 gets saved */
#define	CPUSAVE_R31	3		/* where r31 gets saved */
#define	CPUSAVE_DAR	4		/* where SPR_DAR gets saved */
#define	CPUSAVE_DSISR	5		/* where SPR_DSISR gets saved */
#define	CPUSAVE_SRR0	6		/* where SRR0 gets saved */
#define	CPUSAVE_SRR1	7		/* where SRR1 gets saved */
#define	DISISAVE_LEN	4
	register_t ci_disisave[DISISAVE_LEN];
	struct cache_info ci_ci;	/* cache parameters, see above */
	void *ci_sysmon_cookie;
	void (*ci_idlespin)(void);	/* CPU-specific idle loop */
	uint32_t ci_khz;		/* CPU clock rate, in kHz */
	struct evcnt ci_ev_clock;	/* clock intrs */
	struct evcnt ci_ev_statclock;	/* stat clock */
	struct evcnt ci_ev_softclock;	/* softclock intrs */
	struct evcnt ci_ev_softnet;	/* softnet intrs */
	struct evcnt ci_ev_softserial;	/* softserial intrs */
	struct evcnt ci_ev_traps;	/* calls to trap() */
	struct evcnt ci_ev_kdsi;	/* kernel DSI traps */
	struct evcnt ci_ev_udsi;	/* user DSI traps */
	struct evcnt ci_ev_udsi_fatal;	/* user DSI trap failures */
	struct evcnt ci_ev_kisi;	/* kernel ISI traps */
	struct evcnt ci_ev_isi;		/* user ISI traps */
	struct evcnt ci_ev_isi_fatal;	/* user ISI trap failures */
	struct evcnt ci_ev_pgm;		/* user PGM traps */
	struct evcnt ci_ev_fpu;		/* FPU traps */
	struct evcnt ci_ev_fpusw;	/* FPU context switch */
	struct evcnt ci_ev_ali;		/* Alignment traps */
	struct evcnt ci_ev_ali_fatal;	/* Alignment fatal trap */
	struct evcnt ci_ev_scalls;	/* system call traps */
	struct evcnt ci_ev_vec;		/* AltiVec traps */
	struct evcnt ci_ev_vecsw;	/* AltiVec context switches */
	struct evcnt ci_ev_umchk;	/* user MCHK events */
};

#ifdef MULTIPROCESSOR
static __inline int
cpu_number(void)
{
	int pir;

	/* SPR 1023 is the PIR (Processor Identification Register). */
	__asm ("mfspr %0,1023" : "=r"(pir));
	return pir;
}

void	cpu_boot_secondary_processors(void);


#define CPU_IS_PRIMARY(ci)	((ci)->ci_cpuid == 0)
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = &cpu_info[0]; cii < CPU_MAXNUM; cii++, ci++

#else

#define cpu_number()		0

#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = curcpu(); ci != NULL; ci = NULL

#endif /* MULTIPROCESSOR */
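
/*
 * Illustrative sketch (not part of this header): CPU_INFO_FOREACH supplies
 * the three clauses of a for-loop, so callers iterate over all CPUs as:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		printf("cpu%d\n", ci->ci_cpuid);
 *
 * On uniprocessor kernels the loop simply visits curcpu() once.
 */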

extern struct cpu_info cpu_info[];

static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;

	/* SPRG0 holds the address of this CPU's cpu_info. */
	__asm volatile ("mfsprg %0,0" : "=r"(ci));
	return ci;
}

#define curlwp			(curcpu()->ci_curlwp)
#define curpcb			(curcpu()->ci_curpcb)
#define curpm			(curcpu()->ci_curpm)

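/*
 * Illustrative sketch (not part of this header): per-CPU state is reached
 * through curcpu(); for example, the trap path accounts events roughly
 * like this:
 *
 *	struct cpu_info * const ci = curcpu();
 *
 *	ci->ci_ev_traps.ev_count++;	// one more trap on this CPU
 *	ci->ci_astpending = 1;		// request an AST on return to user
 */
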
static __inline register_t
mfmsr(void)
{
	register_t msr;

	__asm volatile ("mfmsr %0" : "=r"(msr));
	return msr;
}

static __inline void
mtmsr(register_t msr)
{

	__asm volatile ("mtmsr %0" : : "r"(msr));
}
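
/*
 * Illustrative sketch (not part of this header): mfmsr()/mtmsr() are
 * typically paired to block external interrupts around a short critical
 * section by clearing PSL_EE (from <machine/psl.h>):
 *
 *	const register_t msr = mfmsr();
 *
 *	mtmsr(msr & ~PSL_EE);	// disable external interrupts
 *	// ... touch state that an interrupt handler also uses ...
 *	mtmsr(msr);		// restore the previous MSR
 */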

static __inline uint32_t
mftbl(void)
{
	uint32_t tbl;

	__asm volatile (
#ifdef PPC_IBM403
"	mftblo %0	\n"
#else
"	mftbl %0	\n"
#endif
	: "=r" (tbl));

	return tbl;
}

static __inline uint64_t
mftb(void)
{
	uint64_t tb;

#ifdef _LP64
	__asm volatile ("mftb %0" : "=r"(tb));
#else
	int tmp;

	__asm volatile (
#ifdef PPC_IBM403
"1:	mftbhi %0	\n"
"	mftblo %0+1	\n"
"	mftbhi %1	\n"
#else
"1:	mftbu %0	\n"
"	mftb %0+1	\n"
"	mftbu %1	\n"
#endif
"	cmplw %0,%1	\n"
"	bne- 1b		\n"
	: "=r" (tb), "=r"(tmp) :: "cr0");
#endif

	return tb;
}
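
/*
 * Illustrative sketch (not part of this header): on 32-bit kernels the
 * loop in mftb() above reads the upper half of the timebase, then the
 * lower half, then the upper half again; if the two upper-half reads
 * differ, the lower half wrapped in between and the sample is retried.
 * In C it would look roughly like this (mftbu() is a hypothetical
 * upper-half accessor):
 *
 *	uint32_t hi, lo, rehi;
 *
 *	do {
 *		hi   = mftbu();
 *		lo   = mftbl();
 *		rehi = mftbu();
 *	} while (hi != rehi);
 *	return ((uint64_t)hi << 32) | lo;
 */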

static __inline uint32_t
mfrtcl(void)
{
	uint32_t rtcl;

	__asm volatile ("mfrtcl %0" : "=r"(rtcl));
	return rtcl;
}

static __inline void
mfrtc(uint32_t *rtcp)
{
	uint32_t tmp;

	__asm volatile (
"1:	mfrtcu	%0	\n"
"	mfrtcl	%1	\n"
"	mfrtcu	%2	\n"
"	cmplw	%0,%2	\n"
"	bne-	1b"
	    : "=r"(*rtcp), "=r"(*(rtcp + 1)), "=r"(tmp) :: "cr0");
}

static __inline uint32_t
mfpvr(void)
{
	uint32_t pvr;

	__asm volatile ("mfpvr %0" : "=r"(pvr));
	return (pvr);
}
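
/*
 * Illustrative sketch (not part of this header): the PVR packs the
 * processor version in the upper half and the revision in the lower
 * half, so identification code typically does:
 *
 *	uint32_t pvr = mfpvr();
 *	unsigned version  = pvr >> 16;		// matched against a CPU table
 *	unsigned revision = pvr & 0xffff;
 */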

static __inline int
cntlzw(uint32_t val)
{
	int cnt;

	__asm volatile ("cntlzw %0,%1" : "=r"(cnt) : "r"(val));
	return (cnt);
}
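
/*
 * Illustrative sketch (not part of this header): cntlzw() counts leading
 * zero bits (it yields 32 for a zero argument), so the index of the most
 * significant set bit in a mask such as ci_ipending can be found with:
 *
 *	if (mask != 0) {
 *		int bit = 31 - cntlzw(mask);	// highest pending bit
 *		// ... dispatch the handler for `bit' ...
 *	}
 */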

#if defined(PPC_IBM4XX) || defined(PPC_IBM403)
/*
 * DCR (Device Control Register) access.  These have to be macros
 * because the register number is encoded as an immediate operand
 * of the instruction.
 */
#define mtdcr(reg, val)						\
	__asm volatile("mtdcr %0,%1" : : "K"(reg), "r"(val))

#define mfdcr(reg)						\
({								\
	uint32_t __val;						\
								\
	__asm volatile("mfdcr %0,%1" : "=r"(__val) : "K"(reg)); \
	__val;							\
})
#endif /* PPC_IBM4XX || PPC_IBM403 */
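
/*
 * Illustrative sketch (not part of this header): because the DCR number
 * must be a compile-time constant, callers pass a literal or a #define.
 * With a hypothetical DCR number for an on-chip device register:
 *
 *	#define DCR_EXAMPLE_SR	0x0c0		// hypothetical DCR number
 *
 *	uint32_t sr = mfdcr(DCR_EXAMPLE_SR);	// read the register
 *	mtdcr(DCR_EXAMPLE_SR, sr);		// write it back
 */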

/*
 * CLKF_BASEPRI depends on the underlying interrupt code and cannot
 * be defined here.  It should be defined in <machine/intr.h>.
 */
#define	CLKF_USERMODE(frame)	(((frame)->srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->srr0)
#define	CLKF_INTR(frame)	((frame)->depth > 0)

#define	LWP_PC(l)		(trapframe(l)->srr0)

#define	cpu_swapout(p)
#define	cpu_proc_fork(p1, p2)

extern int powersave;
extern int cpu_timebase;
extern int cpu_printfataltraps;
extern char cpu_model[];

struct cpu_info *cpu_attach_common(struct device *, int);
void cpu_setup(struct device *, struct cpu_info *);
void cpu_identify(char *, size_t);
void delay(unsigned int);
void cpu_probe_cache(void);
void dcache_flush_page(vaddr_t);
void icache_flush_page(vaddr_t);
void dcache_flush(vaddr_t, vsize_t);
void icache_flush(vaddr_t, vsize_t);
void *mapiodev(paddr_t, psize_t);
void unmapiodev(vaddr_t, vsize_t);

#define	DELAY(n)		delay(n)

#define	cpu_need_resched(ci)	((ci)->ci_want_resched = 1, (ci)->ci_astpending = 1)
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, curcpu()->ci_astpending = 1)
#define	cpu_signotify(l)	(curcpu()->ci_astpending = 1)	/* XXXSMP */

#if defined(PPC_OEA) || defined(PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
void oea_init(void (*)(void));
void oea_startup(const char *);
void oea_dumpsys(void);
void oea_install_extint(void (*)(void));
paddr_t kvtop(caddr_t);
void softnet(int);

extern paddr_t msgbuf_paddr;
extern int cpu_altivec;
#endif

#endif /* _KERNEL */

#if defined(_KERNEL) || defined(_STANDALONE)
#if !defined(CACHELINESIZE)
#ifdef PPC_IBM403
#define	CACHELINESIZE	16
#else
#if defined (PPC_OEA64_BRIDGE)
#define	CACHELINESIZE	128
#else
#define	CACHELINESIZE	32
#endif /* PPC_OEA64_BRIDGE */
#endif
#endif
#endif

void __syncicache(void *, size_t);
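
/*
 * Illustrative sketch (not part of this header): __syncicache() must be
 * called after storing instructions to memory (loaded modules, copied
 * trampolines, breakpoints), because the PowerPC instruction cache is
 * not kept coherent with data-side stores:
 *
 *	memcpy(dst, trampoline_code, trampoline_len);
 *	__syncicache(dst, trampoline_len);	// flush D-side, invalidate I-side
 */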

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CACHELINE		1
#define	CPU_TIMEBASE		2
#define	CPU_CPUTEMP		3
#define	CPU_PRINTFATALTRAPS	4
#define	CPU_CACHEINFO		5
#define	CPU_ALTIVEC		6
#define	CPU_MODEL		7
#define	CPU_POWERSAVE		8
#define	CPU_MAXID		9

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "cachelinesize", CTLTYPE_INT }, \
	{ "timebase", CTLTYPE_INT }, \
	{ "cputempature", CTLTYPE_INT }, \
	{ "printfataltraps", CTLTYPE_INT }, \
	{ "cacheinfo", CTLTYPE_STRUCT }, \
	{ "altivec", CTLTYPE_INT }, \
	{ "model", CTLTYPE_STRING }, \
	{ "powersave", CTLTYPE_INT }, \
}
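
/*
 * Illustrative userland sketch (not part of this header): the node
 * numbers above form the second component of a CTL_MACHDEP MIB, e.g.
 * to read machdep.cachelinesize with sysctl(3):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_CACHELINE };
 *	int linesize;
 *	size_t len = sizeof(linesize);
 *
 *	if (sysctl(mib, 2, &linesize, &len, NULL, 0) == -1)
 *		err(1, "sysctl machdep.cachelinesize");
 */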

#endif	/* _POWERPC_CPU_H_ */