/*	$NetBSD: cpu.h,v 1.53 2008/03/22 03:23:27 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
 */

/*
 * SH3/SH4 support.
 *
 *  T.Horiuchi Brains Corp. 5/22/98
 */

#ifndef _SH3_CPU_H_
#define	_SH3_CPU_H_

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#include <sh3/psl.h>
#include <sh3/frame.h>

#ifdef _KERNEL
#include <sys/cpu_data.h>
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	cpuid_t ci_cpuid;
	int ci_mtx_count;
	int ci_mtx_oldspl;
	int ci_want_resched;
	int ci_idepth;
};

extern struct cpu_info cpu_info_store;
#define	curcpu()	(&cpu_info_store)

/*
 * Definitions of CPU-dependent requirements referenced in generic code.
 */
#define	cpu_number()	0

/*
 * Can't swap out the u-area (__SWAP_BROKEN), since we use a P1
 * converted address for the trapframe.
 */
#define	cpu_swapin(p)		/* nothing */
#define	cpu_swapout(p)		panic("cpu_swapout: can't get here");
#define	cpu_proc_fork(p1, p2)	/* nothing */

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int	spc;	/* program counter at time of interrupt */
	int	ssr;	/* status register at time of interrupt */
	int	ssp;	/* stack pointer at time of interrupt */
};

#define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
#define	CLKF_PC(cf)		((cf)->spc)
#define	CLKF_INTR(cf)		(curcpu()->ci_idepth > 0)

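/*
 * Illustrative sketch, not part of the original interface: one way a
 * clock interrupt path could use the accessors above to decide where a
 * tick should be charged.  The function name and return values are
 * hypothetical; the real consumers are the MI clock/profiling code.
 */
static __inline int
example_classify_tick(struct clockframe *cf)
{

	if (CLKF_USERMODE(cf))
		return 0;	/* user mode; CLKF_PC(cf) is a user PC */
	else if (CLKF_INTR(cf))
		return 1;	/* taken while already in interrupt context */
	else
		return 2;	/* kernel (system) time */
}
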
/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	PROC_PC(p)							\
	(((struct trapframe *)(p)->p_md.md_regs)->tf_spc)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	cpu_need_resched(ci, flags)					\
do {									\
	ci->ci_want_resched = 1;					\
	if (curlwp != ci->ci_data.cpu_idlelwp)				\
		aston(curlwp);						\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current lwp when the user profiling
 * buffer pages are invalid: request an ast to send us through trap,
 * marking the lwp as needing a profiling tick.
 */
#define	cpu_need_proftick(l)						\
do {									\
	(l)->l_pflag |= LP_OWEUPC;					\
	aston(l);							\
} while (/*CONSTCOND*/0)

/*
 * Notify the current lwp (l) that it has a signal pending,
 * to be processed as soon as possible.
 */
#define	cpu_signotify(l)	aston(l)

#define	aston(l)		((l)->l_md.md_astpending = 1)

/*
 * We need a machine-independent name for this.
 */
#define	DELAY(x)		delay(x)
#endif /* _KERNEL */

/*
 * Logical address space of SH3/SH4 CPU.
 */
#define	SH3_PHYS_MASK	0x1fffffff

#define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define	SH3_P0SEG_END	0x7fffffff
#define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define	SH3_P1SEG_END	0x9fffffff
#define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define	SH3_P2SEG_END	0xbfffffff
#define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define	SH3_P3SEG_END	0xdfffffff
#define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define	SH3_P4SEG_END	0xffffffff

#define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
#define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
#define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) | 0x20000000)
#define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) & ~0x20000000)

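/*
 * Illustrative sketch, not part of the original interface: forming an
 * uncached (P2) pointer from a physical address with the conversion
 * macros above.  The function name is hypothetical, and it assumes the
 * fixed-width types (uint32_t) are already in scope, as they are for
 * every other user of these macros.
 */
static __inline volatile uint32_t *
example_uncached_ptr(uint32_t pa)
{

	/* P2 is pa == va and bypasses the cache, so it suits device access. */
	return (volatile uint32_t *)SH3_PHYS_TO_P2SEG(pa);
}
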
#ifndef __lint__

/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's assigned goto extension, but gcc4 aggressive optimizations
 * tend to optimize that away under certain circumstances.
 */
#define	RUN_P2						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
			"	mov.l	1f, %1	;"	\
			"	mova	2f, %0	;"	\
			"	or	%0, %1	;"	\
			"	jmp	@%1	;"	\
			"	 nop		;"	\
			"	.align	2	;"	\
			"1:	.long	0x20000000;"	\
			"2:;"				\
			: "=r"(r0), "=r"(pc));		\
	} while (0)

/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control, memory-mapped cache and TLB
 * arrays, etc., and after touching them at least 8 instructions are
 * necessary before jumping to P1, so provide that padding here.
 */
#define	RUN_P1						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
		/*1*/	"	mov.l	1f, %1	;"	\
		/*2*/	"	mova	2f, %0	;"	\
		/*3*/	"	nop		;"	\
		/*4*/	"	and	%0, %1	;"	\
		/*5*/	"	nop		;"	\
		/*6*/	"	nop		;"	\
		/*7*/	"	nop		;"	\
		/*8*/	"	nop		;"	\
			"	jmp	@%1	;"	\
			"	 nop		;"	\
			"	.align	2	;"	\
			"1:	.long	~0x20000000;"	\
			"2:;"				\
			: "=r"(r0), "=r"(pc));		\
	} while (0)

/*
 * If RUN_P1 is the last thing we do in a function we can omit it,
 * because we are going to return to a P1 caller anyway, but we still
 * need to ensure there are at least 8 instructions before the jump to P1.
 */
#define	PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")

#else  /* __lint__ */
#define	RUN_P2		do {} while (/* CONSTCOND */ 0)
#define	RUN_P1		do {} while (/* CONSTCOND */ 0)
#define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif
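
/*
 * Illustrative sketch, not part of the original interface: code that
 * pokes memory-mapped cache/TLB arrays switches to P2 around the access
 * and then returns to P1 (RUN_P1 already provides the required 8
 * instructions of padding).  The function name and register pointer are
 * hypothetical.
 */
static __inline void
example_write_array(volatile uint32_t *reg, uint32_t val)
{

	RUN_P2;		/* uncached from here on */
	*reg = val;	/* touch the memory-mapped array */
	RUN_P1;		/* back to cached P1 */
}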

#if defined(SH4)
/* SH4 Processor Version Register */
#define	SH4_PVR_ADDR	0xff000030	/* P4 address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
#define	SH4_PRR_ADDR	0xff000044	/* P4 address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750 */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751 */

#define	SH4_PRR_MASK	0xfffffff0
#define	SH4_PRR_7750R	0x00000100	/* SH7750R */
#define	SH4_PRR_7751R	0x00000110	/* SH7751R */
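
/*
 * Illustrative sketch, not part of the original interface: judging by
 * the values above, the PVR identifies the SH7750/SH7750S/SH7751
 * variants, and for the SH775xR parts the PRR distinguishes SH7750R
 * from SH7751R.  The function name is hypothetical, and reading the
 * registers requires privileged (P4) access.
 */
static __inline int
example_is_sh7751r(void)
{

	return (SH4_PVR & SH4_PVR_MASK) == SH4_PVR_SH775xR &&
	    (SH4_PRR & SH4_PRR_MASK) == SH4_PRR_7751R;
}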
#endif

/*
 * Pull in #defines for kinds of processors.
 */
#include <machine/cputypes.h>

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CONSDEV		1	/* dev_t: console terminal device */
#define	CPU_LOADANDRESET	2	/* load kernel image and reset */
#define	CPU_MAXID		3	/* number of valid machdep ids */
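
/*
 * Illustrative sketch, not part of the original interface: a userland
 * program could read the console device through this node roughly as
 * follows (error handling trimmed; needs <sys/param.h>, <sys/sysctl.h>
 * and <err.h>):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_CONSDEV };
 *	dev_t cdev;
 *	size_t len = sizeof(cdev);
 *
 *	if (sysctl(mib, 2, &cdev, &len, NULL, 0) == -1)
 *		err(1, "sysctl machdep.consdev");
 */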

#ifdef _KERNEL
void sh_cpu_init(int, int);
void sh_startup(void);
void cpu_reset(void) __attribute__((__noreturn__));	/* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);
struct pcb;
void savectx(struct pcb *);
void dumpsys(void);
#endif /* _KERNEL */
#endif /* !_SH3_CPU_H_ */