/*	cpu.h,v 1.45.2.2 2008/01/09 01:48:46 matt Exp	*/
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
5 * Copyright (c) 1990 The Regents of the University of California.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)cpu.h 5.4 (Berkeley) 5/9/91
36 */
37
38 /*
39 * SH3/SH4 support.
40 *
41 * T.Horiuchi Brains Corp. 5/22/98
42 */
43
44 #ifndef _SH3_CPU_H_
45 #define _SH3_CPU_H_
46
47 #if defined(_KERNEL_OPT)
48 #include "opt_lockdebug.h"
49 #endif
50
51 #include <sh3/psl.h>
52 #include <sh3/frame.h>
53
54 #ifdef _KERNEL
55 #include <sys/cpu_data.h>
/*
 * Per-CPU state.  This port is uniprocessor-only, so there is exactly
 * one statically allocated instance (cpu_info_store) and curcpu()
 * always resolves to it.
 */
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	cpuid_t ci_cpuid;		/* CPU identifier */
	int ci_mtx_count;		/* mutex nesting count (used by MI mutex code) */
	int ci_mtx_oldspl;		/* spl to restore when mutex count drains */
	int ci_want_resched;		/* nonzero if a reschedule is pending */
};

extern struct cpu_info cpu_info_store;
#define curcpu() (&cpu_info_store)
66
/*
 * Definitions of cpu-dependent requirements referenced in generic code.
 * This port is uniprocessor-only, so the CPU number is the constant 0.
 */
#define cpu_number()	0
/*
 * Can't swap out the u-area (__SWAP_BROKEN), since we use a P1
 * converted address for the trapframe.
 */
#define cpu_swapin(p)		/* nothing */
/*
 * No trailing semicolon in the expansion: the call site supplies it,
 * and a stray empty statement breaks unbraced if/else constructs.
 */
#define cpu_swapout(p)		panic("cpu_swapout: can't get here")
#define cpu_proc_fork(p1, p2)	/* nothing */
79
/*
 * Interrupt stack location.
 */
extern vaddr_t intstack, intfp, intsp;

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int spc;	/* program counter at time of interrupt */
	int ssr;	/* status register at time of interrupt */
	int ssp;	/* stack pointer at time of interrupt */
};


/* True if the clock interrupt was taken while in user mode. */
#define CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
/* PC at the time of the interrupt. */
#define CLKF_PC(cf)		((cf)->spc)
/*
 * True if the interrupt nested (saved sp at or below the interrupt
 * stack top) — NOTE(review): assumes intsp is the stack's upper bound
 * and the stack grows downward; confirm against locore.
 */
#define CLKF_INTR(cf)		((vaddr_t)(cf)->ssp <= intsp)
99
/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.  md_regs points at the trapframe
 * saved on kernel entry; tf_spc is the saved user program counter.
 */
#define PROC_PC(p)							\
	(((struct trapframe *)(p)->p_md.md_regs)->tf_spc)
106
/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 *
 * The macro argument is parenthesized so an expression argument
 * (e.g. curcpu()) expands correctly; `flags' is unused on sh3.
 */
#define cpu_need_resched(ci, flags)					\
do {									\
	(ci)->ci_want_resched = 1;					\
	if (curlwp != (ci)->ci_data.cpu_idlelwp)			\
		aston(curlwp);						\
} while (/*CONSTCOND*/0)
117
/*
 * Give a profiling tick to the current lwp when the user profiling
 * buffer pages are invalid.  Request an AST to send us through trap,
 * marking the lwp as needing a profiling tick.
 */
#define cpu_need_proftick(l)						\
do {									\
	(l)->l_pflag |= LP_OWEUPC;					\
	aston(l);							\
} while (/*CONSTCOND*/0)

/*
 * Notify the current lwp (l) that it has a signal pending,
 * process as soon as possible.
 */
#define cpu_signotify(l)	aston(l)

/* Post an AST: the pending flag is checked on return to user mode. */
#define aston(l)	((l)->l_md.md_astpending = 1)
136
/*
 * We need a machine-independent name for this.
 */
#define DELAY(x)		delay(x)
141 #endif /* _KERNEL */
142
/*
 * Logical address space of the SH3/SH4 CPU.
 *
 * The 4GB virtual space is carved into five fixed segments.  For the
 * identity-mapped segments (P1/P2) the low 29 bits are the physical
 * address; the top bits select the segment.
 */
#define SH3_PHYS_MASK	0x1fffffff	/* low 29 bits == physical address */

#define SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define SH3_P0SEG_END	0x7fffffff
#define SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define SH3_P1SEG_END	0x9fffffff
#define SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define SH3_P2SEG_END	0xbfffffff
#define SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define SH3_P3SEG_END	0xdfffffff
#define SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define SH3_P4SEG_END	0xffffffff

/*
 * Conversions between physical addresses and the identity-mapped
 * segments.  P1 and P2 map the same physical memory; they differ only
 * in bit 29 (0x20000000), which selects cacheability.
 */
#define SH3_P1SEG_TO_PHYS(x)	(SH3_PHYS_MASK & (uint32_t)(x))
#define SH3_P2SEG_TO_PHYS(x)	(SH3_PHYS_MASK & (uint32_t)(x))
#define SH3_PHYS_TO_P1SEG(x)	(SH3_P1SEG_BASE | (uint32_t)(x))
#define SH3_PHYS_TO_P2SEG(x)	(SH3_P2SEG_BASE | (uint32_t)(x))
#define SH3_P1SEG_TO_P2SEG(x)	(0x20000000 | (uint32_t)(x))
#define SH3_P2SEG_TO_P1SEG(x)	(~0x20000000 & (uint32_t)(x))
165
#ifndef __lint__

/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's assigned goto extension, but gcc4 aggressive optimizations
 * tend to optimize that away under certain circumstances.
 *
 * mova computes the address of local label 2 (the instruction right
 * after the asm) into r0 — mova's destination is architecturally r0,
 * hence the explicit register binding.  OR-ing in bit 29 (0x20000000)
 * converts that P1 address to its P2 alias, and the jmp continues
 * execution in place, but uncached.
 */
#define RUN_P2						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
		"	mov.l	1f, %1	;"		\
		"	mova	2f, %0	;"		\
		"	or	%0, %1	;"		\
		"	jmp	@%1	;"		\
		"	nop	;"			\
		"	.align	2	;"		\
		"1:	.long	0x20000000;"		\
		"2:;"					\
		: "=r"(r0), "=r"(pc));			\
	} while (0)

/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control, memory-mapped cache and TLB
 * arrays, etc. and after touching them at least 8 instructions are
 * necessary before jumping to P1, so provide that padding here
 * (the numbered instructions below are the required 8).
 */
#define RUN_P1						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
	/*1*/	"	mov.l	1f, %1	;"		\
	/*2*/	"	mova	2f, %0	;"		\
	/*3*/	"	nop	;"			\
	/*4*/	"	and	%0, %1	;"		\
	/*5*/	"	nop	;"			\
	/*6*/	"	nop	;"			\
	/*7*/	"	nop	;"			\
	/*8*/	"	nop	;"			\
		"	jmp	@%1	;"		\
		"	nop	;"			\
		"	.align	2	;"		\
		"1:	.long	~0x20000000;"		\
		"2:;"					\
		: "=r"(r0), "=r"(pc));			\
	} while (0)

/*
 * If RUN_P1 is the last thing we do in a function we can omit it, b/c
 * we are going to return to a P1 caller anyway, but we still need to
 * ensure there's at least 8 instructions before jump to P1.
 */
#define PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")

#else  /* __lint__ */
/* lint cannot parse the asm; provide empty but syntactically valid stubs */
#define RUN_P2		do {} while (/* CONSTCOND */ 0)
#define RUN_P1		do {} while (/* CONSTCOND */ 0)
#define PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif
228
#if defined(SH4)
/* SH4 Processor Version Register */
#define SH4_PVR_ADDR	0xff000030	/* P4 address */
#define SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
/* SH4 Product Register — NOTE(review): presumably only valid on the
 * R-series parts below; confirm per CPU model. */
#define SH4_PRR_ADDR	0xff000044	/* P4 address */
#define SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

/* Compare PVR against these values after masking with SH4_PVR_MASK. */
#define SH4_PVR_MASK	0xffffff00
#define SH4_PVR_SH7750	0x04020500	/* SH7750 */
#define SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define SH4_PVR_SH7751	0x04110000	/* SH7751 */

/* Compare PRR against these values after masking with SH4_PRR_MASK. */
#define SH4_PRR_MASK	0xfffffff0
#define SH4_PRR_7750R	0x00000100	/* SH7750R */
#define SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif
246
/*
 * pull in #defines for kinds of processors
 */
#include <machine/cputypes.h>

/*
 * CTL_MACHDEP definitions — sysctl node identifiers under machdep.
 */
#define CPU_CONSDEV		1	/* dev_t: console terminal device */
#define CPU_LOADANDRESET	2	/* load kernel image and reset */
#define CPU_MAXID		3	/* number of valid machdep ids */
258
#ifdef _KERNEL
/* Kernel-private machine-dependent entry points (defined in sh3 MD code). */
void sh_cpu_init(int, int);	/* early CPU setup — args look like (arch, product); TODO confirm */
void sh_startup(void);		/* MD portion of system startup */
void cpu_reset(void) __attribute__((__noreturn__)); /* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);		/* busy-wait; presumably microseconds — confirm against callers */
struct pcb;
void savectx(struct pcb *);	/* save current register context into pcb */
void dumpsys(void);		/* dump physical memory to the dump device */
#endif /* _KERNEL */
269 #endif /* !_SH3_CPU_H_ */
270