/*	$NetBSD: cpu.h,v 1.58 2019/11/23 19:40:36 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
 */

/*
 * SH3/SH4 support.
 *
 *  T.Horiuchi Brains Corp. 5/22/98
 */

#ifndef _SH3_CPU_H_
#define	_SH3_CPU_H_

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#include <sh3/psl.h>
#include <sh3/frame.h>

#ifdef _KERNEL
#include <sys/cpu_data.h>
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	cpuid_t ci_cpuid;
	int ci_mtx_count;
	int ci_mtx_oldspl;
	int ci_want_resched;
	int ci_idepth;
};

extern struct cpu_info cpu_info_store;
#define	curcpu()	(&cpu_info_store)

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_number()	0

#define	cpu_proc_fork(p1, p2)	/* nothing */

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int	spc;	/* program counter at time of interrupt */
	int	ssr;	/* status register at time of interrupt */
	int	ssp;	/* stack pointer at time of interrupt */
};


#define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
#define	CLKF_PC(cf)		((cf)->spc)
#define	CLKF_INTR(cf)		(curcpu()->ci_idepth > 0)
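
/*
 * Usage sketch (illustrative only, not part of this header): the MD
 * clock interrupt handler builds a clockframe from the saved exception
 * state and hands it to the MI clock code, roughly:
 *
 *	struct clockframe cf;
 *
 *	cf.spc = tf->tf_spc;	(saved program counter)
 *	cf.ssr = tf->tf_ssr;	(saved status register)
 *	cf.ssp = tf->tf_ssp;	(saved stack pointer)
 *	hardclock(&cf);
 *
 * MI code then applies CLKF_USERMODE()/CLKF_PC()/CLKF_INTR() to the
 * frame; the tf_* member names are assumptions based on <sh3/frame.h>.
 */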

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	LWP_PC(l)							\
	(((struct trapframe *)(l)->l_md.md_regs)->tf_spc)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	cpu_need_resched(ci,l,flags)					\
do {									\
	if ((flags & RESCHED_IDLE) == 0)				\
		aston(curlwp);						\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  Request an AST to send us through trap,
 * marking the lwp as needing a profiling tick.
 */
#define	cpu_need_proftick(l)						\
do {									\
	(l)->l_pflag |= LP_OWEUPC;					\
	aston(l);							\
} while (/*CONSTCOND*/0)

/*
 * Notify the current lwp (l) that it has a signal pending,
 * process as soon as possible.
 */
#define	cpu_signotify(l)	aston(l)

#define	aston(l)	((l)->l_md.md_astpending = 1)

/*
 * We need a machine-independent name for this.
 */
#define	DELAY(x)	delay(x)
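/* For example, DELAY(100) busy-waits for roughly 100 microseconds. */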
#endif /* _KERNEL */

/*
 * Logical address space of SH3/SH4 CPU.
 */
#define	SH3_PHYS_MASK	0x1fffffff

#define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define	SH3_P0SEG_END	0x7fffffff
#define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define	SH3_P1SEG_END	0x9fffffff
#define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define	SH3_P2SEG_END	0xbfffffff
#define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define	SH3_P3SEG_END	0xdfffffff
#define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define	SH3_P4SEG_END	0xffffffff

#define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
#define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
#define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) | 0x20000000)
#define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) & ~0x20000000)
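
/*
 * Example (illustrative only): device memory that must bypass the
 * cache can be accessed through its P2 alias.  Given a P1 (cached)
 * kernel virtual address va:
 *
 *	paddr_t pa = SH3_P1SEG_TO_PHYS(va);
 *	volatile uint32_t *p = (volatile uint32_t *)SH3_PHYS_TO_P2SEG(pa);
 *	*p = v;		(uncached store)
 *
 * Since P1 and P2 differ only in address bit 29, SH3_P1SEG_TO_P2SEG(va)
 * produces the same uncached alias directly.
 */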

#ifndef __lint__

/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's computed goto ("labels as values") extension, but gcc4's
 * aggressive optimizations tend to optimize that away under certain
 * circumstances.
 */
#define	RUN_P2						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
			"	mov.l	1f, %1	;"	\
			"	mova	2f, %0	;"	\
			"	or	%0, %1	;"	\
			"	jmp	@%1	;"	\
			"	 nop		;"	\
			"	.align 2	;"	\
			"1:	.long	0x20000000;"	\
			"2:;"				\
			: "=r"(r0), "=r"(pc));		\
	} while (0)

/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control, memory-mapped cache and TLB
 * arrays, etc., and after touching them at least 8 instructions are
 * necessary before jumping to P1, so provide that padding here.
 */
#define	RUN_P1						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
		/*1*/	"	mov.l	1f, %1	;"	\
		/*2*/	"	mova	2f, %0	;"	\
		/*3*/	"	nop		;"	\
		/*4*/	"	and	%0, %1	;"	\
		/*5*/	"	nop		;"	\
		/*6*/	"	nop		;"	\
		/*7*/	"	nop		;"	\
		/*8*/	"	nop		;"	\
			"	jmp	@%1	;"	\
			"	 nop		;"	\
			"	.align 2	;"	\
			"1:	.long	~0x20000000;"	\
			"2:;"				\
			: "=r"(r0), "=r"(pc));		\
	} while (0)

/*
 * If RUN_P1 is the last thing we do in a function we can omit it,
 * because we are going to return to a P1 caller anyway, but we still
 * need to ensure there are at least 8 instructions before the jump
 * to P1.
 */
#define	PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")

#else /* __lint__ */
#define	RUN_P2		do {} while (/* CONSTCOND */ 0)
#define	RUN_P1		do {} while (/* CONSTCOND */ 0)
#define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif
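
/*
 * Typical pattern (a sketch, not a requirement of this header): code
 * that touches the memory-mapped cache/TLB arrays first jumps to the
 * uncached P2 alias, does its work, then returns to cached P1:
 *
 *	RUN_P2;			(now executing from the P2 alias)
 *	... write CCR, cache address arrays, etc. ...
 *	RUN_P1;			(pads 8+ instructions, jumps back to P1)
 *
 * When the P2 work is the last thing a function does, PAD_P1_SWITCH
 * can stand in for RUN_P1, since the return goes to a P1 caller.
 */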

#if defined(SH4)
/* SH4 Processor Version Register */
#define	SH4_PVR_ADDR	0xff000030	/* P4 address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
#define	SH4_PRR_ADDR	0xff000044	/* P4 address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750 */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751 */

#define	SH4_PRR_MASK	0xfffffff0
#define	SH4_PRR_7750R	0x00000100	/* SH7750R */
#define	SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif
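
/*
 * Identification sketch (SH4 only; hypothetical code, not part of this
 * header): the product and its revision can be distinguished by masking
 * the version registers, e.g.:
 *
 *	switch (SH4_PVR & SH4_PVR_MASK) {
 *	case SH4_PVR_SH7750:
 *		... plain SH7750 ...
 *		break;
 *	case SH4_PVR_SH775xR:
 *		if ((SH4_PRR & SH4_PRR_MASK) == SH4_PRR_7751R)
 *			... SH7751R ...
 *		break;
 *	}
 */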

/*
 * pull in #defines for kinds of processors
 */
#include <machine/cputypes.h>

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CONSDEV		1	/* dev_t: console terminal device */
#define	CPU_LOADANDRESET	2	/* load kernel image and reset */

#ifdef _KERNEL
void sh_cpu_init(int, int);
void sh_startup(void);
void cpu_reset(void) __attribute__((__noreturn__)); /* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);
struct pcb;
void savectx(struct pcb *);
void dumpsys(void);
#endif /* _KERNEL */
#endif /* !_SH3_CPU_H_ */