riscv_machdep.c revision 1.8 1 1.1 matt /*-
2 1.6 ad * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
3 1.1 matt * All rights reserved.
4 1.1 matt *
5 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
6 1.1 matt * by Matt Thomas of 3am Software Foundry.
7 1.1 matt *
8 1.1 matt * Redistribution and use in source and binary forms, with or without
9 1.1 matt * modification, are permitted provided that the following conditions
10 1.1 matt * are met:
11 1.1 matt * 1. Redistributions of source code must retain the above copyright
12 1.1 matt * notice, this list of conditions and the following disclaimer.
13 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer in the
15 1.1 matt * documentation and/or other materials provided with the distribution.
16 1.1 matt *
17 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
28 1.1 matt */
29 1.1 matt
30 1.1 matt #include <sys/cdefs.h>
31 1.1 matt
32 1.1 matt #include "opt_modular.h"
33 1.1 matt
34 1.8 ad __RCSID("$NetBSD: riscv_machdep.c,v 1.8 2019/12/31 13:07:12 ad Exp $");
35 1.1 matt
36 1.1 matt #include <sys/param.h>
37 1.1 matt #include <sys/systm.h>
38 1.1 matt #include <sys/cpu.h>
39 1.1 matt #include <sys/exec.h>
40 1.1 matt #include <sys/lwp.h>
41 1.1 matt #include <sys/kmem.h>
42 1.1 matt #include <sys/ktrace.h>
43 1.1 matt #include <sys/module.h>
44 1.1 matt #include <sys/proc.h>
45 1.1 matt #include <sys/reboot.h>
46 1.1 matt #include <sys/syscall.h>
47 1.1 matt
48 1.1 matt #include <uvm/uvm_extern.h>
49 1.1 matt
50 1.1 matt #include <riscv/locore.h>
51 1.1 matt
/* Non-zero => log fatal user traps to the console — presumably; set nowhere in this file. */
int cpu_printfataltraps;
/* Exported machine identity strings (sysctl hw.machine / hw.machine_arch). */
char machine[] = MACHINE;
char machine_arch[] = MACHINE_ARCH;

/* Submap of kernel_map used for physio; created in cpu_startup(). */
struct vm_map *phys_map;

/* Register snapshot handed to DDB for the boot CPU. */
struct trapframe cpu_ddb_regs;

struct cpu_info cpu_info_store = {
	.ci_cpl = IPL_HIGH,		/* boot at highest IPL until interrupts are enabled */
	.ci_ddb_regs = &cpu_ddb_regs,
};

/* PCU backends for lazily-switched units; only the FPU on riscv. */
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &pcu_fpu_ops,
};
68 1.1 matt
69 1.1 matt void
70 1.1 matt delay(unsigned long us)
71 1.1 matt {
72 1.1 matt const uint32_t cycles_per_us = curcpu()->ci_data.cpu_cc_freq / 1000000;
73 1.1 matt const uint64_t cycles = (uint64_t)us * cycles_per_us;
74 1.1 matt const uint64_t finish = riscvreg_cycle_read() + cycles;
75 1.1 matt
76 1.1 matt while (riscvreg_cycle_read() < finish) {
77 1.1 matt /* spin, baby spin */
78 1.1 matt }
79 1.1 matt }
80 1.1 matt
#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 * Empty stub for now — no boot-loader module handoff is implemented here.
 */
void
module_init_md(void)
{
}
#endif /* MODULAR */
90 1.1 matt
/*
 * Set registers on exec.
 *
 * Zero the whole user trapframe, then set sp to the (aligned) stack
 * passed in and pc to the entry point from the exec package (low bit
 * cleared).  a0-a2 carry the _start(obj, cleanup, ps_strings) arguments.
 *
 * NOTE(review): the previous comment here described $t9 and the MIPS
 * ELF ABI — copy-paste from mips; it did not match this code.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;

	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_sp = (intptr_t)stack_align(stack);
	tf->tf_pc = (intptr_t)pack->ep_entry & ~1;	/* force even PC */
#ifdef _LP64
	/* 32-bit processes get the 32-bit user status-register bits. */
	tf->tf_sr = (p->p_flag & PK_32) ? SR_USER32 : SR_USER;
#else
	tf->tf_sr = SR_USER;
#endif
	// Set up arguments for _start(obj, cleanup, ps_strings)
	tf->tf_a0 = 0;			// obj
	tf->tf_a1 = 0;			// cleanup
	tf->tf_a2 = p->p_psstrp;	// ps_strings
}
117 1.1 matt
/*
 * MD part of returning to user mode in a newly forked child: set the
 * child's syscall return values so libc can tell it from the parent.
 */
void
md_child_return(struct lwp *l)
{
	struct trapframe * const tf = l->l_md.md_utf;

	tf->tf_a0 = 0;		/* fork() returns 0 in the child */
	tf->tf_a1 = 1;		/* "this is the child" flag */
	tf->tf_sr &= ~SR_EF;	/* Disable FP; child starts with no FP state loaded. */
}
127 1.1 matt
/*
 * Return path for a posix_spawn()ed LWP: nothing MD to fix up beyond
 * the common userret() processing.
 */
void
cpu_spawn_return(struct lwp *l)
{
	userret(l);
}
133 1.1 matt
/*
 * Start a new LWP: install the ucontext handed over by _lwp_create(),
 * free it, and head out to user mode.
 */
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;	/* context allocated by the creator */
	lwp_t * const l = curlwp;
	int error __diagused;

	/* Validated at creation time, so this should not fail. */
	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	/* The context was kmem_alloc()ed for the handoff; release it. */
	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}
150 1.1 matt
// We've worked hard to make sure struct reg and __gregset_t are the same.
// Ditto for struct fpreg and __fregset_t.  The direct struct copies in
// cpu_getmcontext()/cpu_setmcontext() below rely on these layouts matching.

CTASSERT(sizeof(struct reg) == sizeof(__gregset_t));
CTASSERT(sizeof(struct fpreg) == sizeof(__fregset_t));
156 1.1 matt
157 1.1 matt void
158 1.1 matt cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
159 1.1 matt {
160 1.1 matt const struct trapframe * const tf = l->l_md.md_utf;
161 1.1 matt
162 1.1 matt /* Save register context. */
163 1.1 matt *(struct reg *)mcp->__gregs = tf->tf_regs;
164 1.1 matt
165 1.1 matt mcp->__private = (intptr_t)l->l_private;
166 1.1 matt
167 1.1 matt *flags |= _UC_CPU | _UC_TLSBASE;
168 1.1 matt
169 1.1 matt /* Save floating point register context, if any. */
170 1.1 matt KASSERT(l == curlwp);
171 1.2 chs if (fpu_valid_p(l)) {
172 1.1 matt /*
173 1.1 matt * If this process is the current FP owner, dump its
174 1.1 matt * context to the PCB first.
175 1.1 matt */
176 1.2 chs fpu_save(l);
177 1.1 matt
178 1.1 matt struct pcb * const pcb = lwp_getpcb(l);
179 1.1 matt *(struct fpreg *)mcp->__fregs = pcb->pcb_fpregs;
180 1.1 matt *flags |= _UC_FPU;
181 1.1 matt }
182 1.1 matt }
183 1.1 matt
184 1.1 matt int
185 1.1 matt cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
186 1.1 matt {
187 1.1 matt /*
188 1.1 matt * Verify that at least the PC and SP are user addresses.
189 1.1 matt */
190 1.1 matt if ((intptr_t) mcp->__gregs[_REG_PC] < 0
191 1.1 matt || (intptr_t) mcp->__gregs[_REG_SP] < 0
192 1.1 matt || (mcp->__gregs[_REG_PC] & 1))
193 1.1 matt return EINVAL;
194 1.1 matt
195 1.1 matt return 0;
196 1.1 matt }
197 1.1 matt
/*
 * Install the machine context *mcp into LWP l, honoring the _UC_* bits
 * in flags.  Returns 0 on success or EINVAL if the register set fails
 * validation.  Caller-visible state touched: trapframe, TLS pointer,
 * PCB FP regs, and the signal-stack flags (under p_lock).
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;
	const __greg_t * const gr = mcp->__gregs;
	int error;

	/* Restore register context, if any. */
	if (flags & _UC_CPU) {
		/* Reject kernel addresses / misaligned PC before touching tf. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Save register context. */
		tf->tf_regs = *(const struct reg *)gr;
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(intptr_t)mcp->__private);
	}

	/* Restore floating point register context, if any. */
	if (flags & _UC_FPU) {
		KASSERT(l == curlwp);
		/* Tell PCU we are replacing the FPU contents. */
		fpu_replace(l);

		/*
		 * The PCB FP regs struct includes the FP CSR, so use the
		 * proper size of fpreg when copying.
		 */
		struct pcb * const pcb = lwp_getpcb(l);
		pcb->pcb_fpregs = *(const struct fpreg *)mcp->__fregs;
	}

	/* Update sigaltstack state; p_lock protects l_sigstk. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
244 1.1 matt
/*
 * Scheduler notification that CPU ci should reschedule (flags say how):
 * kernel preemption requests trigger the kpreempt softint (or an IPI
 * for a remote CPU); ordinary user preemption just arranges for the
 * LWP to pass through ast() on its way back to user mode.
 */
void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{
	KASSERT(kpreempt_disabled());

	if ((flags & RESCHED_KPREEMPT) != 0) {
#ifdef __HAVE_PREEMPTION
		if ((flags & RESCHED_REMOTE) != 0) {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		} else {
			softint_trigger(SOFTINT_KPREEMPT);
		}
#endif
		/* Without __HAVE_PREEMPTION, kernel preemption is a no-op. */
		return;
	}
	if ((flags & RESCHED_REMOTE) != 0) {
#ifdef MULTIPROCESSOR
		cpu_send_ipi(ci, IPI_AST);
#endif
	} else {
		l->l_md.md_astpending = 1;	/* force call to ast() */
	}
}
268 1.1 matt
269 1.1 matt void
270 1.1 matt cpu_signotify(struct lwp *l)
271 1.1 matt {
272 1.1 matt KASSERT(kpreempt_disabled());
273 1.1 matt #ifdef __HAVE_FAST_SOFTINTS
274 1.1 matt KASSERT(lwp_locked(l, NULL));
275 1.1 matt #endif
276 1.1 matt
277 1.6 ad if (l->l_cpu != curcpu()) {
278 1.6 ad #ifdef MULTIPROCESSOR
279 1.6 ad cpu_send_ipi(ci, IPI_AST);
280 1.6 ad #endif
281 1.6 ad } else {
282 1.6 ad l->l_md.md_astpending = 1; /* force call to ast() */
283 1.6 ad }
284 1.1 matt }
285 1.1 matt
/*
 * Arrange for the (current-CPU) LWP l to be charged a profiling tick:
 * flag the debt and force an ast() on return to user mode.
 */
void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;		/* addupc_task() owed a tick */
	l->l_md.md_astpending = 1;	/* force call to ast() */
}
295 1.1 matt
/*
 * Reboot/halt the machine.  Unimplemented stub: ignores how/bootstr
 * and simply spins forever.
 */
void
cpu_reboot(int how, char *bootstr)
{
	for (;;) {
	}
}
302 1.1 matt
/*
 * Configure the kernel crash-dump device/geometry.  Not implemented yet.
 */
void
cpu_dumpconf(void)
{
	// TBD!!
}
308 1.1 matt
/*
 * Late MD startup: print the banner and memory totals, and create the
 * physio submap used by device I/O.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/* Report what is left after kernel/uvm setup has taken its share. */
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem()));
	printf("avail memory = %s\n", pbuf);
}
332 1.1 matt
/*
 * Early MD bootstrap, called with the kernel's VA range.
 * Empty stub — no early setup is done here yet.
 */
void
init_riscv(vaddr_t kernstart, vaddr_t kernend)
{
}
337